From f6e888e0c5d26d031343909c705fa8c6ccafd02a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Feb 2025 09:46:34 +0000
Subject: [PATCH] build(deps): bump github.com/cilium/cilium from 1.14.1 to 1.17.1

Bumps [github.com/cilium/cilium](https://github.com/cilium/cilium) from 1.14.1 to 1.17.1.
- [Release notes](https://github.com/cilium/cilium/releases)
- [Changelog](https://github.com/cilium/cilium/blob/1.17.1/CHANGELOG.md)
- [Commits](https://github.com/cilium/cilium/compare/1.14.1...1.17.1)

---
updated-dependencies:
- dependency-name: github.com/cilium/cilium
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]
---
 go.mod | 158 +-
 go.sum | 429 +-
 vendor/github.com/cespare/xxhash/v2/README.md | 2 +
 vendor/github.com/cespare/xxhash/v2/xxhash.go | 29 +-
 .../cespare/xxhash/v2/xxhash_asm.go | 2 +-
 .../cespare/xxhash/v2/xxhash_other.go | 2 +-
 .../cespare/xxhash/v2/xxhash_safe.go | 2 +-
 .../cespare/xxhash/v2/xxhash_unsafe.go | 2 +-
 vendor/github.com/cilium/cilium/AUTHORS | 363 +-
 .../cilium/api/v1/client/bgp/bgp_client.go | 112 +-
 .../v1/client/bgp/get_bgp_peers_responses.go | 162 +-
 .../bgp/get_bgp_route_policies_parameters.go | 169 +
 .../bgp/get_bgp_route_policies_responses.go | 255 +
 .../client/bgp/get_bgp_routes_parameters.go | 286 +
 .../v1/client/bgp/get_bgp_routes_responses.go | 255 +
 .../cilium/api/v1/client/cilium_api_client.go | 10 -
 .../api/v1/client/daemon/daemon_client.go | 30 +-
 .../get_cgroup_dump_metadata_responses.go | 25 +-
 .../daemon/get_cluster_nodes_responses.go | 14 +-
 .../v1/client/daemon/get_config_responses.go | 14 +-
 .../client/daemon/get_debuginfo_responses.go | 25 +-
 .../client/daemon/get_healthz_parameters.go | 41 +-
 .../v1/client/daemon/get_healthz_responses.go | 14 +-
 .../daemon/get_map_name_events_responses.go | 20 +-
 .../client/daemon/get_map_name_responses.go | 23 +-
 .../api/v1/client/daemon/get_map_responses.go | 14 +-
 .../client/daemon/get_node_ids_responses.go | 14 +-
 .../client/daemon/patch_config_responses.go | 43 +-
 .../endpoint/delete_endpoint_id_parameters.go | 8 +-
 .../endpoint/delete_endpoint_id_responses.go | 61 +-
 .../endpoint/delete_endpoint_parameters.go | 153 +
 .../endpoint/delete_endpoint_responses.go | 353 +
 .../api/v1/client/endpoint/endpoint_client.go | 71 +-
 .../get_endpoint_id_config_parameters.go | 8 +-
 .../get_endpoint_id_config_responses.go | 32 +-
 .../get_endpoint_id_healthz_parameters.go | 8 +-
 .../get_endpoint_id_healthz_responses.go | 41 +-
 .../get_endpoint_id_labels_parameters.go | 8 +-
 .../get_endpoint_id_labels_responses.go | 32 +-
 .../get_endpoint_id_log_parameters.go | 8 +-
 .../endpoint/get_endpoint_id_log_responses.go | 41 +-
 .../endpoint/get_endpoint_id_parameters.go | 8 +-
 .../endpoint/get_endpoint_id_responses.go | 43 +-
 .../client/endpoint/get_endpoint_responses.go | 32 +-
 .../patch_endpoint_id_config_parameters.go | 8 +-
 .../patch_endpoint_id_config_responses.go | 59 +-
 .../patch_endpoint_id_labels_parameters.go | 8 +-
 .../patch_endpoint_id_labels_responses.go | 50 +-
 .../endpoint/patch_endpoint_id_parameters.go | 8 +-
 .../endpoint/patch_endpoint_id_responses.go | 61 +-
 .../endpoint/put_endpoint_id_parameters.go | 8 +-
 .../endpoint/put_endpoint_id_responses.go | 75 +-
 .../client/ipam/delete_ipam_ip_responses.go | 59 +-
 .../cilium/api/v1/client/ipam/ipam_client.go | 28 +-
 .../v1/client/ipam/post_ipam_ip_responses.go | 59 +-
 .../api/v1/client/ipam/post_ipam_responses.go | 34 +-
.../client/metrics/get_metrics_parameters.go | 131 - .../client/metrics/get_metrics_responses.go | 156 - .../api/v1/client/metrics/metrics_client.go | 82 - .../policy/delete_fqdn_cache_responses.go | 32 +- .../client/policy/delete_policy_responses.go | 54 +- .../policy/get_fqdn_cache_id_parameters.go | 8 +- .../policy/get_fqdn_cache_id_responses.go | 34 +- .../client/policy/get_fqdn_cache_responses.go | 34 +- .../client/policy/get_fqdn_names_responses.go | 25 +- .../get_identity_endpoints_responses.go | 23 +- .../policy/get_identity_id_responses.go | 54 +- .../client/policy/get_identity_responses.go | 45 +- .../api/v1/client/policy/get_ip_parameters.go | 25 + .../api/v1/client/policy/get_ip_responses.go | 34 +- .../v1/client/policy/get_policy_responses.go | 23 +- .../policy/get_policy_selectors_responses.go | 14 +- .../api/v1/client/policy/policy_client.go | 28 +- .../v1/client/policy/put_policy_parameters.go | 80 + .../v1/client/policy/put_policy_responses.go | 56 +- .../prefilter/delete_prefilter_responses.go | 45 +- .../prefilter/get_prefilter_responses.go | 25 +- .../prefilter/patch_prefilter_responses.go | 45 +- .../v1/client/prefilter/prefilter_client.go | 28 +- .../recorder/delete_recorder_id_responses.go | 41 +- .../recorder/get_recorder_id_responses.go | 23 +- .../recorder/get_recorder_masks_responses.go | 14 +- .../client/recorder/get_recorder_responses.go | 14 +- .../recorder/put_recorder_id_responses.go | 41 +- .../api/v1/client/recorder/recorder_client.go | 28 +- .../service/delete_service_id_responses.go | 41 +- .../v1/client/service/get_lrp_responses.go | 14 +- .../service/get_service_id_responses.go | 23 +- .../client/service/get_service_responses.go | 14 +- .../service/put_service_id_responses.go | 74 +- .../api/v1/client/service/service_client.go | 28 +- .../statedb/get_statedb_dump_parameters.go | 131 - .../statedb/get_statedb_dump_responses.go | 101 - .../api/v1/client/statedb/statedb_client.go | 83 - .../connectivity/connectivity_client.go | 28 +- .../connectivity/get_status_responses.go | 14 +- .../put_status_probe_responses.go | 34 +- .../client/restapi/get_healthz_responses.go | 25 +- .../health/client/restapi/restapi_client.go | 28 +- .../api/v1/health/models/endpoint_status.go | 10 + .../api/v1/health/models/health_response.go | 9 + .../health/models/health_status_response.go | 10 + .../api/v1/health/models/host_status.go | 10 + .../api/v1/health/models/node_status.go | 15 + .../api/v1/health/models/path_status.go | 10 + .../cilium/api/v1/models/attach_mode.go | 81 + .../cilium/cilium/api/v1/models/b_p_f_map.go | 5 + .../cilium/api/v1/models/b_p_f_map_entry.go | 2 +- .../cilium/api/v1/models/b_p_f_map_list.go | 5 + .../cilium/api/v1/models/b_p_f_map_status.go | 5 + .../cilium/api/v1/models/backend_address.go | 8 +- .../cilium/api/v1/models/bandwidth_manager.go | 2 +- .../cilium/cilium/api/v1/models/bgp_family.go | 56 + .../cilium/cilium/api/v1/models/bgp_nlri.go | 53 + .../cilium/cilium/api/v1/models/bgp_path.go | 235 + .../api/v1/models/bgp_path_attribute.go | 53 + .../cilium/cilium/api/v1/models/bgp_peer.go | 13 + .../cilium/cilium/api/v1/models/bgp_route.go | 133 + .../cilium/api/v1/models/bgp_route_policy.go | 182 + .../models/bgp_route_policy_prefix_match.go | 59 + .../v1/models/bgp_route_policy_statement.go | 253 + .../cilium/api/v1/models/c_id_r_policy.go | 10 + .../api/v1/models/c_n_i_chaining_status.go | 2 +- .../api/v1/models/cgroup_dump_metadata.go | 5 + .../api/v1/models/cgroup_pod_metadata.go | 5 + .../cilium/api/v1/models/clock_source.go | 2 +- 
.../api/v1/models/cluster_mesh_status.go | 5 + .../api/v1/models/cluster_node_status.go | 10 + .../api/v1/models/cluster_nodes_response.go | 5 + .../cilium/api/v1/models/cluster_status.go | 10 + .../cilium/api/v1/models/controller_status.go | 10 + .../api/v1/models/controller_statuses.go | 5 + .../api/v1/models/daemon_configuration.go | 10 + .../v1/models/daemon_configuration_spec.go | 6 +- .../v1/models/daemon_configuration_status.go | 44 + .../cilium/api/v1/models/datapath_mode.go | 8 +- .../cilium/cilium/api/v1/models/debug_info.go | 32 +- .../cilium/api/v1/models/encryption_status.go | 62 +- .../cilium/cilium/api/v1/models/endpoint.go | 10 + .../models/endpoint_batch_delete_request.go | 53 + .../api/v1/models/endpoint_change_request.go | 33 +- .../v1/models/endpoint_configuration_spec.go | 9 + .../models/endpoint_configuration_status.go | 13 + .../cilium/api/v1/models/endpoint_health.go | 12 + .../api/v1/models/endpoint_identifiers.go | 13 +- .../api/v1/models/endpoint_networking.go | 17 +- .../cilium/api/v1/models/endpoint_policy.go | 14 + .../api/v1/models/endpoint_policy_status.go | 15 + .../cilium/api/v1/models/endpoint_status.go | 36 + .../api/v1/models/endpoint_status_change.go | 6 +- .../api/v1/models/endpoint_status_log.go | 5 + .../cilium/api/v1/models/frontend_address.go | 4 +- .../cilium/api/v1/models/frontend_mapping.go | 10 + .../cilium/api/v1/models/host_firewall.go | 2 +- .../cilium/api/v1/models/host_routing.go | 110 - .../cilium/api/v1/models/hubble_status.go | 14 +- .../cilium/api/v1/models/i_psec_status.go | 67 + .../api/v1/models/identity_endpoints.go | 5 + .../cilium/api/v1/models/ip_a_m_response.go | 12 + .../cilium/api/v1/models/ip_a_m_status.go | 4 + .../cilium/api/v1/models/ip_list_entry.go | 5 + .../cilium/cilium/api/v1/models/k8s_status.go | 2 +- .../api/v1/models/kube_proxy_replacement.go | 148 +- .../cilium/cilium/api/v1/models/l4_policy.go | 10 + .../cilium/api/v1/models/l_r_p_backend.go | 5 + .../cilium/cilium/api/v1/models/l_r_p_spec.go | 5 + .../cilium/cilium/api/v1/models/label.go | 59 + .../cilium/api/v1/models/label_array.go | 81 + .../api/v1/models/label_configuration.go | 10 + .../v1/models/label_configuration_status.go | 5 + .../cilium/cilium/api/v1/models/map_event.go | 4 +- .../cilium/api/v1/models/masquerading.go | 7 +- .../cilium/api/v1/models/name_manager.go | 5 + .../cilium/api/v1/models/named_ports.go | 5 + .../cilium/api/v1/models/node_addressing.go | 10 + .../cilium/api/v1/models/node_element.go | 23 + .../cilium/cilium/api/v1/models/port.go | 2 +- .../cilium/cilium/api/v1/models/prefilter.go | 10 + .../cilium/api/v1/models/prefilter_status.go | 5 + .../cilium/api/v1/models/proxy_statistics.go | 7 +- .../cilium/api/v1/models/proxy_status.go | 7 +- .../cilium/cilium/api/v1/models/recorder.go | 10 + .../cilium/api/v1/models/recorder_filter.go | 2 +- .../cilium/api/v1/models/recorder_mask.go | 5 + .../api/v1/models/recorder_mask_status.go | 5 + .../cilium/api/v1/models/recorder_spec.go | 5 + .../cilium/api/v1/models/recorder_status.go | 5 + .../cilium/api/v1/models/remote_cluster.go | 13 + .../api/v1/models/remote_cluster_config.go | 3 + .../api/v1/models/remote_cluster_synced.go | 3 + .../v1/models/request_response_statistics.go | 10 + .../cilium/cilium/api/v1/models/routing.go | 163 + .../cilium/api/v1/models/selector_cache.go | 5 + .../v1/models/selector_identity_mapping.go | 55 +- .../cilium/cilium/api/v1/models/service.go | 10 + .../cilium/api/v1/models/service_spec.go | 21 +- .../cilium/api/v1/models/service_status.go | 5 + 
.../cilium/cilium/api/v1/models/srv6.go | 113 + .../cilium/api/v1/models/state_d_b_query.go | 62 + .../cilium/cilium/api/v1/models/status.go | 2 +- .../cilium/api/v1/models/status_response.go | 397 +- .../cilium/api/v1/models/trace_selector.go | 10 + .../cilium/cilium/api/v1/models/trace_to.go | 5 + .../api/v1/models/wireguard_interface.go | 11 +- .../cilium/api/v1/models/wireguard_peer.go | 2 +- .../cilium/api/v1/models/wireguard_status.go | 9 +- .../api/v1/models/zz_generated.deepcopy.go | 120 +- .../api/v1/models/zz_generated.deepequal.go | 3 + .../pkg/alibabacloud/eni/types/types.go | 9 + .../eni/types/zz_generated.deepcopy.go | 7 +- .../eni/types/zz_generated.deepequal.go | 18 +- .../cilium/cilium/pkg/api/apierror.go | 10 +- .../cilium/cilium/pkg/api/apipanic.go | 17 +- .../cilium/cilium/pkg/aws/eni/types/types.go | 9 + .../aws/eni/types/zz_generated.deepequal.go | 4 + .../cilium/cilium/pkg/azure/types/types.go | 4 + .../github.com/cilium/cilium/pkg/cidr/cidr.go | 78 +- .../cilium/cilium/pkg/cidr/cidr_linux.go | 14 + .../cilium/pkg/cidr/cidr_unspecified.go | 12 + .../cilium/cilium/pkg/client/client.go | 239 +- .../cilium/cilium/pkg/client/endpoint.go | 16 +- .../cilium/cilium/pkg/client/policy.go | 10 + .../pkg/clustermesh/types/addressing.go | 99 +- .../cilium/pkg/clustermesh/types/option.go | 137 + .../cilium/pkg/clustermesh/types/types.go | 85 +- .../cilium/cilium/pkg/command/exec/doc.go | 5 + .../cilium/cilium/pkg/command/exec/exec.go | 124 + .../cilium/cilium/pkg/command/output.go | 2 +- .../cilium/pkg/comparator/comparator.go | 70 +- .../cilium/pkg/components/components.go | 25 - .../cilium/pkg/container/cache/cache.go | 61 + .../cilium/pkg/container/cache/caches.go | 37 + .../datapath/linux/probes/attach_cgroup.go | 69 + .../pkg/datapath/linux/probes/attach_type.go | 80 + .../cilium/pkg/datapath/linux/probes/doc.go | 5 + .../pkg/datapath/linux/probes/kernel_hz.go | 151 + .../linux/probes/managed_neighbors.go | 94 + .../pkg/datapath/linux/probes/probes.go | 853 + .../pkg/datapath/linux/probes/probes_linux.go | 12 + .../linux/probes/probes_unspecified.go | 12 + .../linux/safenetlink/netlink_linux.go | 403 + .../linux/safenetlink/netlink_unspecified.go | 141 + .../cilium/cilium/pkg/defaults/defaults.go | 194 +- .../cilium/cilium/pkg/defaults/node.go | 21 + .../cilium/cilium/pkg/endpoint/id/id.go | 46 +- .../cilium/cilium/pkg/health/client/client.go | 124 +- .../cilium/pkg/health/client/modules.go | 153 + .../cilium/cilium/pkg/health/client/tree.go | 220 + .../cilium/pkg/health/defaults/defaults.go | 3 + .../cilium/cilium/pkg/hive/cell/config.go | 156 - .../cilium/cilium/pkg/hive/cell/health.go | 240 - .../cilium/cilium/pkg/hive/cell/invoke.go | 73 - .../cilium/cilium/pkg/hive/cell/metric.go | 138 - .../cilium/cilium/pkg/hive/cell/module.go | 90 - .../cilium/pkg/hive/health/types/types.go | 111 + .../cilium/pkg/hive/internal/reflect.go | 36 - .../github.com/cilium/cilium/pkg/ip/cidr.go | 31 +- vendor/github.com/cilium/cilium/pkg/ip/ip.go | 131 +- .../cilium/cilium/pkg/ipam/option/option.go | 6 - .../cilium/cilium/pkg/ipam/types/types.go | 104 +- .../pkg/ipam/types/zz_generated.deepcopy.go | 37 + .../pkg/ipam/types/zz_generated.deepequal.go | 94 +- .../cilium/pkg/k8s/apis/cilium.io/const.go | 32 +- .../cilium/pkg/k8s/apis/cilium.io/register.go | 2 +- .../pkg/k8s/apis/cilium.io/utils/utils.go | 54 +- .../pkg/k8s/apis/cilium.io/v2/ccec_types.go | 1 - .../pkg/k8s/apis/cilium.io/v2/ccnp_types.go | 19 +- .../pkg/k8s/apis/cilium.io/v2/cec_types.go | 32 +- 
.../pkg/k8s/apis/cilium.io/v2/cegp_types.go | 6 +- .../pkg/k8s/apis/cilium.io/v2/cew_types.go | 2 - .../pkg/k8s/apis/cilium.io/v2/clrp_types.go | 27 +- .../pkg/k8s/apis/cilium.io/v2/cnc_types.go | 55 + .../pkg/k8s/apis/cilium.io/v2/cnp_types.go | 65 +- .../pkg/k8s/apis/cilium.io/v2/register.go | 13 + .../cilium/pkg/k8s/apis/cilium.io/v2/types.go | 39 +- .../cilium.io/v2/zz_generated.deepcopy.go | 142 +- .../cilium.io/v2/zz_generated.deepequal.go | 90 +- .../cilium.io/v2alpha1/bgp_advert_types.go | 145 + .../cilium.io/v2alpha1/bgp_cluster_types.go | 175 + .../v2alpha1/bgp_node_override_types.go | 101 + .../apis/cilium.io/v2alpha1/bgp_node_types.go | 239 + .../apis/cilium.io/v2alpha1/bgp_peer_types.go | 276 + .../k8s/apis/cilium.io/v2alpha1/bgpp_types.go | 184 +- .../k8s/apis/cilium.io/v2alpha1/cnc_types.go | 2 +- .../v2alpha1/l2announcement_types.go | 10 +- .../apis/cilium.io/v2alpha1/lbipam_types.go | 35 +- .../k8s/apis/cilium.io/v2alpha1/register.go | 51 +- .../v2alpha1/zz_generated.deepcopy.go | 1176 +- .../v2alpha1/zz_generated.deepequal.go | 1165 +- .../client/clientset/versioned/clientset.go | 4 +- .../typed/cilium.io/v2/cilium.io_client.go | 15 +- .../v2/ciliumclusterwideenvoyconfig.go | 136 +- .../v2/ciliumclusterwidenetworkpolicy.go | 154 +- .../cilium.io/v2/ciliumegressgatewaypolicy.go | 134 +- .../typed/cilium.io/v2/ciliumendpoint.go | 163 +- .../typed/cilium.io/v2/ciliumenvoyconfig.go | 144 +- .../cilium.io/v2/ciliumexternalworkload.go | 152 +- .../typed/cilium.io/v2/ciliumidentity.go | 134 +- .../cilium.io/v2/ciliumlocalredirectpolicy.go | 163 +- .../typed/cilium.io/v2/ciliumnetworkpolicy.go | 163 +- .../typed/cilium.io/v2/ciliumnode.go | 152 +- .../typed/cilium.io/v2/ciliumnodeconfig.go | 55 + .../typed/cilium.io/v2/generated_expansion.go | 2 + .../cilium.io/v2alpha1/cilium.io_client.go | 35 +- .../v2alpha1/ciliumbgpadvertisement.go | 57 + .../v2alpha1/ciliumbgpclusterconfig.go | 59 + .../cilium.io/v2alpha1/ciliumbgpnodeconfig.go | 57 + .../v2alpha1/ciliumbgpnodeconfigoverride.go | 59 + .../cilium.io/v2alpha1/ciliumbgppeerconfig.go | 57 + .../v2alpha1/ciliumbgppeeringpolicy.go | 136 +- .../cilium.io/v2alpha1/ciliumcidrgroup.go | 134 +- .../cilium.io/v2alpha1/ciliumendpointslice.go | 134 +- .../v2alpha1/ciliuml2announcementpolicy.go | 156 +- .../v2alpha1/ciliumloadbalancerippool.go | 154 +- .../cilium.io/v2alpha1/ciliumnodeconfig.go | 144 +- .../cilium.io/v2alpha1/ciliumpodippool.go | 134 +- .../cilium.io/v2alpha1/generated_expansion.go | 10 + .../v2/ciliumclusterwideenvoyconfig.go | 16 +- .../v2/ciliumclusterwidenetworkpolicy.go | 16 +- .../cilium.io/v2/ciliumegressgatewaypolicy.go | 16 +- .../cilium.io/v2/ciliumendpoint.go | 16 +- .../cilium.io/v2/ciliumenvoyconfig.go | 16 +- .../cilium.io/v2/ciliumexternalworkload.go | 16 +- .../cilium.io/v2/ciliumidentity.go | 16 +- .../cilium.io/v2/ciliumlocalredirectpolicy.go | 16 +- .../cilium.io/v2/ciliumnetworkpolicy.go | 16 +- .../cilium.io/v2/ciliumnode.go | 16 +- .../cilium.io/v2/ciliumnodeconfig.go | 77 + .../cilium.io/v2/interface.go | 7 + .../v2alpha1/ciliumbgpadvertisement.go | 76 + .../v2alpha1/ciliumbgpclusterconfig.go | 76 + .../cilium.io/v2alpha1/ciliumbgpnodeconfig.go | 76 + .../v2alpha1/ciliumbgpnodeconfigoverride.go | 76 + .../cilium.io/v2alpha1/ciliumbgppeerconfig.go | 76 + .../v2alpha1/ciliumbgppeeringpolicy.go | 16 +- .../cilium.io/v2alpha1/ciliumcidrgroup.go | 16 +- .../cilium.io/v2alpha1/ciliumendpointslice.go | 16 +- .../v2alpha1/ciliuml2announcementpolicy.go | 16 +- .../v2alpha1/ciliumloadbalancerippool.go | 16 +- 
.../cilium.io/v2alpha1/ciliumnodeconfig.go | 16 +- .../cilium.io/v2alpha1/ciliumpodippool.go | 16 +- .../cilium.io/v2alpha1/interface.go | 35 + .../informers/externalversions/factory.go | 15 +- .../informers/externalversions/generic.go | 14 +- .../v2/ciliumclusterwideenvoyconfig.go | 36 +- .../v2/ciliumclusterwidenetworkpolicy.go | 36 +- .../cilium.io/v2/ciliumegressgatewaypolicy.go | 36 +- .../listers/cilium.io/v2/ciliumendpoint.go | 51 +- .../listers/cilium.io/v2/ciliumenvoyconfig.go | 51 +- .../cilium.io/v2/ciliumexternalworkload.go | 36 +- .../listers/cilium.io/v2/ciliumidentity.go | 36 +- .../cilium.io/v2/ciliumlocalredirectpolicy.go | 51 +- .../cilium.io/v2/ciliumnetworkpolicy.go | 51 +- .../client/listers/cilium.io/v2/ciliumnode.go | 36 +- .../listers/cilium.io/v2/ciliumnodeconfig.go | 57 + .../cilium.io/v2/expansion_generated.go | 8 + .../v2alpha1/ciliumbgpadvertisement.go | 35 + .../v2alpha1/ciliumbgpclusterconfig.go | 35 + .../cilium.io/v2alpha1/ciliumbgpnodeconfig.go | 35 + .../v2alpha1/ciliumbgpnodeconfigoverride.go | 35 + .../cilium.io/v2alpha1/ciliumbgppeerconfig.go | 35 + .../v2alpha1/ciliumbgppeeringpolicy.go | 36 +- .../cilium.io/v2alpha1/ciliumcidrgroup.go | 36 +- .../cilium.io/v2alpha1/ciliumendpointslice.go | 36 +- .../v2alpha1/ciliuml2announcementpolicy.go | 36 +- .../v2alpha1/ciliumloadbalancerippool.go | 36 +- .../cilium.io/v2alpha1/ciliumnodeconfig.go | 51 +- .../cilium.io/v2alpha1/ciliumpodippool.go | 36 +- .../cilium.io/v2alpha1/expansion_generated.go | 20 + .../pkg/k8s/slim/k8s/api/core/v1/doc.go | 3 +- .../k8s/slim/k8s/api/core/v1/generated.pb.go | 798 +- .../k8s/slim/k8s/api/core/v1/generated.proto | 165 +- .../pkg/k8s/slim/k8s/api/core/v1/types.go | 217 +- .../k8s/slim/k8s/api/core/v1/types_cilium.go | 24 + .../k8s/api/core/v1/zz_generated.deepcopy.go | 45 +- .../k8s/api/core/v1/zz_generated.deepequal.go | 52 + .../pkg/k8s/slim/k8s/apis/labels/labels.go | 31 + .../pkg/k8s/slim/k8s/apis/labels/selector.go | 107 +- .../k8s/apis/labels/zz_generated.deepequal.go | 24 + .../k8s/slim/k8s/apis/meta/v1/generated.pb.go | 229 +- .../k8s/slim/k8s/apis/meta/v1/generated.proto | 14 +- .../pkg/k8s/slim/k8s/apis/meta/v1/helpers.go | 129 +- .../pkg/k8s/slim/k8s/apis/meta/v1/meta.go | 35 +- .../pkg/k8s/slim/k8s/apis/meta/v1/register.go | 28 +- .../pkg/k8s/slim/k8s/apis/meta/v1/types.go | 45 +- .../k8s/apis/meta/v1/validation/validation.go | 154 +- .../apis/meta/v1/zz_generated.conversion.go | 23 + .../apis/meta/v1/zz_generated.deepequal.go | 6 + .../k8s/apis/meta/v1/zz_generated.defaults.go | 20 + .../pkg/k8s/slim/k8s/apis/util/intstr/doc.go | 8 + .../slim/k8s/apis/util/intstr/generated.pb.go | 356 + .../slim/k8s/apis/util/intstr/generated.proto | 30 + .../k8s/slim/k8s/apis/util/intstr/intstr.go | 221 + .../util/intstr/zz_generated.deepequal.go | 29 + .../cilium/cilium/pkg/k8s/utils/utils.go | 143 +- .../cilium/cilium/pkg/labels/array.go | 71 +- .../cilium/cilium/pkg/labels/arraylist.go | 55 +- .../cilium/cilium/pkg/labels/cidr.go | 119 + .../cilium/cilium/pkg/labels/cidr/cidr.go | 107 - .../cilium/cilium/pkg/labels/cidr/doc.go | 6 - .../cilium/cilium/pkg/labels/labels.go | 322 +- .../cilium/cilium/pkg/labels/oplabels.go | 37 +- .../cilium/pkg/loadbalancer/loadbalancer.go | 356 +- .../pkg/loadbalancer/zz_generated.deepcopy.go | 10 +- .../github.com/cilium/cilium/pkg/lock/map.go | 118 + .../cilium/cilium/pkg/lock/sortable_mutex.go | 86 + .../cilium/pkg/lock/stoppable_waitgroup.go | 31 +- .../cilium/pkg/logging/logfields/logfields.go | 117 +- 
.../cilium/cilium/pkg/logging/logging.go | 159 +- .../cilium/cilium/pkg/logging/slog.go | 121 + .../github.com/cilium/cilium/pkg/mac/mac.go | 2 +- .../cilium/cilium/pkg/mac/mac_linux.go | 27 +- .../cilium/cilium/pkg/mac/mac_unspecified.go | 6 + .../cilium/cilium/pkg/metrics/bpf.go | 143 +- .../cilium/cilium/pkg/metrics/cell.go | 123 +- .../cilium/cilium/pkg/metrics/cmd.go | 513 + .../cilium/cilium/pkg/metrics/dump.html.tmpl | 159 + .../cilium/cilium/pkg/metrics/histogram.go | 89 + .../cilium/cilium/pkg/metrics/interfaces.go | 39 +- .../cilium/cilium/pkg/metrics/json.go | 28 + .../cilium/cilium/pkg/metrics/logging_hook.go | 65 +- .../pkg/metrics/metric/collections/product.go | 74 + .../cilium/pkg/metrics/metric/counter.go | 86 +- .../cilium/cilium/pkg/metrics/metric/gauge.go | 128 +- .../cilium/pkg/metrics/metric/histogram.go | 76 +- .../cilium/pkg/metrics/metric/metric.go | 181 +- .../cilium/cilium/pkg/metrics/metrics.go | 616 +- .../cilium/cilium/pkg/metrics/metrics_unix.go | 15 +- .../cilium/pkg/metrics/metrics_windows.go | 8 + .../cilium/cilium/pkg/metrics/plot.go | 211 + .../cilium/cilium/pkg/metrics/registry.go | 122 +- .../cilium/cilium/pkg/metrics/sampler.go | 424 + .../github.com/cilium/cilium/pkg/netns/doc.go | 6 + .../cilium/cilium/pkg/netns/netns_linux.go | 245 + .../cilium/cilium/pkg/netns/netns_other.go | 32 + .../cilium/pkg/node/addressing/addresstype.go | 49 +- .../cilium/cilium/pkg/option/.gitignore | 1 + .../cilium/cilium/pkg/option/config.go | 2077 +- .../cilium/cilium/pkg/option/daemon.go | 5 +- .../cilium/cilium/pkg/option/endpoint.go | 1 + .../cilium/cilium/pkg/option/features.go | 19 + .../cilium/cilium/pkg/option/option.go | 148 +- .../cilium/pkg/option/runtime_options.go | 7 + .../cilium/cilium/pkg/policy/api/cidr.go | 175 +- .../cilium/cilium/pkg/policy/api/egress.go | 103 +- .../cilium/cilium/pkg/policy/api/entity.go | 29 +- .../cilium/cilium/pkg/policy/api/fqdn.go | 18 + .../cilium/cilium/pkg/policy/api/groups.go | 20 +- .../cilium/cilium/pkg/policy/api/http.go | 7 +- .../cilium/cilium/pkg/policy/api/icmp.go | 141 +- .../cilium/cilium/pkg/policy/api/ingress.go | 133 +- .../cilium/cilium/pkg/policy/api/l4.go | 43 +- .../cilium/cilium/pkg/policy/api/rule.go | 113 +- .../cilium/pkg/policy/api/rule_validation.go | 524 +- .../cilium/cilium/pkg/policy/api/selector.go | 11 +- .../cilium/cilium/pkg/policy/api/service.go | 9 +- .../cilium/cilium/pkg/policy/api/utils.go | 31 +- .../pkg/policy/api/zz_generated.deepcopy.go | 107 +- .../pkg/policy/api/zz_generated.deepequal.go | 150 +- .../cilium/cilium/pkg/promise/promise.go | 25 +- .../cilium/cilium/pkg/resiliency/error.go | 14 + .../cilium/cilium/pkg/resiliency/errorset.go | 63 + .../cilium/cilium/pkg/resiliency/helpers.go | 32 + .../cilium/cilium/pkg/resiliency/retry.go | 13 + .../cilium/cilium/pkg/safetime/safetime.go | 2 +- .../cilium/cilium/pkg/slices/slices.go | 50 +- .../cilium/cilium/pkg/source/source.go | 144 + .../cilium/cilium/pkg/spanstat/spanstat.go | 3 +- .../github.com/cilium/cilium/pkg/time/time.go | 131 + .../cilium/cilium/pkg/u8proto/u8proto.go | 67 + .../cilium/cilium/pkg/version/version_unix.go | 32 +- .../cilium/cilium/pkg/versioncheck/check.go | 4 +- vendor/github.com/cilium/ebpf/.clang-format | 25 + vendor/github.com/cilium/ebpf/.gitattributes | 1 + vendor/github.com/cilium/ebpf/.gitignore | 14 + vendor/github.com/cilium/ebpf/.golangci.yaml | 19 + vendor/github.com/cilium/ebpf/.vimto.toml | 12 + vendor/github.com/cilium/ebpf/CODEOWNERS | 13 + .../github.com/cilium/ebpf/CODE_OF_CONDUCT.md | 46 + 
vendor/github.com/cilium/ebpf/CONTRIBUTING.md | 5 + vendor/github.com/cilium/ebpf/LICENSE | 23 + vendor/github.com/cilium/ebpf/MAINTAINERS.md | 3 + vendor/github.com/cilium/ebpf/Makefile | 114 + vendor/github.com/cilium/ebpf/README.md | 73 + vendor/github.com/cilium/ebpf/asm/alu.go | 180 + .../github.com/cilium/ebpf/asm/alu_string.go | 117 + vendor/github.com/cilium/ebpf/asm/doc.go | 2 + vendor/github.com/cilium/ebpf/asm/func.go | 250 + .../github.com/cilium/ebpf/asm/func_string.go | 235 + .../github.com/cilium/ebpf/asm/instruction.go | 953 + vendor/github.com/cilium/ebpf/asm/jump.go | 135 + .../github.com/cilium/ebpf/asm/jump_string.go | 53 + .../github.com/cilium/ebpf/asm/load_store.go | 225 + .../cilium/ebpf/asm/load_store_string.go | 84 + vendor/github.com/cilium/ebpf/asm/metadata.go | 80 + vendor/github.com/cilium/ebpf/asm/opcode.go | 303 + .../cilium/ebpf/asm/opcode_string.go | 30 + vendor/github.com/cilium/ebpf/asm/register.go | 51 + .../cilium/ebpf/attachtype_string.go | 79 + vendor/github.com/cilium/ebpf/btf/btf.go | 713 + .../github.com/cilium/ebpf/btf/btf_types.go | 520 + .../cilium/ebpf/btf/btf_types_string.go | 80 + vendor/github.com/cilium/ebpf/btf/core.go | 1261 + vendor/github.com/cilium/ebpf/btf/doc.go | 5 + vendor/github.com/cilium/ebpf/btf/ext_info.go | 835 + vendor/github.com/cilium/ebpf/btf/feature.go | 158 + vendor/github.com/cilium/ebpf/btf/format.go | 353 + vendor/github.com/cilium/ebpf/btf/handle.go | 317 + vendor/github.com/cilium/ebpf/btf/kernel.go | 157 + vendor/github.com/cilium/ebpf/btf/marshal.go | 654 + vendor/github.com/cilium/ebpf/btf/strings.go | 198 + .../github.com/cilium/ebpf/btf/traversal.go | 159 + vendor/github.com/cilium/ebpf/btf/types.go | 1417 + .../github.com/cilium/ebpf/btf/workarounds.go | 26 + vendor/github.com/cilium/ebpf/collection.go | 1036 + vendor/github.com/cilium/ebpf/cpu.go | 66 + vendor/github.com/cilium/ebpf/doc.go | 25 + vendor/github.com/cilium/ebpf/elf_reader.go | 1457 + vendor/github.com/cilium/ebpf/elf_sections.go | 109 + vendor/github.com/cilium/ebpf/features/doc.go | 19 + vendor/github.com/cilium/ebpf/features/map.go | 321 + .../github.com/cilium/ebpf/features/misc.go | 135 + .../github.com/cilium/ebpf/features/prog.go | 300 + .../cilium/ebpf/features/version.go | 18 + vendor/github.com/cilium/ebpf/info.go | 795 + .../github.com/cilium/ebpf/internal/buffer.go | 31 + .../github.com/cilium/ebpf/internal/deque.go | 91 + vendor/github.com/cilium/ebpf/internal/elf.go | 102 + .../cilium/ebpf/internal/endian_be.go | 9 + .../cilium/ebpf/internal/endian_le.go | 9 + .../github.com/cilium/ebpf/internal/errors.go | 179 + .../cilium/ebpf/internal/feature.go | 227 + vendor/github.com/cilium/ebpf/internal/io.go | 128 + .../cilium/ebpf/internal/kallsyms/cache.go | 20 + .../cilium/ebpf/internal/kallsyms/kallsyms.go | 277 + .../cilium/ebpf/internal/kallsyms/reader.go | 118 + .../cilium/ebpf/internal/kconfig/kconfig.go | 274 + .../cilium/ebpf/internal/linux/auxv.go | 60 + .../cilium/ebpf/internal/linux/doc.go | 2 + .../cilium/ebpf/internal/linux/kconfig.go | 31 + .../cilium/ebpf/internal/linux/platform.go | 43 + .../cilium/ebpf/internal/linux/statfs.go | 23 + .../cilium/ebpf/internal/linux/vdso.go | 144 + .../cilium/ebpf/internal/linux/version.go | 34 + .../github.com/cilium/ebpf/internal/math.go | 33 + .../github.com/cilium/ebpf/internal/output.go | 97 + .../github.com/cilium/ebpf/internal/prog.go | 11 + .../cilium/ebpf/internal/sys/doc.go | 6 + .../github.com/cilium/ebpf/internal/sys/fd.go | 165 + .../cilium/ebpf/internal/sys/pinning.go | 65 
+ .../cilium/ebpf/internal/sys/ptr.go | 52 + .../cilium/ebpf/internal/sys/ptr_32_be.go | 14 + .../cilium/ebpf/internal/sys/ptr_32_le.go | 14 + .../cilium/ebpf/internal/sys/ptr_64.go | 13 + .../cilium/ebpf/internal/sys/signals.go | 83 + .../cilium/ebpf/internal/sys/syscall.go | 216 + .../cilium/ebpf/internal/sys/types.go | 1556 ++ .../cilium/ebpf/internal/sysenc/buffer.go | 85 + .../cilium/ebpf/internal/sysenc/doc.go | 3 + .../cilium/ebpf/internal/sysenc/layout.go | 41 + .../cilium/ebpf/internal/sysenc/marshal.go | 177 + .../internal/testutils/fdtrace/fd_trace.go | 103 + .../ebpf/internal/testutils/fdtrace/main.go | 31 + .../cilium/ebpf/internal/tracefs/kprobe.go | 364 + .../ebpf/internal/tracefs/probetype_string.go | 24 + .../cilium/ebpf/internal/tracefs/uprobe.go | 16 + .../cilium/ebpf/internal/unix/doc.go | 11 + .../cilium/ebpf/internal/unix/types_linux.go | 217 + .../cilium/ebpf/internal/unix/types_other.go | 312 + .../cilium/ebpf/internal/version.go | 77 + vendor/github.com/cilium/ebpf/link/anchor.go | 137 + vendor/github.com/cilium/ebpf/link/cgroup.go | 208 + vendor/github.com/cilium/ebpf/link/doc.go | 2 + vendor/github.com/cilium/ebpf/link/iter.go | 84 + vendor/github.com/cilium/ebpf/link/kprobe.go | 369 + .../cilium/ebpf/link/kprobe_multi.go | 191 + vendor/github.com/cilium/ebpf/link/link.go | 537 + .../github.com/cilium/ebpf/link/netfilter.go | 90 + vendor/github.com/cilium/ebpf/link/netkit.go | 89 + vendor/github.com/cilium/ebpf/link/netns.go | 55 + .../github.com/cilium/ebpf/link/perf_event.go | 332 + vendor/github.com/cilium/ebpf/link/program.go | 107 + vendor/github.com/cilium/ebpf/link/query.go | 111 + .../cilium/ebpf/link/raw_tracepoint.go | 87 + .../cilium/ebpf/link/socket_filter.go | 40 + .../github.com/cilium/ebpf/link/syscalls.go | 200 + vendor/github.com/cilium/ebpf/link/tcx.go | 89 + .../github.com/cilium/ebpf/link/tracepoint.go | 70 + vendor/github.com/cilium/ebpf/link/tracing.go | 218 + vendor/github.com/cilium/ebpf/link/uprobe.go | 335 + .../cilium/ebpf/link/uprobe_multi.go | 219 + vendor/github.com/cilium/ebpf/link/xdp.go | 80 + vendor/github.com/cilium/ebpf/linker.go | 500 + vendor/github.com/cilium/ebpf/map.go | 1750 ++ vendor/github.com/cilium/ebpf/marshalers.go | 210 + vendor/github.com/cilium/ebpf/memory.go | 145 + vendor/github.com/cilium/ebpf/netlify.toml | 4 + vendor/github.com/cilium/ebpf/prog.go | 1177 + vendor/github.com/cilium/ebpf/syscalls.go | 355 + vendor/github.com/cilium/ebpf/types.go | 321 + vendor/github.com/cilium/ebpf/types_string.go | 123 + vendor/github.com/cilium/ebpf/variable.go | 230 + vendor/github.com/cilium/hive/.gitignore | 26 + vendor/github.com/cilium/hive/CODEOWNERS | 12 + .../cilium/hive}/LICENSE | 6 +- vendor/github.com/cilium/hive/Makefile | 15 + vendor/github.com/cilium/hive/README.md | 14 + .../cilium/{cilium/pkg => }/hive/cell/cell.go | 11 +- vendor/github.com/cilium/hive/cell/config.go | 199 + .../{cilium/pkg => }/hive/cell/decorator.go | 40 +- .../{cilium/pkg => }/hive/cell/group.go | 4 +- vendor/github.com/cilium/hive/cell/health.go | 58 + .../cilium/{cilium/pkg => }/hive/cell/info.go | 0 vendor/github.com/cilium/hive/cell/invoke.go | 111 + .../pkg/hive => hive/cell}/lifecycle.go | 129 +- vendor/github.com/cilium/hive/cell/module.go | 206 + .../{cilium/pkg => }/hive/cell/provide.go | 47 +- .../cilium/hive/cell/simple_health.go | 151 + .../cilium/{cilium/pkg => }/hive/command.go | 5 - .../cilium/{cilium/pkg => }/hive/doc.go | 0 .../cilium/{cilium/pkg => }/hive/hive.go | 275 +- .../cilium/hive/internal/map_string.go | 138 + 
.../cilium/hive/internal/reflect.go | 58 + vendor/github.com/cilium/hive/job/job.go | 265 + vendor/github.com/cilium/hive/job/metrics.go | 35 + vendor/github.com/cilium/hive/job/observer.go | 129 + vendor/github.com/cilium/hive/job/oneshot.go | 159 + vendor/github.com/cilium/hive/job/timer.go | 219 + vendor/github.com/cilium/hive/script.go | 166 + vendor/github.com/cilium/hive/script/LICENSE | 27 + .../github.com/cilium/hive/script/README.md | 4 + .../cilium/hive/script/README.md.original | 11 + vendor/github.com/cilium/hive/script/cmds.go | 1218 + .../cilium/hive/script/cmds_other.go | 11 + .../cilium/hive/script/cmds_posix.go | 16 + vendor/github.com/cilium/hive/script/conds.go | 198 + .../github.com/cilium/hive/script/engine.go | 997 + .../github.com/cilium/hive/script/errors.go | 64 + .../cilium/hive/script/internal/diff/diff.go | 261 + .../cilium/hive/script/makeraw_unix.go | 37 + .../cilium/hive/script/makeraw_unix_bsd.go | 16 + .../cilium/hive/script/makeraw_unix_other.go | 14 + .../cilium/hive/script/makeraw_unsupported.go | 16 + vendor/github.com/cilium/hive/script/state.go | 249 + .../{cilium/pkg => }/hive/shutdowner.go | 0 .../cilium/statedb}/LICENSE | 6 +- .../github.com/cilium/statedb/index/bool.go | 23 + vendor/github.com/cilium/statedb/index/int.go | 97 + .../github.com/cilium/statedb/index/keyset.go | 53 + vendor/github.com/cilium/statedb/index/map.go | 12 + .../github.com/cilium/statedb/index/netip.go | 43 + vendor/github.com/cilium/statedb/index/seq.go | 38 + vendor/github.com/cilium/statedb/index/set.go | 26 + .../github.com/cilium/statedb/index/string.go | 45 + .../github.com/cilium/statedb/part/cache.go | 37 + .../cilium/statedb/part/iterator.go | 188 + vendor/github.com/cilium/statedb/part/map.go | 270 + vendor/github.com/cilium/statedb/part/node.go | 510 + vendor/github.com/cilium/statedb/part/ops.go | 42 + .../cilium/statedb/part/registry.go | 74 + vendor/github.com/cilium/statedb/part/set.go | 252 + vendor/github.com/cilium/statedb/part/tree.go | 130 + vendor/github.com/cilium/statedb/part/txn.go | 485 + vendor/github.com/cilium/stream/CODEOWNERS | 12 + vendor/github.com/cilium/stream/LICENSE | 202 + vendor/github.com/cilium/stream/Makefile | 15 + vendor/github.com/cilium/stream/README.md | 164 + vendor/github.com/cilium/stream/observable.go | 59 + vendor/github.com/cilium/stream/operators.go | 383 + vendor/github.com/cilium/stream/sinks.go | 194 + vendor/github.com/cilium/stream/sources.go | 262 + .../containernetworking/cni/libcni/api.go | 242 +- .../containernetworking/cni/libcni/conf.go | 94 +- .../cni/pkg/invoke/delegate.go | 25 +- .../cni/pkg/invoke/exec.go | 16 +- .../cni/pkg/invoke/os_unix.go | 1 + .../cni/pkg/ns/ns_darwin.go} | 11 +- .../cni/pkg/ns/ns_linux.go | 50 + .../cni/pkg/ns/ns_windows.go} | 20 +- .../containernetworking/cni/pkg/skel/skel.go | 227 +- .../cni/pkg/types/100/types.go | 57 +- .../containernetworking/cni/pkg/types/args.go | 4 +- .../cni/pkg/types/create/create.go | 3 + .../cni/pkg/types/types.go | 139 +- .../cni/pkg/utils/utils.go | 6 +- .../cni/pkg/version/plugin.go | 24 + .../cni/pkg/version/version.go | 9 +- .../emicklei/go-restful/v3/CHANGES.md | 12 + .../emicklei/go-restful/v3/README.md | 4 +- .../emicklei/go-restful/v3/compress.go | 10 + .../go-restful/v3/entity_accessors.go | 7 + .../github.com/emicklei/go-restful/v3/json.go | 11 - .../emicklei/go-restful/v3/jsoniter.go | 12 - .../emicklei/go-restful/v3/jsr311.go | 2 +- .../json-patch/v5/internal/json/decode.go | 1385 + .../json-patch/v5/internal/json/encode.go | 1486 + 
.../json-patch/v5/internal/json/fold.go | 141 + .../json-patch/v5/internal/json/fuzz.go | 42 + .../json-patch/v5/internal/json/indent.go | 143 + .../json-patch/v5/internal/json/scanner.go | 610 + .../json-patch/v5/internal/json/stream.go | 495 + .../json-patch/v5/internal/json/tables.go | 218 + .../json-patch/v5/internal/json/tags.go | 38 + .../github.com/evanphx/json-patch/v5/merge.go | 110 +- .../github.com/evanphx/json-patch/v5/patch.go | 436 +- vendor/github.com/fatih/color/README.md | 37 +- vendor/github.com/fatih/color/color.go | 133 +- .../github.com/fatih/color/color_windows.go | 19 + vendor/github.com/fatih/color/doc.go | 137 +- .../github.com/felixge/httpsnoop/.travis.yml | 6 - vendor/github.com/felixge/httpsnoop/Makefile | 2 +- vendor/github.com/felixge/httpsnoop/README.md | 4 +- .../felixge/httpsnoop/capture_metrics.go | 2 +- .../httpsnoop/wrap_generated_gteq_1.8.go | 2 +- .../httpsnoop/wrap_generated_lt_1.8.go | 2 +- .../github.com/fsnotify/fsnotify/.cirrus.yml | 7 +- .../fsnotify/fsnotify/.gitattributes | 1 - .../github.com/fsnotify/fsnotify/.gitignore | 3 + .../github.com/fsnotify/fsnotify/CHANGELOG.md | 34 +- .../fsnotify/fsnotify/CONTRIBUTING.md | 120 +- .../fsnotify/fsnotify/backend_fen.go | 324 +- .../fsnotify/fsnotify/backend_inotify.go | 594 +- .../fsnotify/fsnotify/backend_kqueue.go | 747 +- .../fsnotify/fsnotify/backend_other.go | 204 +- .../fsnotify/fsnotify/backend_windows.go | 305 +- .../github.com/fsnotify/fsnotify/fsnotify.go | 368 +- .../fsnotify/fsnotify/internal/darwin.go | 39 + .../fsnotify/internal/debug_darwin.go | 57 + .../fsnotify/internal/debug_dragonfly.go | 33 + .../fsnotify/internal/debug_freebsd.go | 42 + .../fsnotify/internal/debug_kqueue.go | 32 + .../fsnotify/fsnotify/internal/debug_linux.go | 56 + .../fsnotify/internal/debug_netbsd.go | 25 + .../fsnotify/internal/debug_openbsd.go | 28 + .../fsnotify/internal/debug_solaris.go | 45 + .../fsnotify/internal/debug_windows.go | 40 + .../fsnotify/fsnotify/internal/freebsd.go | 31 + .../fsnotify/fsnotify/internal/internal.go | 2 + .../fsnotify/fsnotify/internal/unix.go | 31 + .../fsnotify/fsnotify/internal/unix2.go | 7 + .../fsnotify/fsnotify/internal/windows.go | 41 + vendor/github.com/fsnotify/fsnotify/mkdoc.zsh | 259 - .../fsnotify/fsnotify/system_bsd.go | 1 - .../fsnotify/fsnotify/system_darwin.go | 1 - .../cbor/v2}/.gitignore | 0 .../fxamacker/cbor/v2/.golangci.yml | 104 + .../fxamacker/cbor/v2/CODE_OF_CONDUCT.md | 133 + .../fxamacker/cbor/v2/CONTRIBUTING.md | 41 + .../cbor/v2}/LICENSE | 4 +- vendor/github.com/fxamacker/cbor/v2/README.md | 691 + .../github.com/fxamacker/cbor/v2/SECURITY.md | 7 + .../fxamacker/cbor/v2/bytestring.go | 63 + vendor/github.com/fxamacker/cbor/v2/cache.go | 363 + vendor/github.com/fxamacker/cbor/v2/common.go | 182 + vendor/github.com/fxamacker/cbor/v2/decode.go | 3187 +++ .../github.com/fxamacker/cbor/v2/diagnose.go | 724 + vendor/github.com/fxamacker/cbor/v2/doc.go | 129 + vendor/github.com/fxamacker/cbor/v2/encode.go | 1989 ++ .../fxamacker/cbor/v2/encode_map.go | 94 + .../fxamacker/cbor/v2/encode_map_go117.go | 60 + .../fxamacker/cbor/v2/simplevalue.go | 69 + vendor/github.com/fxamacker/cbor/v2/stream.go | 277 + .../fxamacker/cbor/v2/structfields.go | 260 + vendor/github.com/fxamacker/cbor/v2/tag.go | 299 + vendor/github.com/fxamacker/cbor/v2/valid.go | 394 + vendor/github.com/go-ole/go-ole/.travis.yml | 8 - vendor/github.com/go-ole/go-ole/ChangeLog.md | 49 - vendor/github.com/go-ole/go-ole/README.md | 46 - vendor/github.com/go-ole/go-ole/appveyor.yml | 54 - 
vendor/github.com/go-ole/go-ole/com.go | 344 - vendor/github.com/go-ole/go-ole/com_func.go | 174 - vendor/github.com/go-ole/go-ole/connect.go | 192 - vendor/github.com/go-ole/go-ole/constants.go | 153 - vendor/github.com/go-ole/go-ole/error.go | 51 - vendor/github.com/go-ole/go-ole/error_func.go | 8 - .../github.com/go-ole/go-ole/error_windows.go | 24 - vendor/github.com/go-ole/go-ole/guid.go | 284 - .../go-ole/go-ole/iconnectionpoint.go | 20 - .../go-ole/go-ole/iconnectionpoint_func.go | 21 - .../go-ole/go-ole/iconnectionpoint_windows.go | 43 - .../go-ole/iconnectionpointcontainer.go | 17 - .../go-ole/iconnectionpointcontainer_func.go | 11 - .../iconnectionpointcontainer_windows.go | 25 - vendor/github.com/go-ole/go-ole/idispatch.go | 94 - .../go-ole/go-ole/idispatch_func.go | 19 - .../go-ole/go-ole/idispatch_windows.go | 202 - .../github.com/go-ole/go-ole/ienumvariant.go | 19 - .../go-ole/go-ole/ienumvariant_func.go | 19 - .../go-ole/go-ole/ienumvariant_windows.go | 63 - .../github.com/go-ole/go-ole/iinspectable.go | 18 - .../go-ole/go-ole/iinspectable_func.go | 15 - .../go-ole/go-ole/iinspectable_windows.go | 72 - .../go-ole/go-ole/iprovideclassinfo.go | 21 - .../go-ole/go-ole/iprovideclassinfo_func.go | 7 - .../go-ole/iprovideclassinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/itypeinfo.go | 34 - .../go-ole/go-ole/itypeinfo_func.go | 7 - .../go-ole/go-ole/itypeinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/iunknown.go | 57 - .../github.com/go-ole/go-ole/iunknown_func.go | 19 - .../go-ole/go-ole/iunknown_windows.go | 58 - vendor/github.com/go-ole/go-ole/ole.go | 190 - .../go-ole/go-ole/oleutil/connection.go | 100 - .../go-ole/go-ole/oleutil/connection_func.go | 10 - .../go-ole/oleutil/connection_windows.go | 58 - .../go-ole/go-ole/oleutil/go-get.go | 6 - .../go-ole/go-ole/oleutil/oleutil.go | 127 - vendor/github.com/go-ole/go-ole/safearray.go | 27 - .../go-ole/go-ole/safearray_func.go | 211 - .../go-ole/go-ole/safearray_windows.go | 337 - .../go-ole/go-ole/safearrayconversion.go | 140 - .../go-ole/go-ole/safearrayslices.go | 33 - vendor/github.com/go-ole/go-ole/utility.go | 101 - vendor/github.com/go-ole/go-ole/variables.go | 15 - vendor/github.com/go-ole/go-ole/variant.go | 105 - .../github.com/go-ole/go-ole/variant_386.go | 11 - .../github.com/go-ole/go-ole/variant_amd64.go | 12 - .../github.com/go-ole/go-ole/variant_arm.go | 11 - .../github.com/go-ole/go-ole/variant_arm64.go | 13 - .../go-ole/go-ole/variant_date_386.go | 22 - .../go-ole/go-ole/variant_date_amd64.go | 20 - .../go-ole/go-ole/variant_date_arm.go | 22 - .../go-ole/go-ole/variant_date_arm64.go | 23 - .../go-ole/go-ole/variant_ppc64le.go | 12 - .../github.com/go-ole/go-ole/variant_s390x.go | 12 - vendor/github.com/go-ole/go-ole/vt_string.go | 58 - vendor/github.com/go-ole/go-ole/winrt.go | 99 - vendor/github.com/go-ole/go-ole/winrt_doc.go | 36 - .../go-openapi/analysis/.golangci.yml | 53 +- .../github.com/go-openapi/analysis/README.md | 10 +- .../go-openapi/analysis/appveyor.yml | 32 - vendor/github.com/go-openapi/analysis/doc.go | 10 +- .../github.com/go-openapi/analysis/flatten.go | 64 +- .../go-openapi/analysis/flatten_name.go | 39 +- .../go-openapi/analysis/flatten_options.go | 1 + .../analysis/internal/debug/debug.go | 4 +- .../internal/flatten/replace/replace.go | 42 +- .../analysis/internal/flatten/sortref/keys.go | 2 +- .../github.com/go-openapi/analysis/mixin.go | 16 +- .../github.com/go-openapi/analysis/schema.go | 12 +- .../github.com/go-openapi/loads/.golangci.yml | 49 +- 
vendor/github.com/go-openapi/loads/README.md | 2 +- vendor/github.com/go-openapi/loads/doc.go | 9 +- vendor/github.com/go-openapi/loads/loaders.go | 9 +- vendor/github.com/go-openapi/loads/spec.go | 35 +- .../go-openapi/runtime/bytestream.go | 131 +- .../go-openapi/runtime/client/request.go | 4 +- .../go-openapi/runtime/client/runtime.go | 56 +- vendor/github.com/go-openapi/runtime/csv.go | 327 +- .../go-openapi/runtime/csv_options.go | 121 + .../go-openapi/runtime/logger/standard.go | 2 + .../go-openapi/runtime/middleware/context.go | 157 +- .../runtime/middleware/denco/router.go | 30 +- .../go-openapi/runtime/middleware/go18.go | 10 - .../go-openapi/runtime/middleware/pre_go18.go | 9 - .../go-openapi/runtime/middleware/rapidoc.go | 70 +- .../go-openapi/runtime/middleware/redoc.go | 69 +- .../go-openapi/runtime/middleware/request.go | 21 +- .../go-openapi/runtime/middleware/router.go | 103 +- .../go-openapi/runtime/middleware/spec.go | 77 +- .../runtime/middleware/swaggerui.go | 89 +- .../runtime/middleware/swaggerui_oauth2.go | 31 +- .../runtime/middleware/ui_defaults.go | 8 - .../runtime/middleware/ui_options.go | 173 + .../runtime/middleware/validation.go | 12 +- .../go-openapi/strfmt/.golangci.yml | 92 +- vendor/github.com/go-openapi/strfmt/README.md | 5 +- vendor/github.com/go-openapi/strfmt/bson.go | 6 +- .../github.com/go-openapi/strfmt/default.go | 52 +- vendor/github.com/go-openapi/strfmt/format.go | 5 +- vendor/github.com/go-openapi/strfmt/time.go | 4 +- .../go-openapi/validate/.golangci.yml | 56 +- .../go-openapi/validate/BENCHMARK.md | 31 + .../github.com/go-openapi/validate/README.md | 8 +- .../go-openapi/validate/default_validator.go | 108 +- .../go-openapi/validate/example_validator.go | 66 +- .../github.com/go-openapi/validate/formats.go | 78 +- .../github.com/go-openapi/validate/helpers.go | 21 +- .../go-openapi/validate/object_validator.go | 448 +- .../github.com/go-openapi/validate/options.go | 1 + .../github.com/go-openapi/validate/pools.go | 366 + .../go-openapi/validate/pools_debug.go | 1012 + .../github.com/go-openapi/validate/result.go | 131 +- .../github.com/go-openapi/validate/schema.go | 258 +- .../go-openapi/validate/schema_option.go | 31 +- .../go-openapi/validate/schema_props.go | 412 +- .../go-openapi/validate/slice_validator.go | 57 +- vendor/github.com/go-openapi/validate/spec.go | 133 +- .../go-openapi/validate/spec_messages.go | 5 + vendor/github.com/go-openapi/validate/type.go | 72 +- .../go-openapi/validate/validator.go | 934 +- .../github.com/go-openapi/validate/values.go | 8 +- vendor/github.com/gobuffalo/flect/README.md | 51 +- .../github.com/gobuffalo/flect/SHOULDERS.md | 10 +- vendor/github.com/gobuffalo/flect/humanize.go | 13 +- .../gobuffalo/flect/plural_rules.go | 607 +- .../github.com/gobuffalo/flect/pluralize.go | 12 +- vendor/github.com/gobuffalo/flect/rule.go | 7 + .../gobuffalo/flect/singular_rules.go | 19 +- .../github.com/gobuffalo/flect/singularize.go | 18 +- vendor/github.com/gobuffalo/flect/titleize.go | 14 +- vendor/github.com/gobuffalo/flect/version.go | 2 +- .../numcpus => google/btree}/LICENSE | 0 vendor/github.com/google/btree/README.md | 10 + vendor/github.com/google/btree/btree.go | 893 + .../github.com/google/btree/btree_generic.go | 1083 + .../google/go-cmp/cmp/cmpopts/equate.go | 185 + .../google/go-cmp/cmp/cmpopts/ignore.go | 206 + .../google/go-cmp/cmp/cmpopts/sort.go | 147 + .../go-cmp/cmp/cmpopts/struct_filter.go | 189 + .../google/go-cmp/cmp/cmpopts/xform.go | 36 + .../github.com/gopacket/gopacket/.gitignore | 39 + 
vendor/github.com/gopacket/gopacket/AUTHORS | 54 + .../gopacket/gopacket/CONTRIBUTING.md | 215 + vendor/github.com/gopacket/gopacket/LICENSE | 28 + vendor/github.com/gopacket/gopacket/README.md | 17 + .../github.com/gopacket/gopacket/SECURITY.md | 29 + vendor/github.com/gopacket/gopacket/base.go | 185 + .../github.com/gopacket/gopacket/checksum.go | 58 + vendor/github.com/gopacket/gopacket/decode.go | 158 + vendor/github.com/gopacket/gopacket/doc.go | 425 + vendor/github.com/gopacket/gopacket/flows.go | 236 + vendor/github.com/gopacket/gopacket/gc | 288 + .../gopacket/gopacket/layerclass.go | 107 + .../gopacket/gopacket/layers/.lint_blacklist | 40 + .../gopacket/gopacket/layers/arp.go | 118 + .../gopacket/gopacket/layers/asf.go | 166 + .../gopacket/layers/asf_presencepong.go | 194 + .../gopacket/gopacket/layers/base.go | 52 + .../gopacket/gopacket/layers/bfd.go | 484 + .../gopacket/gopacket/layers/bitfield.go | 18 + .../gopacket/gopacket/layers/cdp.go | 659 + .../gopacket/gopacket/layers/ctp.go | 110 + .../gopacket/gopacket/layers/dhcpv4.go | 595 + .../gopacket/gopacket/layers/dhcpv6.go | 360 + .../gopacket/layers/dhcpv6_options.go | 622 + .../gopacket/gopacket/layers/dns.go | 1463 + .../gopacket/gopacket/layers/doc.go | 61 + .../gopacket/gopacket/layers/dot11.go | 2263 ++ .../gopacket/gopacket/layers/dot1q.go | 76 + .../gopacket/gopacket/layers/eap.go | 115 + .../gopacket/gopacket/layers/eapol.go | 303 + .../gopacket/gopacket/layers/endpoints.go | 98 + .../gopacket/gopacket/layers/enums.go | 450 + .../gopacket/layers/enums_generated.go | 401 + .../gopacket/gopacket/layers/erspan2.go | 86 + .../gopacket/gopacket/layers/etherip.go | 46 + .../gopacket/gopacket/layers/ethernet.go | 124 + .../gopacket/gopacket/layers/fddi.go | 42 + .../gopacket/gopacket/layers/fuzz_layer.go | 39 + .../gopacket/gopacket/layers/gen_linted.sh | 3 + .../gopacket/gopacket/layers/geneve.go | 194 + .../gopacket/gopacket/layers/gre.go | 214 + .../gopacket/gopacket/layers/gtp.go | 197 + .../gopacket/gopacket/layers/iana_ports.go | 22944 ++++++++++++++++ .../gopacket/gopacket/layers/icmp4.go | 281 + .../gopacket/gopacket/layers/icmp6.go | 282 + .../gopacket/gopacket/layers/icmp6msg.go | 578 + .../gopacket/gopacket/layers/igmp.go | 357 + .../gopacket/gopacket/layers/ip4.go | 324 + .../gopacket/gopacket/layers/ip6.go | 739 + .../gopacket/gopacket/layers/ipsec.go | 78 + .../gopacket/gopacket/layers/layertypes.go | 225 + .../gopacket/gopacket/layers/lcm.go | 219 + .../gopacket/gopacket/layers/linux_sll.go | 98 + .../gopacket/gopacket/layers/linux_sll2.go | 176 + .../gopacket/gopacket/layers/llc.go | 194 + .../gopacket/gopacket/layers/lldp.go | 1603 ++ .../gopacket/gopacket/layers/loopback.go | 80 + .../gopacket/gopacket/layers/mdp.go | 162 + .../gopacket/gopacket/layers/mldv1.go | 182 + .../gopacket/gopacket/layers/mldv2.go | 619 + .../gopacket/gopacket/layers/modbustcp.go | 155 + .../gopacket/gopacket/layers/mpls.go | 90 + .../gopacket/gopacket/layers/multipathtcp.go | 161 + .../gopacket/gopacket/layers/ndp.go | 612 + .../gopacket/gopacket/layers/ntp.go | 415 + .../gopacket/gopacket/layers/ospf.go | 716 + .../gopacket/gopacket/layers/pflog.go | 84 + .../gopacket/gopacket/layers/ports.go | 187 + .../gopacket/gopacket/layers/ppp.go | 89 + .../gopacket/gopacket/layers/pppoe.go | 61 + .../gopacket/gopacket/layers/prism.go | 146 + .../gopacket/gopacket/layers/radiotap.go | 1678 ++ .../gopacket/gopacket/layers/radius.go | 560 + .../gopacket/gopacket/layers/rmcp.go | 170 + .../gopacket/gopacket/layers/rudp.go | 94 + 
.../gopacket/gopacket/layers/sctp.go | 770 + .../gopacket/gopacket/layers/sflow.go | 2599 ++ .../gopacket/gopacket/layers/sip.go | 592 + .../gopacket/gopacket/layers/stp.go | 150 + .../gopacket/gopacket/layers/tcp.go | 637 + .../gopacket/gopacket/layers/tcpip.go | 85 + .../gopacket/gopacket/layers/test_creator.py | 104 + .../gopacket/gopacket/layers/tls.go | 283 + .../gopacket/gopacket/layers/tls_alert.go | 165 + .../gopacket/gopacket/layers/tls_appdata.go | 34 + .../gopacket/layers/tls_cipherspec.go | 64 + .../gopacket/gopacket/layers/tls_handshake.go | 208 + .../gopacket/gopacket/layers/udp.go | 158 + .../gopacket/gopacket/layers/udplite.go | 45 + .../gopacket/gopacket/layers/usb.go | 293 + .../gopacket/gopacket/layers/vrrp.go | 156 + .../gopacket/gopacket/layers/vxlan.go | 123 + .../gopacket/gopacket/layers_decoder.go | 101 + .../github.com/gopacket/gopacket/layertype.go | 111 + vendor/github.com/gopacket/gopacket/packet.go | 1029 + vendor/github.com/gopacket/gopacket/parser.go | 351 + .../github.com/gopacket/gopacket/runtests.sh | 10 + vendor/github.com/gopacket/gopacket/time.go | 72 + vendor/github.com/gopacket/gopacket/writer.go | 233 + vendor/github.com/hashicorp/hcl/decoder.go | 46 +- .../github.com/hashicorp/hcl/hcl/ast/ast.go | 15 +- .../klauspost/compress/.gitattributes | 2 + .../compress}/.gitignore | 12 +- .../klauspost/compress/.goreleaser.yml | 123 + .../github.com/klauspost/compress/README.md | 700 + .../github.com/klauspost/compress/SECURITY.md | 25 + .../klauspost/compress/compressible.go | 85 + .../klauspost/compress/flate/matchlen_amd64.s | 10 +- .../klauspost/compress/fse/README.md | 79 + .../klauspost/compress/fse/bitreader.go | 122 + .../klauspost/compress/fse/bitwriter.go | 167 + .../klauspost/compress/fse/bytereader.go | 47 + .../klauspost/compress/fse/compress.go | 683 + .../klauspost/compress/fse/decompress.go | 376 + .../github.com/klauspost/compress/fse/fse.go | 144 + vendor/github.com/klauspost/compress/gen.sh | 4 + .../klauspost/compress/huff0/.gitignore | 1 + .../klauspost/compress/huff0/README.md | 89 + .../klauspost/compress/huff0/bitreader.go | 229 + .../klauspost/compress/huff0/bitwriter.go | 102 + .../klauspost/compress/huff0/compress.go | 742 + .../klauspost/compress/huff0/decompress.go | 1167 + .../compress/huff0/decompress_amd64.go | 226 + .../compress/huff0/decompress_amd64.s | 830 + .../compress/huff0/decompress_generic.go | 299 + .../klauspost/compress/huff0/huff0.go | 337 + .../compress/internal/cpuinfo/cpuinfo.go | 34 + .../internal/cpuinfo/cpuinfo_amd64.go | 11 + .../compress/internal/cpuinfo/cpuinfo_amd64.s | 36 + .../compress/internal/snapref/LICENSE | 27 + .../compress/internal/snapref/decode.go | 264 + .../compress/internal/snapref/decode_other.go | 113 + .../compress/internal/snapref/encode.go | 289 + .../compress/internal/snapref/encode_other.go | 250 + .../compress/internal/snapref/snappy.go | 98 + vendor/github.com/klauspost/compress/s2sx.mod | 4 + vendor/github.com/klauspost/compress/s2sx.sum | 0 .../klauspost/compress/zstd/README.md | 441 + .../klauspost/compress/zstd/bitreader.go | 136 + .../klauspost/compress/zstd/bitwriter.go | 112 + .../klauspost/compress/zstd/blockdec.go | 729 + .../klauspost/compress/zstd/blockenc.go | 909 + .../compress/zstd/blocktype_string.go | 85 + .../klauspost/compress/zstd/bytebuf.go | 131 + .../klauspost/compress/zstd/bytereader.go | 82 + .../klauspost/compress/zstd/decodeheader.go | 261 + .../klauspost/compress/zstd/decoder.go | 948 + .../compress/zstd/decoder_options.go | 169 + 
.../klauspost/compress/zstd/dict.go | 565 + .../klauspost/compress/zstd/enc_base.go | 173 + .../klauspost/compress/zstd/enc_best.go | 560 + .../klauspost/compress/zstd/enc_better.go | 1252 + .../klauspost/compress/zstd/enc_dfast.go | 1123 + .../klauspost/compress/zstd/enc_fast.go | 891 + .../klauspost/compress/zstd/encoder.go | 619 + .../compress/zstd/encoder_options.go | 339 + .../klauspost/compress/zstd/framedec.go | 413 + .../klauspost/compress/zstd/frameenc.go | 137 + .../klauspost/compress/zstd/fse_decoder.go | 307 + .../compress/zstd/fse_decoder_amd64.go | 65 + .../compress/zstd/fse_decoder_amd64.s | 126 + .../compress/zstd/fse_decoder_generic.go | 73 + .../klauspost/compress/zstd/fse_encoder.go | 701 + .../klauspost/compress/zstd/fse_predefined.go | 158 + .../klauspost/compress/zstd/hash.go | 35 + .../klauspost/compress/zstd/history.go | 116 + .../compress/zstd/internal/xxhash/LICENSE.txt | 22 + .../compress/zstd/internal/xxhash/README.md | 71 + .../compress/zstd/internal/xxhash/xxhash.go | 230 + .../zstd/internal/xxhash/xxhash_amd64.s | 210 + .../zstd/internal/xxhash/xxhash_arm64.s | 184 + .../zstd/internal/xxhash/xxhash_asm.go | 16 + .../zstd/internal/xxhash/xxhash_other.go | 76 + .../zstd/internal/xxhash/xxhash_safe.go | 11 + .../klauspost/compress/zstd/matchlen_amd64.go | 16 + .../klauspost/compress/zstd/matchlen_amd64.s | 66 + .../compress/zstd/matchlen_generic.go | 33 + .../klauspost/compress/zstd/seqdec.go | 503 + .../klauspost/compress/zstd/seqdec_amd64.go | 394 + .../klauspost/compress/zstd/seqdec_amd64.s | 4151 +++ .../klauspost/compress/zstd/seqdec_generic.go | 237 + .../klauspost/compress/zstd/seqenc.go | 114 + .../klauspost/compress/zstd/snappy.go | 434 + .../github.com/klauspost/compress/zstd/zip.go | 141 + .../klauspost/compress/zstd/zstd.go | 121 + vendor/github.com/lufia/plan9stats/LICENSE | 29 - vendor/github.com/lufia/plan9stats/README.md | 2 - vendor/github.com/lufia/plan9stats/cpu.go | 288 - vendor/github.com/lufia/plan9stats/doc.go | 2 - vendor/github.com/lufia/plan9stats/host.go | 303 - vendor/github.com/lufia/plan9stats/int.go | 31 - vendor/github.com/lufia/plan9stats/opts.go | 21 - vendor/github.com/lufia/plan9stats/stats.go | 88 - .../mackerelio/go-osstat/LICENSE.txt | 178 + .../go-osstat/memory/memory_darwin.go | 128 + .../go-osstat/memory/memory_freebsd.go | 119 + .../go-osstat/memory/memory_linux.go | 85 + .../go-osstat/memory/memory_other.go | 19 + .../go-osstat/memory/memory_windows.go | 54 + .../github.com/mattn/go-isatty/isatty_bsd.go | 5 +- .../mattn/go-isatty/isatty_others.go | 5 +- .../mattn/go-isatty/isatty_tcgets.go | 3 +- .../golang_protobuf_extensions/NOTICE | 1 - .../pbutil/.gitignore | 1 - .../pbutil/Makefile | 7 - .../pbutil/decode.go | 75 - .../pbutil/encode.go | 46 - .../mitchellh/go-wordwrap/LICENSE.md | 21 + .../mitchellh/go-wordwrap/README.md | 39 + .../mitchellh/go-wordwrap/wordwrap.go | 83 + .../pelletier/go-toml/v2/.gitignore | 3 +- .../pelletier/go-toml/v2/.goreleaser.yaml | 3 + .../pelletier/go-toml/v2/CONTRIBUTING.md | 31 +- .../github.com/pelletier/go-toml/v2/LICENSE | 3 +- .../github.com/pelletier/go-toml/v2/README.md | 161 +- .../pelletier/go-toml/v2/SECURITY.md | 3 - vendor/github.com/pelletier/go-toml/v2/ci.sh | 23 +- .../github.com/pelletier/go-toml/v2/decode.go | 2 +- .../go-toml/v2/internal/tracker/seen.go | 74 +- .../pelletier/go-toml/v2/marshaler.go | 79 +- .../pelletier/go-toml/v2/unmarshaler.go | 73 +- .../pelletier/go-toml/v2/unstable/parser.go | 6 + .../go-toml/v2/unstable/unmarshaler.go | 7 + 
vendor/github.com/petermattis/goid/README.md | 3 +- .../goid/{goid_go1.5_arm.go => goid_go1.5.go} | 8 +- .../goid/{goid_go1.5_arm64.s => goid_go1.5.s} | 20 +- .../petermattis/goid/goid_go1.5_amd64.s | 31 - .../petermattis/goid/goid_go1.5_arm.s | 28 - .../github.com/petermattis/goid/goid_slow.go | 4 +- .../petermattis/goid/runtime_gccgo_go1.8.go | 2 +- .../petermattis/goid/runtime_go1.23.go | 38 + .../petermattis/goid/runtime_go1.9.go | 4 +- vendor/github.com/pmezard/go-difflib/LICENSE | 27 - .../pmezard/go-difflib/difflib/difflib.go | 772 - .../power-devops/perfstat/c_helpers.c | 159 - .../power-devops/perfstat/c_helpers.h | 58 - .../power-devops/perfstat/config.go | 18 - .../power-devops/perfstat/cpustat.go | 98 - .../power-devops/perfstat/diskstat.go | 137 - .../github.com/power-devops/perfstat/doc.go | 315 - .../power-devops/perfstat/fsstat.go | 31 - .../power-devops/perfstat/helpers.go | 764 - .../power-devops/perfstat/lparstat.go | 26 - .../power-devops/perfstat/lvmstat.go | 72 - .../power-devops/perfstat/memstat.go | 84 - .../power-devops/perfstat/netstat.go | 117 - .../power-devops/perfstat/procstat.go | 75 - .../power-devops/perfstat/sysconf.go | 195 - .../power-devops/perfstat/systemcfg.go | 635 - .../power-devops/perfstat/types_cpu.go | 186 - .../power-devops/perfstat/types_disk.go | 176 - .../power-devops/perfstat/types_fs.go | 195 - .../power-devops/perfstat/types_lpar.go | 68 - .../power-devops/perfstat/types_lvm.go | 31 - .../power-devops/perfstat/types_memory.go | 101 - .../power-devops/perfstat/types_network.go | 163 - .../power-devops/perfstat/types_process.go | 43 - .../power-devops/perfstat/uptime.go | 35 - .../prometheus/client_golang/NOTICE | 5 - .../internal/github.com/golang/gddo/LICENSE | 27 + .../golang/gddo/httputil/header/header.go | 145 + .../golang/gddo/httputil/negotiate.go | 36 + .../collectors/go_collector_latest.go | 4 +- .../client_golang/prometheus/go_collector.go | 55 +- .../prometheus/go_collector_latest.go | 19 +- .../client_golang/prometheus/histogram.go | 320 +- .../internal/go_collector_options.go | 2 + .../client_golang/prometheus/labels.go | 2 + .../client_golang/prometheus/metric.go | 2 +- .../prometheus/process_collector.go | 29 +- .../prometheus/process_collector_other.go | 18 +- .../prometheus/process_collector_wasip1.go | 26 + .../prometheus/promhttp/delegator.go | 6 + .../client_golang/prometheus/promhttp/http.go | 113 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 42 + .../client_golang/prometheus/vec.go | 2 +- .../prometheus/client_model/go/metrics.pb.go | 195 +- .../prometheus/common/expfmt/decode.go | 17 +- .../prometheus/common/expfmt/encode.go | 87 +- .../prometheus/common/expfmt/expfmt.go | 184 +- .../common/expfmt/openmetrics_create.go | 283 +- .../prometheus/common/expfmt/text_create.go | 118 +- .../prometheus/common/expfmt/text_parse.go | 170 +- .../bitbucket.org/ww/goautoneg/README.txt | 67 - .../bitbucket.org/ww/goautoneg/autoneg.go | 160 - .../prometheus/common/model/alert.go | 38 +- .../prometheus/common/model/labels.go | 25 +- .../prometheus/common/model/labelset.go | 11 - .../common/model/labelset_string.go | 43 + .../prometheus/common/model/metadata.go | 28 + .../prometheus/common/model/metric.go | 365 +- .../prometheus/common/model/signature.go | 6 +- .../prometheus/common/model/silence.go | 19 +- .../prometheus/common/model/value.go | 16 +- .../prometheus/common/model/value_float.go | 17 +- .../common/model/value_histogram.go | 7 +- .../prometheus/procfs/.golangci.yml | 7 + 
.../prometheus/procfs/MAINTAINERS.md | 3 +- .../prometheus/procfs/Makefile.common | 26 +- vendor/github.com/prometheus/procfs/arp.go | 6 +- .../github.com/prometheus/procfs/buddyinfo.go | 6 +- .../github.com/prometheus/procfs/cpuinfo.go | 4 +- vendor/github.com/prometheus/procfs/crypto.go | 6 +- .../prometheus/procfs/fs_statfs_notype.go | 4 +- .../prometheus/procfs/fs_statfs_type.go | 4 +- .../github.com/prometheus/procfs/fscache.go | 4 +- vendor/github.com/prometheus/procfs/ipvs.go | 6 +- .../github.com/prometheus/procfs/loadavg.go | 2 +- vendor/github.com/prometheus/procfs/mdstat.go | 60 +- .../github.com/prometheus/procfs/meminfo.go | 220 +- .../github.com/prometheus/procfs/mountinfo.go | 2 +- .../prometheus/procfs/mountstats.go | 92 +- .../prometheus/procfs/net_conntrackstat.go | 4 +- .../prometheus/procfs/net_ip_socket.go | 46 +- .../prometheus/procfs/net_sockstat.go | 4 +- .../prometheus/procfs/net_softnet.go | 2 +- .../prometheus/procfs/net_tls_stat.go | 119 + .../github.com/prometheus/procfs/net_unix.go | 14 +- .../prometheus/procfs/net_wireless.go | 22 +- vendor/github.com/prometheus/procfs/proc.go | 8 +- .../prometheus/procfs/proc_fdinfo.go | 8 +- .../prometheus/procfs/proc_limits.go | 2 +- .../github.com/prometheus/procfs/proc_maps.go | 20 +- .../github.com/prometheus/procfs/proc_ns.go | 4 +- .../github.com/prometheus/procfs/proc_psi.go | 2 +- .../prometheus/procfs/proc_smaps.go | 2 +- .../github.com/prometheus/procfs/proc_stat.go | 7 + .../prometheus/procfs/proc_status.go | 50 +- .../github.com/prometheus/procfs/proc_sys.go | 2 +- .../github.com/prometheus/procfs/softirqs.go | 22 +- vendor/github.com/prometheus/procfs/stat.go | 22 +- vendor/github.com/prometheus/procfs/swaps.go | 6 +- vendor/github.com/prometheus/procfs/thread.go | 2 +- .../github.com/prometheus/procfs/zoneinfo.go | 4 +- .../sagikazarmark/locafero/.editorconfig | 21 + .../github.com/sagikazarmark/locafero/.envrc | 4 + .../sagikazarmark/locafero/.gitignore | 8 + .../sagikazarmark/locafero/.golangci.yaml | 27 + .../github.com/sagikazarmark/locafero/LICENSE | 19 + .../sagikazarmark/locafero/README.md | 37 + .../sagikazarmark/locafero/file_type.go | 28 + .../sagikazarmark/locafero/finder.go | 165 + .../sagikazarmark/locafero/flake.lock | 273 + .../sagikazarmark/locafero/flake.nix | 47 + .../sagikazarmark/locafero/helpers.go | 41 + .../sagikazarmark/locafero/justfile | 11 + .../slog-shim}/.editorconfig | 18 +- .../github.com/sagikazarmark/slog-shim/.envrc | 4 + .../sagikazarmark/slog-shim/.gitignore | 4 + .../sagikazarmark/slog-shim/LICENSE | 27 + .../sagikazarmark/slog-shim/README.md | 81 + .../sagikazarmark/slog-shim/attr.go | 74 + .../sagikazarmark/slog-shim/attr_120.go | 75 + .../sagikazarmark/slog-shim/flake.lock | 273 + .../sagikazarmark/slog-shim/flake.nix | 57 + .../sagikazarmark/slog-shim/handler.go | 45 + .../sagikazarmark/slog-shim/handler_120.go | 45 + .../sagikazarmark/slog-shim/json_handler.go | 23 + .../slog-shim/json_handler_120.go | 24 + .../sagikazarmark/slog-shim/level.go | 61 + .../sagikazarmark/slog-shim/level_120.go | 61 + .../sagikazarmark/slog-shim/logger.go | 98 + .../sagikazarmark/slog-shim/logger_120.go | 99 + .../sagikazarmark/slog-shim/record.go | 31 + .../sagikazarmark/slog-shim/record_120.go | 32 + .../sagikazarmark/slog-shim/text_handler.go | 23 + .../slog-shim/text_handler_120.go | 24 + .../sagikazarmark/slog-shim/value.go | 109 + .../sagikazarmark/slog-shim/value_120.go | 110 + .../sasha-s/go-deadlock/.travis.yml | 9 +- .../github.com/sasha-s/go-deadlock/Readme.md | 2 +- 
.../sasha-s/go-deadlock/deadlock.go | 122 +- vendor/github.com/sasha-s/go-deadlock/test.sh | 2 +- .../github.com/sasha-s/go-deadlock/trylock.go | 39 + vendor/github.com/shirou/gopsutil/v3/LICENSE | 61 - .../gopsutil/v3/internal/common/binary.go | 637 - .../gopsutil/v3/internal/common/common.go | 386 - .../v3/internal/common/common_darwin.go | 66 - .../v3/internal/common/common_freebsd.go | 82 - .../v3/internal/common/common_linux.go | 314 - .../v3/internal/common/common_openbsd.go | 66 - .../v3/internal/common/common_unix.go | 62 - .../v3/internal/common/common_windows.go | 304 - .../gopsutil/v3/internal/common/endian.go | 10 - .../gopsutil/v3/internal/common/sleep.go | 21 - .../gopsutil/v3/internal/common/warnings.go | 30 - .../github.com/shirou/gopsutil/v3/mem/mem.go | 118 - .../shirou/gopsutil/v3/mem/mem_aix.go | 16 - .../shirou/gopsutil/v3/mem/mem_aix_cgo.go | 51 - .../shirou/gopsutil/v3/mem/mem_aix_nocgo.go | 81 - .../shirou/gopsutil/v3/mem/mem_bsd.go | 87 - .../shirou/gopsutil/v3/mem/mem_darwin.go | 71 - .../shirou/gopsutil/v3/mem/mem_darwin_cgo.go | 58 - .../gopsutil/v3/mem/mem_darwin_nocgo.go | 89 - .../shirou/gopsutil/v3/mem/mem_fallback.go | 34 - .../shirou/gopsutil/v3/mem/mem_freebsd.go | 167 - .../shirou/gopsutil/v3/mem/mem_linux.go | 526 - .../shirou/gopsutil/v3/mem/mem_openbsd.go | 100 - .../shirou/gopsutil/v3/mem/mem_openbsd_386.go | 38 - .../gopsutil/v3/mem/mem_openbsd_amd64.go | 32 - .../shirou/gopsutil/v3/mem/mem_openbsd_arm.go | 38 - .../gopsutil/v3/mem/mem_openbsd_arm64.go | 38 - .../shirou/gopsutil/v3/mem/mem_plan9.go | 68 - .../shirou/gopsutil/v3/mem/mem_solaris.go | 213 - .../shirou/gopsutil/v3/mem/mem_windows.go | 166 - .../github.com/sourcegraph/conc/.golangci.yml | 11 + .../perfstat => sourcegraph/conc}/LICENSE | 4 +- vendor/github.com/sourcegraph/conc/README.md | 464 + .../internal/multierror/multierror_go119.go | 10 + .../internal/multierror/multierror_go120.go | 10 + .../github.com/sourcegraph/conc/iter/iter.go | 85 + .../github.com/sourcegraph/conc/iter/map.go | 65 + .../sourcegraph/conc/panics/panics.go | 102 + .../github.com/sourcegraph/conc/panics/try.go | 11 + .../github.com/sourcegraph/conc/waitgroup.go | 52 + vendor/github.com/spf13/cast/README.md | 4 +- vendor/github.com/spf13/cast/caste.go | 94 +- .../spf13/jwalterweatherman/README.md | 148 - .../jwalterweatherman/default_notepad.go | 111 - .../spf13/jwalterweatherman/log_counter.go | 46 - .../spf13/jwalterweatherman/notepad.go | 225 - vendor/github.com/spf13/pflag/flag.go | 29 +- vendor/github.com/spf13/pflag/ip.go | 3 + vendor/github.com/spf13/pflag/ipnet_slice.go | 147 + vendor/github.com/spf13/pflag/string_array.go | 4 - vendor/github.com/spf13/viper/.editorconfig | 3 + vendor/github.com/spf13/viper/.envrc | 4 + vendor/github.com/spf13/viper/.gitignore | 3 + vendor/github.com/spf13/viper/.golangci.yaml | 14 +- vendor/github.com/spf13/viper/.yamlignore | 2 + vendor/github.com/spf13/viper/.yamllint.yaml | 6 + vendor/github.com/spf13/viper/Makefile | 45 +- vendor/github.com/spf13/viper/README.md | 72 +- .../github.com/spf13/viper/TROUBLESHOOTING.md | 4 +- .../spf13/viper/experimental_logger.go | 11 - .../spf13/viper/{viper_go1_15.go => file.go} | 5 +- vendor/github.com/spf13/viper/file_finder.go | 38 + vendor/github.com/spf13/viper/flags.go | 4 +- vendor/github.com/spf13/viper/flake.lock | 273 + vendor/github.com/spf13/viper/flake.nix | 57 + vendor/github.com/spf13/viper/fs.go | 65 - .../spf13/viper/internal/encoding/decoder.go | 6 +- .../viper/internal/encoding/dotenv/codec.go | 6 +- 
.../internal/encoding/dotenv/map_utils.go | 18 +- .../spf13/viper/internal/encoding/encoder.go | 6 +- .../viper/internal/encoding/hcl/codec.go | 4 +- .../viper/internal/encoding/ini/codec.go | 6 +- .../viper/internal/encoding/ini/map_utils.go | 26 +- .../internal/encoding/javaproperties/codec.go | 6 +- .../encoding/javaproperties/map_utils.go | 26 +- .../viper/internal/encoding/json/codec.go | 4 +- .../viper/internal/encoding/toml/codec.go | 4 +- .../viper/internal/encoding/yaml/codec.go | 4 +- .../viper/internal/features/bind_struct.go | 5 + .../internal/features/bind_struct_default.go | 5 + vendor/github.com/spf13/viper/logger.go | 57 +- vendor/github.com/spf13/viper/util.go | 47 +- vendor/github.com/spf13/viper/viper.go | 343 +- vendor/github.com/spf13/viper/viper_go1_16.go | 32 - vendor/github.com/spf13/viper/watch.go | 12 - .../spf13/viper/watch_unsupported.go | 32 - .../github.com/subosito/gotenv/CHANGELOG.md | 37 + vendor/github.com/subosito/gotenv/gotenv.go | 50 +- .../tklauser/go-sysconf/.cirrus.yml | 23 - .../github.com/tklauser/go-sysconf/.gitignore | 1 - vendor/github.com/tklauser/go-sysconf/LICENSE | 29 - .../github.com/tklauser/go-sysconf/README.md | 46 - .../github.com/tklauser/go-sysconf/sysconf.go | 21 - .../tklauser/go-sysconf/sysconf_bsd.go | 38 - .../tklauser/go-sysconf/sysconf_darwin.go | 267 - .../tklauser/go-sysconf/sysconf_dragonfly.go | 220 - .../tklauser/go-sysconf/sysconf_freebsd.go | 226 - .../tklauser/go-sysconf/sysconf_generic.go | 46 - .../tklauser/go-sysconf/sysconf_linux.go | 345 - .../tklauser/go-sysconf/sysconf_netbsd.go | 250 - .../tklauser/go-sysconf/sysconf_openbsd.go | 271 - .../tklauser/go-sysconf/sysconf_posix.go | 83 - .../tklauser/go-sysconf/sysconf_solaris.go | 14 - .../go-sysconf/sysconf_unsupported.go | 17 - .../go-sysconf/zsysconf_defs_darwin.go | 254 - .../go-sysconf/zsysconf_defs_dragonfly.go | 228 - .../go-sysconf/zsysconf_defs_freebsd.go | 229 - .../go-sysconf/zsysconf_defs_linux.go | 147 - .../go-sysconf/zsysconf_defs_netbsd.go | 164 - .../go-sysconf/zsysconf_defs_openbsd.go | 263 - .../go-sysconf/zsysconf_defs_solaris.go | 139 - .../go-sysconf/zsysconf_values_freebsd_386.go | 12 - .../zsysconf_values_freebsd_amd64.go | 12 - .../go-sysconf/zsysconf_values_freebsd_arm.go | 12 - .../zsysconf_values_freebsd_arm64.go | 12 - .../zsysconf_values_freebsd_riscv64.go | 12 - .../go-sysconf/zsysconf_values_linux_386.go | 114 - .../go-sysconf/zsysconf_values_linux_amd64.go | 114 - .../go-sysconf/zsysconf_values_linux_arm.go | 114 - .../go-sysconf/zsysconf_values_linux_arm64.go | 114 - .../zsysconf_values_linux_loong64.go | 114 - .../go-sysconf/zsysconf_values_linux_mips.go | 114 - .../zsysconf_values_linux_mips64.go | 114 - .../zsysconf_values_linux_mips64le.go | 114 - .../zsysconf_values_linux_mipsle.go | 114 - .../go-sysconf/zsysconf_values_linux_ppc64.go | 114 - .../zsysconf_values_linux_ppc64le.go | 114 - .../zsysconf_values_linux_riscv64.go | 114 - .../go-sysconf/zsysconf_values_linux_s390x.go | 114 - .../go-sysconf/zsysconf_values_netbsd_386.go | 11 - .../zsysconf_values_netbsd_amd64.go | 11 - .../go-sysconf/zsysconf_values_netbsd_arm.go | 11 - .../zsysconf_values_netbsd_arm64.go | 11 - .../github.com/tklauser/numcpus/.cirrus.yml | 13 - vendor/github.com/tklauser/numcpus/README.md | 52 - vendor/github.com/tklauser/numcpus/numcpus.go | 75 - .../tklauser/numcpus/numcpus_bsd.go | 66 - .../tklauser/numcpus/numcpus_linux.go | 120 - .../tklauser/numcpus/numcpus_solaris.go | 56 - .../tklauser/numcpus/numcpus_unsupported.go | 42 - 
.../tklauser/numcpus/numcpus_windows.go | 41 - .../github.com/vishvananda/netlink/.gitignore | 1 + .../vishvananda/netlink/addr_linux.go | 67 +- .../vishvananda/netlink/bridge_linux.go | 60 +- .../vishvananda/netlink/chain_linux.go | 16 +- .../github.com/vishvananda/netlink/class.go | 2 + .../vishvananda/netlink/class_linux.go | 19 +- .../vishvananda/netlink/conntrack_linux.go | 399 +- .../netlink/conntrack_unspecified.go | 19 + .../vishvananda/netlink/devlink_linux.go | 463 +- .../github.com/vishvananda/netlink/filter.go | 52 +- .../vishvananda/netlink/filter_linux.go | 97 +- vendor/github.com/vishvananda/netlink/fou.go | 15 +- .../vishvananda/netlink/fou_linux.go | 66 +- .../vishvananda/netlink/fou_unspecified.go | 1 + .../vishvananda/netlink/genetlink_linux.go | 13 +- .../vishvananda/netlink/gtp_linux.go | 13 +- .../vishvananda/netlink/handle_unspecified.go | 8 + .../vishvananda/netlink/inet_diag.go | 9 + vendor/github.com/vishvananda/netlink/link.go | 116 +- .../vishvananda/netlink/link_linux.go | 480 +- .../vishvananda/netlink/neigh_linux.go | 60 +- .../vishvananda/netlink/netlink_linux.go | 3 + .../netlink/netlink_unspecified.go | 12 + .../vishvananda/netlink/nl/conntrack_linux.go | 38 + .../vishvananda/netlink/nl/devlink_linux.go | 106 +- .../vishvananda/netlink/nl/ip6tnl_linux.go | 21 + .../vishvananda/netlink/nl/link_linux.go | 108 +- .../vishvananda/netlink/nl/nl_linux.go | 275 +- .../vishvananda/netlink/nl/seg6local_linux.go | 4 + .../vishvananda/netlink/nl/syscall.go | 1 + .../vishvananda/netlink/nl/tc_linux.go | 480 +- .../vishvananda/netlink/nl/vdpa_linux.go | 41 + .../vishvananda/netlink/proc_event_linux.go | 9 - .../vishvananda/netlink/protinfo.go | 24 +- .../vishvananda/netlink/protinfo_linux.go | 17 +- .../github.com/vishvananda/netlink/qdisc.go | 20 + .../vishvananda/netlink/qdisc_linux.go | 79 +- .../vishvananda/netlink/rdma_link_linux.go | 24 +- .../github.com/vishvananda/netlink/route.go | 9 +- .../vishvananda/netlink/route_linux.go | 369 +- vendor/github.com/vishvananda/netlink/rule.go | 13 +- .../vishvananda/netlink/rule_linux.go | 83 +- .../vishvananda/netlink/rule_nonlinux.go | 8 + .../github.com/vishvananda/netlink/socket.go | 77 + .../vishvananda/netlink/socket_linux.go | 538 +- .../vishvananda/netlink/socket_xdp_linux.go | 207 + vendor/github.com/vishvananda/netlink/tcp.go | 8 + .../vishvananda/netlink/tcp_linux.go | 15 + .../vishvananda/netlink/unix_diag.go | 27 + .../vishvananda/netlink/vdpa_linux.go | 491 + .../github.com/vishvananda/netlink/virtio.go | 132 + .../vishvananda/netlink/xdp_diag.go | 34 + .../vishvananda/netlink/xdp_linux.go | 46 + .../netlink/{xfrm.go => xfrm_linux.go} | 2 +- .../vishvananda/netlink/xfrm_policy.go | 97 - .../vishvananda/netlink/xfrm_policy_linux.go | 109 +- .../vishvananda/netlink/xfrm_state.go | 148 - .../vishvananda/netlink/xfrm_state_linux.go | 157 +- .../vishvananda/netlink/xfrm_unspecified.go | 7 + .../vishvananda/netns/.golangci.yml | 24 + .../vishvananda/netns/.yamllint.yml | 9 + .../vishvananda/netns/netns_linux.go | 23 +- .../vishvananda/netns/netns_others.go | 16 +- vendor/github.com/x448/float16/.travis.yml | 13 + vendor/github.com/x448/float16/LICENSE | 22 + vendor/github.com/x448/float16/README.md | 133 + vendor/github.com/x448/float16/float16.go | 302 + vendor/github.com/yusufpapurcu/wmi/LICENSE | 20 - vendor/github.com/yusufpapurcu/wmi/README.md | 6 - .../yusufpapurcu/wmi/swbemservices.go | 261 - vendor/github.com/yusufpapurcu/wmi/wmi.go | 591 - .../bson/bsoncodec/array_codec.go | 7 +- 
.../bson/bsoncodec/byte_slice_codec.go | 25 +- .../bson/bsoncodec/default_value_decoders.go | 10 +- .../bson/bsoncodec/default_value_encoders.go | 12 +- .../bson/bsoncodec/empty_interface_codec.go | 24 +- .../mongo-driver/bson/bsoncodec/map_codec.go | 38 +- .../bson/bsoncodec/pointer_codec.go | 16 +- .../bson/bsoncodec/slice_codec.go | 27 +- .../bson/bsoncodec/string_codec.go | 16 +- .../bson/bsoncodec/struct_codec.go | 62 +- .../mongo-driver/bson/bsoncodec/time_codec.go | 24 +- .../mongo-driver/bson/bsoncodec/uint_codec.go | 24 +- .../mongo-driver/bson/bsonrw/copier.go | 5 +- .../bson/bsonrw/extjson_parser.go | 2 +- .../bson/bsonrw/extjson_reader.go | 5 +- .../mongo-driver/bson/bsonrw/json_scanner.go | 26 +- .../go.mongodb.org/mongo-driver/bson/doc.go | 89 +- .../mongo-driver/bson/primitive/decimal.go | 3 - .../mongo-driver/bson/primitive/objectid.go | 4 +- .../go.opentelemetry.io/otel/.codespellignore | 2 + vendor/go.opentelemetry.io/otel/.codespellrc | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 8 - vendor/go.opentelemetry.io/otel/.gitmodules | 3 - vendor/go.opentelemetry.io/otel/.golangci.yml | 21 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 291 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 33 +- vendor/go.opentelemetry.io/otel/Makefile | 83 +- vendor/go.opentelemetry.io/otel/README.md | 48 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 18 +- .../go.opentelemetry.io/otel/attribute/set.go | 40 +- .../otel/attribute/value.go | 18 +- .../otel/baggage/baggage.go | 367 +- .../go.opentelemetry.io/otel/codes/codes.go | 2 +- vendor/go.opentelemetry.io/otel/doc.go | 2 + .../otel/internal/attribute/attribute.go | 24 +- .../otel/internal/global/instruments.go | 66 +- .../otel/internal/global/meter.go | 389 +- .../otel/internal/global/trace.go | 4 + .../otel/internal/rawhelpers.go | 12 +- .../otel/metric/asyncfloat64.go | 8 +- .../otel/metric/asyncint64.go | 2 +- vendor/go.opentelemetry.io/otel/metric/doc.go | 18 + .../otel/metric/embedded/embedded.go | 20 + .../otel/metric/instrument.go | 24 +- .../go.opentelemetry.io/otel/metric/meter.go | 77 + .../otel/metric/noop/noop.go | 28 + .../otel/metric/syncfloat64.go | 60 +- .../otel/metric/syncint64.go | 54 +- .../otel/propagation/trace_context.go | 2 +- vendor/go.opentelemetry.io/otel/renovate.json | 32 + .../go.opentelemetry.io/otel/requirements.txt | 2 +- vendor/go.opentelemetry.io/otel/sdk/README.md | 3 + .../otel/sdk/instrumentation/README.md | 3 + .../otel/sdk/instrumentation/doc.go | 13 +- .../otel/sdk/instrumentation/library.go | 13 +- .../otel/sdk/instrumentation/scope.go | 13 +- .../otel/sdk/internal/x/README.md | 46 + .../otel/sdk/internal/x/x.go | 66 + .../otel/sdk/resource/README.md | 3 + .../otel/sdk/resource/auto.go | 13 +- .../otel/sdk/resource/builtin.go | 36 +- .../otel/sdk/resource/config.go | 13 +- .../otel/sdk/resource/container.go | 15 +- .../otel/sdk/resource/doc.go | 13 +- .../otel/sdk/resource/env.go | 15 +- .../otel/sdk/resource/host_id.go | 15 +- .../otel/sdk/resource/host_id_bsd.go | 13 +- .../otel/sdk/resource/host_id_darwin.go | 13 +- .../otel/sdk/resource/host_id_exec.go | 13 +- .../otel/sdk/resource/host_id_linux.go | 13 +- .../otel/sdk/resource/host_id_readfile.go | 13 +- .../otel/sdk/resource/host_id_unsupported.go | 23 +- .../otel/sdk/resource/host_id_windows.go | 13 +- .../otel/sdk/resource/os.go | 15 +- .../otel/sdk/resource/os_release_darwin.go | 13 +- .../otel/sdk/resource/os_release_unix.go | 13 +- .../otel/sdk/resource/os_unix.go | 13 +- 
.../otel/sdk/resource/os_unsupported.go | 25 +- .../otel/sdk/resource/os_windows.go | 13 +- .../otel/sdk/resource/process.go | 15 +- .../otel/sdk/resource/resource.go | 24 +- .../go.opentelemetry.io/otel/sdk/version.go | 15 +- .../otel/semconv/internal/http.go | 2 +- .../otel/semconv/internal/v2/net.go | 6 +- .../otel/semconv/v1.24.0/README.md | 3 - .../otel/semconv/v1.24.0/attribute_group.go | 4387 --- .../otel/semconv/v1.24.0/event.go | 200 - .../otel/semconv/v1.24.0/resource.go | 2545 -- .../otel/semconv/v1.24.0/trace.go | 1323 - .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 ++++++ .../otel/semconv/{v1.24.0 => v1.26.0}/doc.go | 4 +- .../semconv/{v1.24.0 => v1.26.0}/exception.go | 2 +- .../semconv/{v1.24.0 => v1.26.0}/metric.go | 466 +- .../semconv/{v1.24.0 => v1.26.0}/schema.go | 4 +- .../go.opentelemetry.io/otel/trace/context.go | 2 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- .../otel/trace/provider.go | 59 + vendor/go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 249 - .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 10 + .../otel/verify_examples.sh | 74 - .../otel/verify_released_changelog.sh | 42 + vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 21 +- vendor/go.uber.org/atomic/CHANGELOG.md | 10 + vendor/go.uber.org/atomic/bool.go | 2 +- vendor/go.uber.org/atomic/duration.go | 2 +- vendor/go.uber.org/atomic/error.go | 14 +- vendor/go.uber.org/atomic/float32.go | 2 +- vendor/go.uber.org/atomic/float64.go | 2 +- vendor/go.uber.org/atomic/int32.go | 2 +- vendor/go.uber.org/atomic/int64.go | 2 +- vendor/go.uber.org/atomic/pointer_go118.go | 41 +- .../atomic/pointer_go118_pre119.go | 60 + vendor/go.uber.org/atomic/string.go | 23 +- vendor/go.uber.org/atomic/string_ext.go | 15 +- vendor/go.uber.org/atomic/time.go | 2 +- vendor/go.uber.org/atomic/uint32.go | 2 +- vendor/go.uber.org/atomic/uint64.go | 2 +- vendor/go.uber.org/atomic/uintptr.go | 2 +- vendor/go.uber.org/dig/CHANGELOG.md | 17 +- vendor/go.uber.org/dig/constructor.go | 5 + vendor/go.uber.org/dig/doc.go | 2 +- vendor/go.uber.org/dig/error.go | 22 + vendor/go.uber.org/dig/param.go | 2 +- vendor/go.uber.org/dig/scope.go | 7 +- vendor/go.uber.org/dig/version.go | 2 +- vendor/go.uber.org/zap/.golangci.yml | 77 + vendor/go.uber.org/zap/.readme.tmpl | 10 +- vendor/go.uber.org/zap/CHANGELOG.md | 273 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 vendor/go.uber.org/zap/Makefile | 82 +- vendor/go.uber.org/zap/README.md | 66 +- vendor/go.uber.org/zap/array.go | 127 + vendor/go.uber.org/zap/array_go118.go | 156 - vendor/go.uber.org/zap/buffer/buffer.go | 5 + vendor/go.uber.org/zap/error.go | 5 +- vendor/go.uber.org/zap/field.go | 27 +- vendor/go.uber.org/zap/http_handler.go | 19 +- .../stacktrace/stack.go} | 71 +- vendor/go.uber.org/zap/logger.go | 81 +- vendor/go.uber.org/zap/options.go | 15 + vendor/go.uber.org/zap/sink.go | 5 +- vendor/go.uber.org/zap/sugar.go | 39 + vendor/go.uber.org/zap/writer.go | 12 +- .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/core.go | 6 +- vendor/go.uber.org/zap/zapcore/encoder.go | 15 + vendor/go.uber.org/zap/zapcore/entry.go | 4 +- vendor/go.uber.org/zap/zapcore/error.go | 5 +- vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 145 +- vendor/go.uber.org/zap/zapcore/lazy_with.go | 54 + vendor/go4.org/netipx/.gitignore | 3 + 
vendor/go4.org/netipx/.gitmodules | 3 + vendor/go4.org/netipx/AUTHORS | 4 + vendor/go4.org/netipx/LICENSE | 27 + vendor/go4.org/netipx/README.md | 26 + vendor/go4.org/netipx/ipset.go | 498 + vendor/go4.org/netipx/mask6.go | 141 + vendor/go4.org/netipx/netipx.go | 584 + vendor/go4.org/netipx/uint128.go | 106 + vendor/golang.org/x/exp/LICENSE | 4 +- vendor/golang.org/x/exp/maps/maps.go | 94 - vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 389 +- vendor/golang.org/x/exp/slices/sort.go | 117 +- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +- .../golang.org/x/exp/slices/zsortordered.go | 34 +- vendor/golang.org/x/exp/slog/attr.go | 102 + vendor/golang.org/x/exp/slog/doc.go | 316 + vendor/golang.org/x/exp/slog/handler.go | 577 + .../x/exp/slog/internal/buffer/buffer.go | 84 + .../x/exp/slog/internal/ignorepc.go | 9 + vendor/golang.org/x/exp/slog/json_handler.go | 336 + vendor/golang.org/x/exp/slog/level.go | 201 + vendor/golang.org/x/exp/slog/logger.go | 343 + vendor/golang.org/x/exp/slog/noplog.bench | 36 + vendor/golang.org/x/exp/slog/record.go | 207 + vendor/golang.org/x/exp/slog/text_handler.go | 161 + vendor/golang.org/x/exp/slog/value.go | 456 + vendor/golang.org/x/exp/slog/value_119.go | 53 + vendor/golang.org/x/exp/slog/value_120.go | 39 + vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/README.md | 15 +- .../x/oauth2/internal/client_appengine.go | 13 - .../golang.org/x/oauth2/internal/transport.go | 5 - vendor/golang.org/x/oauth2/oauth2.go | 2 +- vendor/golang.org/x/oauth2/token.go | 7 + .../x/sync/singleflight/singleflight.go | 214 + vendor/golang.org/x/text/cases/cases.go | 162 + vendor/golang.org/x/text/cases/context.go | 376 + vendor/golang.org/x/text/cases/fold.go | 34 + vendor/golang.org/x/text/cases/icu.go | 61 + vendor/golang.org/x/text/cases/info.go | 82 + vendor/golang.org/x/text/cases/map.go | 816 + .../golang.org/x/text/cases/tables10.0.0.go | 2255 ++ .../golang.org/x/text/cases/tables11.0.0.go | 2316 ++ .../golang.org/x/text/cases/tables12.0.0.go | 2359 ++ .../golang.org/x/text/cases/tables13.0.0.go | 2399 ++ .../golang.org/x/text/cases/tables15.0.0.go | 2527 ++ vendor/golang.org/x/text/cases/tables9.0.0.go | 2215 ++ vendor/golang.org/x/text/cases/trieval.go | 217 + vendor/golang.org/x/text/internal/internal.go | 49 + vendor/golang.org/x/text/internal/match.go | 67 + vendor/golang.org/x/time/LICENSE | 4 +- vendor/golang.org/x/time/rate/rate.go | 19 +- vendor/golang.org/x/tools/txtar/archive.go | 140 + vendor/golang.org/x/tools/txtar/fs.go | 257 + .../appengine/internal/api.go | 653 - .../appengine/internal/api_classic.go | 170 - .../appengine/internal/api_common.go | 141 - .../appengine/internal/app_id.go | 28 - .../appengine/internal/base/api_base.pb.go | 308 - .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 --- .../internal/datastore/datastore_v3.proto | 551 - .../appengine/internal/identity.go | 54 - .../appengine/internal/identity_classic.go | 62 - .../appengine/internal/identity_flex.go | 12 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 - .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 17 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 70 - .../appengine/internal/metadata.go | 60 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - 
.../internal/remote_api/remote_api.pb.go | 361 - .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../internal/urlfetch/urlfetch_service.pb.go | 527 - .../internal/urlfetch/urlfetch_service.proto | 64 - .../appengine/urlfetch/urlfetch.go | 209 - .../encoding/protodelim/protodelim.go | 160 + .../evanphx/json-patch.v4}/.gitignore | 0 .../evanphx/json-patch.v4}/LICENSE | 0 .../evanphx/json-patch.v4}/README.md | 0 .../evanphx/json-patch.v4}/errors.go | 0 .../evanphx/json-patch.v4}/merge.go | 0 .../evanphx/json-patch.v4}/patch.go | 42 + vendor/k8s.io/api/admission/v1/doc.go | 2 +- .../k8s.io/api/admission/v1/generated.pb.go | 127 +- .../k8s.io/api/admission/v1/generated.proto | 18 +- vendor/k8s.io/api/admission/v1/types.go | 1 + .../v1/zz_generated.prerelease-lifecycle.go | 28 + .../api/admission/v1beta1/generated.pb.go | 129 +- .../api/admission/v1beta1/generated.proto | 18 +- .../api/admissionregistration/v1/doc.go | 1 + .../admissionregistration/v1/generated.pb.go | 6011 +++- .../admissionregistration/v1/generated.proto | 583 +- .../api/admissionregistration/v1/register.go | 4 + .../api/admissionregistration/v1/types.go | 610 +- .../v1/types_swagger_doc_generated.go | 178 +- .../v1/zz_generated.deepcopy.go | 432 + .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../api/admissionregistration/v1alpha1/doc.go | 1 + .../v1alpha1/generated.pb.go | 4449 ++- .../v1alpha1/generated.proto | 319 +- .../v1alpha1/register.go | 4 + .../admissionregistration/v1alpha1/types.go | 347 +- .../v1alpha1/types_swagger_doc_generated.go | 95 + .../v1alpha1/zz_generated.deepcopy.go | 252 + .../zz_generated.prerelease-lifecycle.go | 166 + .../v1beta1/generated.pb.go | 309 +- .../v1beta1/generated.proto | 53 +- .../admissionregistration/v1beta1/types.go | 19 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- vendor/k8s.io/api/apidiscovery/v2/doc.go | 23 + .../api/apidiscovery/v2/generated.pb.go | 1742 ++ .../api/apidiscovery/v2/generated.proto | 156 + vendor/k8s.io/api/apidiscovery/v2/register.go | 56 + vendor/k8s.io/api/apidiscovery/v2/types.go | 157 + .../apidiscovery/v2/zz_generated.deepcopy.go | 190 + .../v2/zz_generated.prerelease-lifecycle.go | 34 + .../api/apidiscovery/v2beta1/generated.pb.go | 113 +- .../api/apidiscovery/v2beta1/generated.proto | 10 +- .../v1alpha1/generated.pb.go | 119 +- .../v1alpha1/generated.proto | 7 +- .../api/apiserverinternal/v1alpha1/types.go | 1 - vendor/k8s.io/api/apps/v1/doc.go | 1 + vendor/k8s.io/api/apps/v1/generated.pb.go | 341 +- vendor/k8s.io/api/apps/v1/generated.proto | 77 +- vendor/k8s.io/api/apps/v1/types.go | 29 +- .../apps/v1/types_swagger_doc_generated.go | 4 +- .../v1/zz_generated.prerelease-lifecycle.go | 82 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 299 +- .../k8s.io/api/apps/v1beta1/generated.proto | 50 +- vendor/k8s.io/api/apps/v1beta1/types.go | 16 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 365 +- .../k8s.io/api/apps/v1beta2/generated.proto | 78 +- vendor/k8s.io/api/apps/v1beta2/types.go | 20 +- .../v1beta2/types_swagger_doc_generated.go | 4 +- vendor/k8s.io/api/authentication/v1/doc.go | 1 + .../api/authentication/v1/generated.pb.go | 154 +- .../api/authentication/v1/generated.proto | 12 +- vendor/k8s.io/api/authentication/v1/types.go | 7 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + .../authentication/v1alpha1/generated.pb.go | 59 +- .../authentication/v1alpha1/generated.proto | 4 +- .../authentication/v1beta1/generated.pb.go | 117 +- 
.../authentication/v1beta1/generated.proto | 9 +- .../api/authentication/v1beta1/types.go | 3 + vendor/k8s.io/api/authorization/v1/doc.go | 2 +- .../api/authorization/v1/generated.pb.go | 737 +- .../api/authorization/v1/generated.proto | 85 +- vendor/k8s.io/api/authorization/v1/types.go | 79 + .../v1/types_swagger_doc_generated.go | 38 +- .../authorization/v1/zz_generated.deepcopy.go | 61 +- .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../api/authorization/v1beta1/generated.pb.go | 294 +- .../api/authorization/v1beta1/generated.proto | 26 +- .../k8s.io/api/authorization/v1beta1/types.go | 16 + .../v1beta1/types_swagger_doc_generated.go | 18 +- .../v1beta1/zz_generated.deepcopy.go | 15 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 1 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 255 +- .../k8s.io/api/autoscaling/v1/generated.proto | 55 +- vendor/k8s.io/api/autoscaling/v1/types.go | 8 +- .../v1/types_swagger_doc_generated.go | 6 +- .../v1/zz_generated.prerelease-lifecycle.go | 40 + vendor/k8s.io/api/autoscaling/v2/doc.go | 1 + .../k8s.io/api/autoscaling/v2/generated.pb.go | 243 +- .../k8s.io/api/autoscaling/v2/generated.proto | 23 +- vendor/k8s.io/api/autoscaling/v2/types.go | 7 +- .../v2/types_swagger_doc_generated.go | 6 +- .../v2/zz_generated.prerelease-lifecycle.go | 34 + .../api/autoscaling/v2beta1/generated.pb.go | 243 +- .../api/autoscaling/v2beta1/generated.proto | 56 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 8 +- .../v2beta1/types_swagger_doc_generated.go | 6 +- .../api/autoscaling/v2beta2/generated.pb.go | 255 +- .../api/autoscaling/v2beta2/generated.proto | 27 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 9 +- .../v2beta2/types_swagger_doc_generated.go | 6 +- vendor/k8s.io/api/batch/v1/doc.go | 2 +- vendor/k8s.io/api/batch/v1/generated.pb.go | 747 +- vendor/k8s.io/api/batch/v1/generated.proto | 131 +- vendor/k8s.io/api/batch/v1/types.go | 130 +- .../batch/v1/types_swagger_doc_generated.go | 41 +- .../api/batch/v1/zz_generated.deepcopy.go | 59 + .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 115 +- .../k8s.io/api/batch/v1beta1/generated.proto | 14 +- vendor/k8s.io/api/certificates/v1/doc.go | 2 +- .../api/certificates/v1/generated.pb.go | 133 +- .../api/certificates/v1/generated.proto | 8 +- vendor/k8s.io/api/certificates/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/certificates/v1alpha1/generated.pb.go | 74 +- .../api/certificates/v1alpha1/generated.proto | 4 +- .../k8s.io/api/certificates/v1alpha1/types.go | 2 + .../zz_generated.prerelease-lifecycle.go | 8 +- .../api/certificates/v1beta1/generated.pb.go | 135 +- .../api/certificates/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/coordination/v1/doc.go | 1 + .../api/coordination/v1/generated.pb.go | 179 +- .../api/coordination/v1/generated.proto | 26 +- vendor/k8s.io/api/coordination/v1/types.go | 30 +- .../v1/types_swagger_doc_generated.go | 6 +- .../coordination/v1/zz_generated.deepcopy.go | 10 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/coordination/v1alpha2/doc.go | 24 + .../api/coordination/v1alpha2/generated.pb.go | 1027 + .../api/coordination/v1alpha2/generated.proto | 100 + .../api/coordination/v1alpha2/register.go | 53 + .../k8s.io/api/coordination/v1alpha2/types.go | 95 + .../v1alpha2/types_swagger_doc_generated.go | 64 + .../v1alpha2/zz_generated.deepcopy.go | 110 + .../zz_generated.prerelease-lifecycle.go | 58 + .../api/coordination/v1beta1/generated.pb.go | 182 +- 
.../api/coordination/v1beta1/generated.proto | 23 +- .../k8s.io/api/coordination/v1beta1/types.go | 13 + .../v1beta1/types_swagger_doc_generated.go | 4 +- .../v1beta1/zz_generated.deepcopy.go | 11 + .../api/core/v1/annotation_key_constants.go | 25 +- vendor/k8s.io/api/core/v1/doc.go | 2 + vendor/k8s.io/api/core/v1/generated.pb.go | 7417 +++-- vendor/k8s.io/api/core/v1/generated.proto | 961 +- vendor/k8s.io/api/core/v1/objectreference.go | 2 +- vendor/k8s.io/api/core/v1/types.go | 932 +- .../core/v1/types_swagger_doc_generated.go | 369 +- .../api/core/v1/zz_generated.deepcopy.go | 315 +- .../v1/zz_generated.prerelease-lifecycle.go | 274 + vendor/k8s.io/api/discovery/v1/doc.go | 1 + .../k8s.io/api/discovery/v1/generated.pb.go | 137 +- .../k8s.io/api/discovery/v1/generated.proto | 6 +- vendor/k8s.io/api/discovery/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/discovery/v1beta1/generated.pb.go | 135 +- .../api/discovery/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/events/v1/doc.go | 2 +- vendor/k8s.io/api/events/v1/generated.pb.go | 111 +- vendor/k8s.io/api/events/v1/generated.proto | 18 +- vendor/k8s.io/api/events/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/events/v1beta1/generated.pb.go | 111 +- .../k8s.io/api/events/v1beta1/generated.proto | 18 +- .../api/extensions/v1beta1/generated.pb.go | 447 +- .../api/extensions/v1beta1/generated.proto | 81 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 21 +- vendor/k8s.io/api/flowcontrol/v1/doc.go | 1 + .../k8s.io/api/flowcontrol/v1/generated.pb.go | 257 +- .../k8s.io/api/flowcontrol/v1/generated.proto | 12 +- vendor/k8s.io/api/flowcontrol/v1/types.go | 4 + .../v1/zz_generated.prerelease-lifecycle.go | 46 + .../api/flowcontrol/v1beta1/generated.pb.go | 249 +- .../api/flowcontrol/v1beta1/generated.proto | 12 +- .../api/flowcontrol/v1beta2/generated.pb.go | 261 +- .../api/flowcontrol/v1beta2/generated.proto | 12 +- .../api/flowcontrol/v1beta3/generated.pb.go | 259 +- .../api/flowcontrol/v1beta3/generated.proto | 12 +- .../imagepolicy}/v1alpha1/doc.go | 9 +- .../api/imagepolicy/v1alpha1/generated.pb.go | 1374 + .../api/imagepolicy/v1alpha1/generated.proto | 89 + .../api/imagepolicy/v1alpha1/register.go | 51 + .../k8s.io/api/imagepolicy/v1alpha1/types.go | 83 + .../v1alpha1/types_swagger_doc_generated.go | 72 + .../v1alpha1/zz_generated.deepcopy.go | 121 + vendor/k8s.io/api/networking/v1/doc.go | 1 + .../k8s.io/api/networking/v1/generated.pb.go | 275 +- .../k8s.io/api/networking/v1/generated.proto | 32 +- vendor/k8s.io/api/networking/v1/types.go | 18 +- .../v1/zz_generated.prerelease-lifecycle.go | 58 + .../api/networking/v1alpha1/generated.pb.go | 109 +- .../api/networking/v1alpha1/generated.proto | 14 +- .../k8s.io/api/networking/v1alpha1/types.go | 4 + .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../api/networking/v1beta1/generated.pb.go | 3212 ++- .../api/networking/v1beta1/generated.proto | 126 +- .../k8s.io/api/networking/v1beta1/register.go | 4 + vendor/k8s.io/api/networking/v1beta1/types.go | 137 +- .../v1beta1/types_swagger_doc_generated.go | 80 + .../networking/v1beta1/well_known_labels.go | 33 + .../v1beta1/zz_generated.deepcopy.go | 203 + .../zz_generated.prerelease-lifecycle.go | 72 + vendor/k8s.io/api/node/v1/doc.go | 2 +- vendor/k8s.io/api/node/v1/generated.pb.go | 99 +- vendor/k8s.io/api/node/v1/generated.proto | 8 +- vendor/k8s.io/api/node/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 103 +- 
.../k8s.io/api/node/v1alpha1/generated.proto | 8 +- .../k8s.io/api/node/v1beta1/generated.pb.go | 99 +- .../k8s.io/api/node/v1beta1/generated.proto | 8 +- vendor/k8s.io/api/policy/v1/doc.go | 1 + vendor/k8s.io/api/policy/v1/generated.pb.go | 125 +- vendor/k8s.io/api/policy/v1/generated.proto | 18 +- vendor/k8s.io/api/policy/v1/types.go | 3 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 125 +- .../k8s.io/api/policy/v1beta1/generated.proto | 18 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 137 +- vendor/k8s.io/api/rbac/v1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1/types.go | 20 +- .../v1/zz_generated.prerelease-lifecycle.go | 70 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 141 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 12 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 137 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 28 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 12 +- .../api/resource/v1alpha2/generated.pb.go | 4817 ---- .../api/resource/v1alpha2/generated.proto | 400 - vendor/k8s.io/api/resource/v1alpha2/types.go | 462 - .../v1alpha2/types_swagger_doc_generated.go | 232 - .../v1alpha2/zz_generated.deepcopy.go | 498 - vendor/k8s.io/api/resource/v1alpha3/doc.go | 24 + .../api/resource/v1alpha3/generated.pb.go | 8488 ++++++ .../api/resource/v1alpha3/generated.proto | 884 + .../{v1alpha2 => v1alpha3}/register.go | 15 +- vendor/k8s.io/api/resource/v1alpha3/types.go | 1081 + .../v1alpha3/types_swagger_doc_generated.go | 377 + .../v1alpha3/zz_generated.deepcopy.go | 866 + .../zz_generated.prerelease-lifecycle.go | 218 + .../api/resource/{v1alpha2 => v1beta1}/doc.go | 6 +- .../api/resource/v1beta1/generated.pb.go | 8655 ++++++ .../api/resource/v1beta1/generated.proto | 892 + .../k8s.io/api/resource/v1beta1/register.go | 60 + vendor/k8s.io/api/resource/v1beta1/types.go | 1084 + .../v1beta1/types_swagger_doc_generated.go | 386 + .../resource/v1beta1/zz_generated.deepcopy.go | 882 + .../zz_generated.prerelease-lifecycle.go | 166 + vendor/k8s.io/api/scheduling/v1/doc.go | 2 +- .../k8s.io/api/scheduling/v1/generated.pb.go | 73 +- .../k8s.io/api/scheduling/v1/generated.proto | 4 +- vendor/k8s.io/api/scheduling/v1/types.go | 2 + .../v1/zz_generated.prerelease-lifecycle.go | 34 + .../api/scheduling/v1alpha1/generated.pb.go | 71 +- .../api/scheduling/v1alpha1/generated.proto | 4 +- .../api/scheduling/v1beta1/generated.pb.go | 73 +- .../api/scheduling/v1beta1/generated.proto | 4 +- vendor/k8s.io/api/storage/v1/doc.go | 1 + vendor/k8s.io/api/storage/v1/generated.pb.go | 259 +- vendor/k8s.io/api/storage/v1/generated.proto | 46 +- vendor/k8s.io/api/storage/v1/types.go | 26 +- .../storage/v1/types_swagger_doc_generated.go | 6 +- .../v1/zz_generated.prerelease-lifecycle.go | 82 + .../api/storage/v1alpha1/generated.pb.go | 160 +- .../api/storage/v1alpha1/generated.proto | 28 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 6 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../api/storage/v1beta1/generated.pb.go | 866 +- .../api/storage/v1beta1/generated.proto | 83 +- vendor/k8s.io/api/storage/v1beta1/register.go | 3 + vendor/k8s.io/api/storage/v1beta1/types.go | 63 +- .../v1beta1/types_swagger_doc_generated.go | 23 +- .../storage/v1beta1/zz_generated.deepcopy.go | 66 + .../zz_generated.prerelease-lifecycle.go | 36 + .../api/storagemigration/v1alpha1}/doc.go | 13 +- .../storagemigration/v1alpha1/generated.pb.go | 1688 ++ 
.../storagemigration/v1alpha1/generated.proto | 127 + .../api/storagemigration/v1alpha1/register.go | 58 + .../api/storagemigration/v1alpha1/types.go | 131 + .../v1alpha1/types_swagger_doc_generated.go | 95 + .../v1alpha1/zz_generated.deepcopy.go | 160 + .../zz_generated.prerelease-lifecycle.go | 58 + .../pkg/apis/apiextensions/types.go | 25 + .../apis/apiextensions/types_jsonschema.go | 1 - .../pkg/apis/apiextensions/v1/conversion.go | 26 +- .../pkg/apis/apiextensions/v1/doc.go | 1 + .../pkg/apis/apiextensions/v1/generated.pb.go | 679 +- .../pkg/apis/apiextensions/v1/generated.proto | 51 +- .../pkg/apis/apiextensions/v1/marshal.go | 161 +- .../pkg/apis/apiextensions/v1/types.go | 33 + .../apis/apiextensions/v1/types_jsonschema.go | 53 +- .../v1/zz_generated.conversion.go | 33 + .../apiextensions/v1/zz_generated.deepcopy.go | 21 + .../v1/zz_generated.prerelease-lifecycle.go | 40 + .../apiextensions/v1beta1/generated.pb.go | 742 +- .../apiextensions/v1beta1/generated.proto | 59 +- .../pkg/apis/apiextensions/v1beta1/marshal.go | 159 +- .../pkg/apis/apiextensions/v1beta1/types.go | 38 + .../apiextensions/v1beta1/types_jsonschema.go | 53 +- .../v1beta1/zz_generated.conversion.go | 34 + .../v1beta1/zz_generated.deepcopy.go | 26 + .../apiextensions/zz_generated.deepcopy.go | 26 + .../apimachinery/pkg/api/errors/errors.go | 7 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 3 +- .../meta/testrestmapper/test_restmapper.go | 165 + .../apimachinery/pkg/api/resource/quantity.go | 68 +- .../pkg/api/validation/objectmeta.go | 2 +- .../meta/internalversion/scheme/register.go | 8 +- .../internalversion/validation/validation.go | 76 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 3 +- .../pkg/apis/meta/v1/controller_ref.go | 13 +- .../pkg/apis/meta/v1/generated.pb.go | 715 +- .../pkg/apis/meta/v1/generated.proto | 38 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 83 +- .../pkg/apis/meta/v1/micro_time.go | 28 + .../apimachinery/pkg/apis/meta/v1/time.go | 29 + .../apimachinery/pkg/apis/meta/v1/types.go | 72 + .../meta/v1/types_swagger_doc_generated.go | 12 + .../pkg/apis/meta/v1/unstructured/helpers.go | 24 + .../apis/meta/v1/unstructured/unstructured.go | 6 +- .../pkg/apis/meta/v1/validation/validation.go | 91 +- .../apis/meta/v1/zz_generated.conversion.go | 7 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 26 + .../pkg/apis/meta/v1beta1/generated.proto | 4 +- .../apimachinery/pkg/labels/selector.go | 24 +- .../apimachinery/pkg/runtime/extension.go | 100 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 18 + .../apimachinery/pkg/runtime/interfaces.go | 13 + .../pkg/runtime/serializer/cbor/cbor.go | 389 + .../runtime/serializer/cbor/direct/direct.go | 61 + .../pkg/runtime/serializer/cbor/framer.go | 90 + .../serializer/cbor/internal/modes/buffers.go | 65 + .../serializer/cbor/internal/modes/custom.go | 422 + .../serializer/cbor/internal/modes/decode.go | 158 + .../cbor/internal/modes/diagnostic.go | 36 + .../serializer/cbor/internal/modes/encode.go | 155 + .../pkg/runtime/serializer/cbor/raw.go | 236 + .../pkg/runtime/serializer/codec_factory.go | 149 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 8 +- vendor/k8s.io/apimachinery/pkg/types/patch.go | 4 +- .../pkg/util/duration/duration.go | 93 + .../apimachinery/pkg/util/framer/framer.go | 18 +- .../apimachinery/pkg/util/intstr/intstr.go | 26 + .../pkg/util/managedfields/fieldmanager.go | 7 +- .../managedfields/internal/structuredmerge.go | 19 +- .../apimachinery/pkg/util/runtime/runtime.go | 135 +- .../k8s.io/apimachinery/pkg/util/sets/set.go | 8 +- 
.../pkg/util/strategicpatch/patch.go | 4 + .../pkg/util/validation/field/errors.go | 26 +- .../pkg/util/validation/validation.go | 19 + .../apimachinery/pkg/util/version/version.go | 112 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 40 +- .../client-go/applyconfigurations/OWNERS | 5 + .../v1/auditannotation.go | 48 + .../v1/expressionwarning.go | 48 + .../v1/matchcondition.go | 4 +- .../v1/matchresources.go | 90 + .../v1/mutatingwebhook.go | 4 +- .../v1/mutatingwebhookconfiguration.go | 74 +- .../v1/namedrulewithoperations.go | 94 + .../admissionregistration/v1/paramkind.go | 48 + .../admissionregistration/v1/paramref.go | 71 + .../admissionregistration/v1/rule.go | 16 +- .../v1/rulewithoperations.go | 20 +- .../v1/servicereference.go | 4 +- .../admissionregistration/v1/typechecking.go | 44 + .../v1/validatingadmissionpolicy.go | 262 + .../v1/validatingadmissionpolicybinding.go | 253 + .../validatingadmissionpolicybindingspec.go | 72 + .../v1/validatingadmissionpolicyspec.go | 117 + .../v1/validatingadmissionpolicystatus.go | 66 + .../v1/validatingwebhook.go | 4 +- .../v1/validatingwebhookconfiguration.go | 74 +- .../admissionregistration/v1/validation.go | 70 + .../admissionregistration/v1/variable.go | 48 + .../v1/webhookclientconfig.go | 4 +- .../v1alpha1/applyconfiguration.go | 39 + .../v1alpha1/auditannotation.go | 4 +- .../v1alpha1/expressionwarning.go | 4 +- .../v1alpha1/jsonpatch.go | 39 + .../v1alpha1/matchcondition.go | 4 +- .../v1alpha1/matchresources.go | 4 +- .../v1alpha1/mutatingadmissionpolicy.go | 253 + .../mutatingadmissionpolicybinding.go | 253 + .../mutatingadmissionpolicybindingspec.go | 57 + .../v1alpha1/mutatingadmissionpolicyspec.go | 113 + .../v1alpha1/mutation.go | 61 + .../v1alpha1/namedrulewithoperations.go | 14 +- .../v1alpha1/paramkind.go | 4 +- .../v1alpha1/paramref.go | 16 +- .../v1alpha1/typechecking.go | 4 +- .../v1alpha1/validatingadmissionpolicy.go | 48 +- .../validatingadmissionpolicybinding.go | 48 +- .../validatingadmissionpolicybindingspec.go | 4 +- .../v1alpha1/validatingadmissionpolicyspec.go | 4 +- .../validatingadmissionpolicystatus.go | 4 +- .../v1alpha1/validation.go | 4 +- .../v1alpha1/variable.go | 4 +- .../v1beta1/auditannotation.go | 4 +- .../v1beta1/expressionwarning.go | 4 +- .../v1beta1/matchcondition.go | 4 +- .../v1beta1/matchresources.go | 4 +- .../v1beta1/mutatingwebhook.go | 4 +- .../v1beta1/mutatingwebhookconfiguration.go | 48 +- .../v1beta1/namedrulewithoperations.go | 14 +- .../v1beta1/paramkind.go | 4 +- .../admissionregistration/v1beta1/paramref.go | 16 +- .../v1beta1/servicereference.go | 4 +- .../v1beta1/typechecking.go | 4 +- .../v1beta1/validatingadmissionpolicy.go | 48 +- .../validatingadmissionpolicybinding.go | 48 +- .../validatingadmissionpolicybindingspec.go | 4 +- .../v1beta1/validatingadmissionpolicyspec.go | 4 +- .../validatingadmissionpolicystatus.go | 4 +- .../v1beta1/validatingwebhook.go | 4 +- .../v1beta1/validatingwebhookconfiguration.go | 48 +- .../v1beta1/validation.go | 4 +- .../admissionregistration/v1beta1/variable.go | 4 +- .../v1beta1/webhookclientconfig.go | 4 +- .../v1alpha1/serverstorageversion.go | 4 +- .../v1alpha1/storageversion.go | 62 +- .../v1alpha1/storageversioncondition.go | 22 +- .../v1alpha1/storageversionstatus.go | 4 +- .../apps/v1/controllerrevision.go | 68 +- .../applyconfigurations/apps/v1/daemonset.go | 76 +- .../apps/v1/daemonsetcondition.go | 18 +- .../apps/v1/daemonsetspec.go | 10 +- .../apps/v1/daemonsetstatus.go | 4 +- .../apps/v1/daemonsetupdatestrategy.go | 10 +- 
.../applyconfigurations/apps/v1/deployment.go | 76 +- .../apps/v1/deploymentcondition.go | 20 +- .../apps/v1/deploymentspec.go | 10 +- .../apps/v1/deploymentstatus.go | 4 +- .../apps/v1/deploymentstrategy.go | 10 +- .../applyconfigurations/apps/v1/replicaset.go | 76 +- .../apps/v1/replicasetcondition.go | 18 +- .../apps/v1/replicasetspec.go | 10 +- .../apps/v1/replicasetstatus.go | 4 +- .../apps/v1/rollingupdatedaemonset.go | 4 +- .../apps/v1/rollingupdatedeployment.go | 4 +- .../v1/rollingupdatestatefulsetstrategy.go | 4 +- .../apps/v1/statefulset.go | 76 +- .../apps/v1/statefulsetcondition.go | 18 +- .../apps/v1/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 14 +- .../apps/v1/statefulsetspec.go | 10 +- .../apps/v1/statefulsetstatus.go | 4 +- .../apps/v1/statefulsetupdatestrategy.go | 10 +- .../apps/v1beta1/controllerrevision.go | 56 +- .../apps/v1beta1/deployment.go | 48 +- .../apps/v1beta1/deploymentcondition.go | 20 +- .../apps/v1beta1/deploymentspec.go | 4 +- .../apps/v1beta1/deploymentstatus.go | 4 +- .../apps/v1beta1/deploymentstrategy.go | 10 +- .../apps/v1beta1/rollbackconfig.go | 4 +- .../apps/v1beta1/rollingupdatedeployment.go | 4 +- .../rollingupdatestatefulsetstrategy.go | 4 +- .../apps/v1beta1/statefulset.go | 48 +- .../apps/v1beta1/statefulsetcondition.go | 18 +- .../apps/v1beta1/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 14 +- .../apps/v1beta1/statefulsetspec.go | 10 +- .../apps/v1beta1/statefulsetstatus.go | 4 +- .../apps/v1beta1/statefulsetupdatestrategy.go | 10 +- .../apps/v1beta2/controllerrevision.go | 56 +- .../apps/v1beta2/daemonset.go | 48 +- .../apps/v1beta2/daemonsetcondition.go | 18 +- .../apps/v1beta2/daemonsetspec.go | 4 +- .../apps/v1beta2/daemonsetstatus.go | 4 +- .../apps/v1beta2/daemonsetupdatestrategy.go | 10 +- .../apps/v1beta2/deployment.go | 48 +- .../apps/v1beta2/deploymentcondition.go | 20 +- .../apps/v1beta2/deploymentspec.go | 4 +- .../apps/v1beta2/deploymentstatus.go | 4 +- .../apps/v1beta2/deploymentstrategy.go | 10 +- .../apps/v1beta2/replicaset.go | 48 +- .../apps/v1beta2/replicasetcondition.go | 18 +- .../apps/v1beta2/replicasetspec.go | 4 +- .../apps/v1beta2/replicasetstatus.go | 4 +- .../apps/v1beta2/rollingupdatedaemonset.go | 4 +- .../apps/v1beta2/rollingupdatedeployment.go | 4 +- .../rollingupdatestatefulsetstrategy.go | 4 +- .../applyconfigurations/apps/v1beta2/scale.go | 58 +- .../apps/v1beta2/statefulset.go | 48 +- .../apps/v1beta2/statefulsetcondition.go | 18 +- .../apps/v1beta2/statefulsetordinals.go | 4 +- ...setpersistentvolumeclaimretentionpolicy.go | 14 +- .../apps/v1beta2/statefulsetspec.go | 10 +- .../apps/v1beta2/statefulsetstatus.go | 4 +- .../apps/v1beta2/statefulsetupdatestrategy.go | 10 +- .../v1/crossversionobjectreference.go | 4 +- .../autoscaling/v1/horizontalpodautoscaler.go | 76 +- .../v1/horizontalpodautoscalerspec.go | 4 +- .../v1/horizontalpodautoscalerstatus.go | 18 +- .../autoscaling/v1/scale.go | 68 +- .../autoscaling/v1/scalespec.go | 4 +- .../autoscaling/v1/scalestatus.go | 4 +- .../v2/containerresourcemetricsource.go | 4 +- .../v2/containerresourcemetricstatus.go | 4 +- .../v2/crossversionobjectreference.go | 4 +- .../autoscaling/v2/externalmetricsource.go | 4 +- .../autoscaling/v2/externalmetricstatus.go | 4 +- .../autoscaling/v2/horizontalpodautoscaler.go | 48 +- .../v2/horizontalpodautoscalerbehavior.go | 4 +- .../v2/horizontalpodautoscalercondition.go | 18 +- .../v2/horizontalpodautoscalerspec.go | 4 +- 
.../v2/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2/hpascalingpolicy.go | 14 +- .../autoscaling/v2/hpascalingrules.go | 10 +- .../autoscaling/v2/metricidentifier.go | 4 +- .../autoscaling/v2/metricspec.go | 10 +- .../autoscaling/v2/metricstatus.go | 10 +- .../autoscaling/v2/metrictarget.go | 16 +- .../autoscaling/v2/metricvaluestatus.go | 4 +- .../autoscaling/v2/objectmetricsource.go | 4 +- .../autoscaling/v2/objectmetricstatus.go | 4 +- .../autoscaling/v2/podsmetricsource.go | 4 +- .../autoscaling/v2/podsmetricstatus.go | 4 +- .../autoscaling/v2/resourcemetricsource.go | 4 +- .../autoscaling/v2/resourcemetricstatus.go | 4 +- .../v2beta1/containerresourcemetricsource.go | 4 +- .../v2beta1/containerresourcemetricstatus.go | 4 +- .../v2beta1/crossversionobjectreference.go | 4 +- .../v2beta1/externalmetricsource.go | 4 +- .../v2beta1/externalmetricstatus.go | 4 +- .../v2beta1/horizontalpodautoscaler.go | 48 +- .../horizontalpodautoscalercondition.go | 18 +- .../v2beta1/horizontalpodautoscalerspec.go | 4 +- .../v2beta1/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2beta1/metricspec.go | 10 +- .../autoscaling/v2beta1/metricstatus.go | 10 +- .../autoscaling/v2beta1/objectmetricsource.go | 4 +- .../autoscaling/v2beta1/objectmetricstatus.go | 4 +- .../autoscaling/v2beta1/podsmetricsource.go | 4 +- .../autoscaling/v2beta1/podsmetricstatus.go | 4 +- .../v2beta1/resourcemetricsource.go | 4 +- .../v2beta1/resourcemetricstatus.go | 4 +- .../v2beta2/containerresourcemetricsource.go | 4 +- .../v2beta2/containerresourcemetricstatus.go | 4 +- .../v2beta2/crossversionobjectreference.go | 4 +- .../v2beta2/externalmetricsource.go | 4 +- .../v2beta2/externalmetricstatus.go | 4 +- .../v2beta2/horizontalpodautoscaler.go | 48 +- .../horizontalpodautoscalerbehavior.go | 4 +- .../horizontalpodautoscalercondition.go | 18 +- .../v2beta2/horizontalpodautoscalerspec.go | 4 +- .../v2beta2/horizontalpodautoscalerstatus.go | 4 +- .../autoscaling/v2beta2/hpascalingpolicy.go | 14 +- .../autoscaling/v2beta2/hpascalingrules.go | 14 +- .../autoscaling/v2beta2/metricidentifier.go | 4 +- .../autoscaling/v2beta2/metricspec.go | 10 +- .../autoscaling/v2beta2/metricstatus.go | 10 +- .../autoscaling/v2beta2/metrictarget.go | 16 +- .../autoscaling/v2beta2/metricvaluestatus.go | 4 +- .../autoscaling/v2beta2/objectmetricsource.go | 4 +- .../autoscaling/v2beta2/objectmetricstatus.go | 4 +- .../autoscaling/v2beta2/podsmetricsource.go | 4 +- .../autoscaling/v2beta2/podsmetricstatus.go | 4 +- .../v2beta2/resourcemetricsource.go | 4 +- .../v2beta2/resourcemetricstatus.go | 4 +- .../applyconfigurations/batch/v1/cronjob.go | 76 +- .../batch/v1/cronjobspec.go | 10 +- .../batch/v1/cronjobstatus.go | 14 +- .../applyconfigurations/batch/v1/job.go | 76 +- .../batch/v1/jobcondition.go | 20 +- .../applyconfigurations/batch/v1/jobspec.go | 22 +- .../applyconfigurations/batch/v1/jobstatus.go | 4 +- .../batch/v1/jobtemplatespec.go | 60 +- .../batch/v1/podfailurepolicy.go | 4 +- .../podfailurepolicyonexitcodesrequirement.go | 14 +- .../podfailurepolicyonpodconditionspattern.go | 14 +- .../batch/v1/podfailurepolicyrule.go | 10 +- .../batch/v1/successpolicy.go | 44 + .../batch/v1/successpolicyrule.go | 48 + .../batch/v1/uncountedterminatedpods.go | 4 +- .../batch/v1beta1/cronjob.go | 48 +- .../batch/v1beta1/cronjobspec.go | 10 +- .../batch/v1beta1/cronjobstatus.go | 4 +- .../batch/v1beta1/jobtemplatespec.go | 44 +- .../v1/certificatesigningrequest.go | 76 +- .../v1/certificatesigningrequestcondition.go | 20 +- 
.../v1/certificatesigningrequestspec.go | 28 +- .../v1/certificatesigningrequeststatus.go | 4 +- .../v1alpha1/clustertrustbundle.go | 48 +- .../v1alpha1/clustertrustbundlespec.go | 4 +- .../v1beta1/certificatesigningrequest.go | 48 +- .../certificatesigningrequestcondition.go | 20 +- .../v1beta1/certificatesigningrequestspec.go | 28 +- .../certificatesigningrequeststatus.go | 4 +- .../coordination/v1/lease.go | 74 +- .../coordination/v1/leasespec.go | 39 +- .../coordination/v1alpha2/leasecandidate.go | 255 + .../v1alpha2/leasecandidatespec.go | 89 + .../coordination/v1beta1/lease.go | 48 +- .../coordination/v1beta1/leasespec.go | 33 +- .../applyconfigurations/core/v1/affinity.go | 4 +- .../core/v1/apparmorprofile.go | 52 + .../core/v1/attachedvolume.go | 12 +- .../v1/awselasticblockstorevolumesource.go | 4 +- .../core/v1/azurediskvolumesource.go | 22 +- .../v1/azurefilepersistentvolumesource.go | 4 +- .../core/v1/azurefilevolumesource.go | 4 +- .../core/v1/capabilities.go | 14 +- .../core/v1/cephfspersistentvolumesource.go | 4 +- .../core/v1/cephfsvolumesource.go | 4 +- .../core/v1/cinderpersistentvolumesource.go | 4 +- .../core/v1/cindervolumesource.go | 4 +- .../core/v1/claimsource.go | 48 - .../core/v1/clientipconfig.go | 4 +- .../core/v1/clustertrustbundleprojection.go | 18 +- .../core/v1/componentcondition.go | 18 +- .../core/v1/componentstatus.go | 74 +- .../applyconfigurations/core/v1/configmap.go | 70 +- .../core/v1/configmapenvsource.go | 6 +- .../core/v1/configmapkeyselector.go | 6 +- .../core/v1/configmapnodeconfigsource.go | 4 +- .../core/v1/configmapprojection.go | 6 +- .../core/v1/configmapvolumesource.go | 6 +- .../applyconfigurations/core/v1/container.go | 4 +- .../core/v1/containerimage.go | 4 +- .../core/v1/containerport.go | 18 +- .../core/v1/containerresizepolicy.go | 14 +- .../core/v1/containerstate.go | 4 +- .../core/v1/containerstaterunning.go | 10 +- .../core/v1/containerstateterminated.go | 24 +- .../core/v1/containerstatewaiting.go | 4 +- .../core/v1/containerstatus.go | 67 +- .../core/v1/containeruser.go | 39 + .../core/v1/csipersistentvolumesource.go | 4 +- .../core/v1/csivolumesource.go | 4 +- .../core/v1/daemonendpoint.go | 4 +- .../core/v1/downwardapiprojection.go | 4 +- .../core/v1/downwardapivolumefile.go | 4 +- .../core/v1/downwardapivolumesource.go | 4 +- .../core/v1/emptydirvolumesource.go | 12 +- .../core/v1/endpointaddress.go | 4 +- .../core/v1/endpointport.go | 16 +- .../applyconfigurations/core/v1/endpoints.go | 74 +- .../core/v1/endpointsubset.go | 4 +- .../core/v1/envfromsource.go | 4 +- .../applyconfigurations/core/v1/envvar.go | 4 +- .../core/v1/envvarsource.go | 4 +- .../core/v1/ephemeralcontainer.go | 52 +- .../core/v1/ephemeralcontainercommon.go | 4 +- .../core/v1/ephemeralvolumesource.go | 4 +- .../applyconfigurations/core/v1/event.go | 110 +- .../core/v1/eventseries.go | 12 +- .../core/v1/eventsource.go | 4 +- .../applyconfigurations/core/v1/execaction.go | 4 +- .../core/v1/fcvolumesource.go | 4 +- .../core/v1/flexpersistentvolumesource.go | 4 +- .../core/v1/flexvolumesource.go | 4 +- .../core/v1/flockervolumesource.go | 4 +- .../core/v1/gcepersistentdiskvolumesource.go | 4 +- .../core/v1/gitrepovolumesource.go | 4 +- .../v1/glusterfspersistentvolumesource.go | 4 +- .../core/v1/glusterfsvolumesource.go | 4 +- .../applyconfigurations/core/v1/grpcaction.go | 4 +- .../applyconfigurations/core/v1/hostalias.go | 4 +- .../applyconfigurations/core/v1/hostip.go | 4 +- .../core/v1/hostpathvolumesource.go | 12 +- .../core/v1/httpgetaction.go | 10 +- 
.../applyconfigurations/core/v1/httpheader.go | 4 +- .../core/v1/imagevolumesource.go | 52 + .../core/v1/iscsipersistentvolumesource.go | 4 +- .../core/v1/iscsivolumesource.go | 4 +- .../applyconfigurations/core/v1/keytopath.go | 4 +- .../applyconfigurations/core/v1/lifecycle.go | 4 +- .../core/v1/lifecyclehandler.go | 4 +- .../applyconfigurations/core/v1/limitrange.go | 74 +- .../core/v1/limitrangeitem.go | 30 +- .../core/v1/limitrangespec.go | 4 +- .../core/v1/linuxcontaineruser.go | 59 + .../core/v1/loadbalanceringress.go | 10 +- .../core/v1/loadbalancerstatus.go | 4 +- .../core/v1/localobjectreference.go | 4 +- .../core/v1/localvolumesource.go | 4 +- .../core/v1/modifyvolumestatus.go | 12 +- .../applyconfigurations/core/v1/namespace.go | 76 +- .../core/v1/namespacecondition.go | 20 +- .../core/v1/namespacespec.go | 10 +- .../core/v1/namespacestatus.go | 10 +- .../core/v1/nfsvolumesource.go | 4 +- .../applyconfigurations/core/v1/node.go | 76 +- .../core/v1/nodeaddress.go | 12 +- .../core/v1/nodeaffinity.go | 4 +- .../core/v1/nodecondition.go | 22 +- .../core/v1/nodeconfigsource.go | 4 +- .../core/v1/nodeconfigstatus.go | 4 +- .../core/v1/nodedaemonendpoints.go | 4 +- .../core/v1/nodefeatures.go | 39 + .../core/v1/noderuntimehandler.go | 48 + .../core/v1/noderuntimehandlerfeatures.go | 48 + .../core/v1/nodeselector.go | 4 +- .../core/v1/nodeselectorrequirement.go | 14 +- .../core/v1/nodeselectorterm.go | 4 +- .../applyconfigurations/core/v1/nodespec.go | 4 +- .../applyconfigurations/core/v1/nodestatus.go | 45 +- .../core/v1/nodesysteminfo.go | 4 +- .../core/v1/objectfieldselector.go | 4 +- .../core/v1/objectreference.go | 4 +- .../core/v1/persistentvolume.go | 76 +- .../core/v1/persistentvolumeclaim.go | 76 +- .../core/v1/persistentvolumeclaimcondition.go | 22 +- .../core/v1/persistentvolumeclaimspec.go | 14 +- .../core/v1/persistentvolumeclaimstatus.go | 28 +- .../core/v1/persistentvolumeclaimtemplate.go | 60 +- .../v1/persistentvolumeclaimvolumesource.go | 4 +- .../core/v1/persistentvolumesource.go | 4 +- .../core/v1/persistentvolumespec.go | 66 +- .../core/v1/persistentvolumestatus.go | 16 +- .../v1/photonpersistentdiskvolumesource.go | 4 +- .../applyconfigurations/core/v1/pod.go | 76 +- .../core/v1/podaffinity.go | 4 +- .../core/v1/podaffinityterm.go | 22 +- .../core/v1/podantiaffinity.go | 4 +- .../core/v1/podcondition.go | 22 +- .../core/v1/poddnsconfig.go | 4 +- .../core/v1/poddnsconfigoption.go | 4 +- .../applyconfigurations/core/v1/podip.go | 4 +- .../applyconfigurations/core/v1/podos.go | 10 +- .../core/v1/podreadinessgate.go | 10 +- .../core/v1/podresourceclaim.go | 25 +- .../core/v1/podresourceclaimstatus.go | 4 +- .../core/v1/podschedulinggate.go | 4 +- .../core/v1/podsecuritycontext.go | 51 +- .../applyconfigurations/core/v1/podspec.go | 13 +- .../applyconfigurations/core/v1/podstatus.go | 18 +- .../core/v1/podtemplate.go | 74 +- .../core/v1/podtemplatespec.go | 60 +- .../applyconfigurations/core/v1/portstatus.go | 14 +- .../core/v1/portworxvolumesource.go | 4 +- .../core/v1/preferredschedulingterm.go | 4 +- .../applyconfigurations/core/v1/probe.go | 12 +- .../core/v1/probehandler.go | 4 +- .../core/v1/projectedvolumesource.go | 4 +- .../core/v1/quobytevolumesource.go | 4 +- .../core/v1/rbdpersistentvolumesource.go | 4 +- .../core/v1/rbdvolumesource.go | 4 +- .../core/v1/replicationcontroller.go | 76 +- .../core/v1/replicationcontrollercondition.go | 20 +- .../core/v1/replicationcontrollerspec.go | 4 +- .../core/v1/replicationcontrollerstatus.go | 4 +- 
.../core/v1/resourceclaim.go | 15 +- .../core/v1/resourcefieldselector.go | 4 +- .../core/v1/resourcehealth.go | 52 + .../core/v1/resourcequota.go | 76 +- .../core/v1/resourcequotaspec.go | 14 +- .../core/v1/resourcequotastatus.go | 14 +- .../core/v1/resourcerequirements.go | 14 +- .../v1/resourcestatus.go} | 33 +- .../core/v1/scaleiopersistentvolumesource.go | 4 +- .../core/v1/scaleiovolumesource.go | 4 +- .../v1/scopedresourceselectorrequirement.go | 16 +- .../core/v1/scopeselector.go | 4 +- .../core/v1/seccompprofile.go | 12 +- .../applyconfigurations/core/v1/secret.go | 72 +- .../core/v1/secretenvsource.go | 6 +- .../core/v1/secretkeyselector.go | 6 +- .../core/v1/secretprojection.go | 6 +- .../core/v1/secretreference.go | 4 +- .../core/v1/secretvolumesource.go | 4 +- .../core/v1/securitycontext.go | 13 +- .../core/v1/selinuxoptions.go | 4 +- .../applyconfigurations/core/v1/service.go | 76 +- .../core/v1/serviceaccount.go | 78 +- .../core/v1/serviceaccounttokenprojection.go | 4 +- .../core/v1/serviceport.go | 10 +- .../core/v1/servicespec.go | 13 +- .../core/v1/servicestatus.go | 4 +- .../core/v1/sessionaffinityconfig.go | 4 +- .../core/v1/sleepaction.go | 4 +- .../v1/storageospersistentvolumesource.go | 4 +- .../core/v1/storageosvolumesource.go | 4 +- .../applyconfigurations/core/v1/sysctl.go | 4 +- .../applyconfigurations/core/v1/taint.go | 16 +- .../core/v1/tcpsocketaction.go | 4 +- .../applyconfigurations/core/v1/toleration.go | 20 +- .../v1/topologyselectorlabelrequirement.go | 4 +- .../core/v1/topologyselectorterm.go | 4 +- .../core/v1/topologyspreadconstraint.go | 18 +- .../core/v1/typedlocalobjectreference.go | 4 +- .../core/v1/typedobjectreference.go | 4 +- .../applyconfigurations/core/v1/volume.go | 70 +- .../core/v1/volumedevice.go | 4 +- .../core/v1/volumemount.go | 29 +- .../core/v1/volumemountstatus.go | 70 + .../core/v1/volumenodeaffinity.go | 4 +- .../core/v1/volumeprojection.go | 4 +- .../core/v1/volumeresourcerequirements.go | 14 +- .../core/v1/volumesource.go | 13 +- .../core/v1/vspherevirtualdiskvolumesource.go | 4 +- .../core/v1/weightedpodaffinityterm.go | 4 +- .../core/v1/windowssecuritycontextoptions.go | 4 +- .../discovery/v1/endpoint.go | 4 +- .../discovery/v1/endpointconditions.go | 4 +- .../discovery/v1/endpointhints.go | 4 +- .../discovery/v1/endpointport.go | 16 +- .../discovery/v1/endpointslice.go | 70 +- .../discovery/v1/forzone.go | 4 +- .../discovery/v1beta1/endpoint.go | 4 +- .../discovery/v1beta1/endpointconditions.go | 4 +- .../discovery/v1beta1/endpointhints.go | 4 +- .../discovery/v1beta1/endpointport.go | 4 +- .../discovery/v1beta1/endpointslice.go | 60 +- .../discovery/v1beta1/forzone.go | 4 +- .../client-go/applyconfigurations/doc.go | 151 + .../applyconfigurations/events/v1/event.go | 110 +- .../events/v1/eventseries.go | 12 +- .../events/v1beta1/event.go | 48 +- .../events/v1beta1/eventseries.go | 4 +- .../extensions/v1beta1/daemonset.go | 48 +- .../extensions/v1beta1/daemonsetcondition.go | 18 +- .../extensions/v1beta1/daemonsetspec.go | 4 +- .../extensions/v1beta1/daemonsetstatus.go | 4 +- .../v1beta1/daemonsetupdatestrategy.go | 12 +- .../extensions/v1beta1/deployment.go | 48 +- .../extensions/v1beta1/deploymentcondition.go | 20 +- .../extensions/v1beta1/deploymentspec.go | 4 +- .../extensions/v1beta1/deploymentstatus.go | 4 +- .../extensions/v1beta1/deploymentstrategy.go | 10 +- .../extensions/v1beta1/httpingresspath.go | 10 +- .../v1beta1/httpingressrulevalue.go | 4 +- .../extensions/v1beta1/ingress.go | 48 +- 
.../extensions/v1beta1/ingressbackend.go | 4 +- .../v1beta1/ingressloadbalanceringress.go | 4 +- .../v1beta1/ingressloadbalancerstatus.go | 4 +- .../extensions/v1beta1/ingressportstatus.go | 4 +- .../extensions/v1beta1/ingressrule.go | 8 +- .../extensions/v1beta1/ingressrulevalue.go | 4 +- .../extensions/v1beta1/ingressspec.go | 4 +- .../extensions/v1beta1/ingressstatus.go | 4 +- .../extensions/v1beta1/ingresstls.go | 4 +- .../extensions/v1beta1/ipblock.go | 4 +- .../extensions/v1beta1/networkpolicy.go | 48 +- .../v1beta1/networkpolicyegressrule.go | 4 +- .../v1beta1/networkpolicyingressrule.go | 4 +- .../extensions/v1beta1/networkpolicypeer.go | 4 +- .../extensions/v1beta1/networkpolicyport.go | 4 +- .../extensions/v1beta1/networkpolicyspec.go | 4 +- .../extensions/v1beta1/replicaset.go | 48 +- .../extensions/v1beta1/replicasetcondition.go | 18 +- .../extensions/v1beta1/replicasetspec.go | 4 +- .../extensions/v1beta1/replicasetstatus.go | 4 +- .../extensions/v1beta1/rollbackconfig.go | 4 +- .../v1beta1/rollingupdatedaemonset.go | 4 +- .../v1beta1/rollingupdatedeployment.go | 4 +- .../extensions/v1beta1/scale.go | 58 +- .../v1/exemptprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1/flowdistinguishermethod.go | 10 +- .../flowcontrol/v1/flowschema.go | 76 +- .../flowcontrol/v1/flowschemacondition.go | 20 +- .../flowcontrol/v1/flowschemaspec.go | 4 +- .../flowcontrol/v1/flowschemastatus.go | 4 +- .../flowcontrol/v1/groupsubject.go | 4 +- .../v1/limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1/limitresponse.go | 10 +- .../flowcontrol/v1/nonresourcepolicyrule.go | 4 +- .../flowcontrol/v1/policyruleswithsubjects.go | 4 +- .../v1/prioritylevelconfiguration.go | 76 +- .../v1/prioritylevelconfigurationcondition.go | 20 +- .../v1/prioritylevelconfigurationreference.go | 4 +- .../v1/prioritylevelconfigurationspec.go | 10 +- .../v1/prioritylevelconfigurationstatus.go | 4 +- .../flowcontrol/v1/queuingconfiguration.go | 4 +- .../flowcontrol/v1/resourcepolicyrule.go | 4 +- .../flowcontrol/v1/serviceaccountsubject.go | 4 +- .../flowcontrol/v1/subject.go | 10 +- .../flowcontrol/v1/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta1/flowdistinguishermethod.go | 10 +- .../flowcontrol/v1beta1/flowschema.go | 48 +- .../v1beta1/flowschemacondition.go | 20 +- .../flowcontrol/v1beta1/flowschemaspec.go | 4 +- .../flowcontrol/v1beta1/flowschemastatus.go | 4 +- .../flowcontrol/v1beta1/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1beta1/limitresponse.go | 10 +- .../v1beta1/nonresourcepolicyrule.go | 4 +- .../v1beta1/policyruleswithsubjects.go | 4 +- .../v1beta1/prioritylevelconfiguration.go | 48 +- .../prioritylevelconfigurationcondition.go | 20 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta1/prioritylevelconfigurationspec.go | 10 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta1/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta1/resourcepolicyrule.go | 4 +- .../v1beta1/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta1/subject.go | 10 +- .../flowcontrol/v1beta1/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta2/flowdistinguishermethod.go | 10 +- .../flowcontrol/v1beta2/flowschema.go | 48 +- .../v1beta2/flowschemacondition.go | 20 +- .../flowcontrol/v1beta2/flowschemaspec.go | 4 +- .../flowcontrol/v1beta2/flowschemastatus.go | 4 +- .../flowcontrol/v1beta2/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- 
.../flowcontrol/v1beta2/limitresponse.go | 10 +- .../v1beta2/nonresourcepolicyrule.go | 4 +- .../v1beta2/policyruleswithsubjects.go | 4 +- .../v1beta2/prioritylevelconfiguration.go | 48 +- .../prioritylevelconfigurationcondition.go | 20 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta2/prioritylevelconfigurationspec.go | 10 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta2/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta2/resourcepolicyrule.go | 4 +- .../v1beta2/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta2/subject.go | 10 +- .../flowcontrol/v1beta2/usersubject.go | 4 +- .../exemptprioritylevelconfiguration.go | 4 +- .../v1beta3/flowdistinguishermethod.go | 10 +- .../flowcontrol/v1beta3/flowschema.go | 48 +- .../v1beta3/flowschemacondition.go | 20 +- .../flowcontrol/v1beta3/flowschemaspec.go | 4 +- .../flowcontrol/v1beta3/flowschemastatus.go | 4 +- .../flowcontrol/v1beta3/groupsubject.go | 4 +- .../limitedprioritylevelconfiguration.go | 4 +- .../flowcontrol/v1beta3/limitresponse.go | 10 +- .../v1beta3/nonresourcepolicyrule.go | 4 +- .../v1beta3/policyruleswithsubjects.go | 4 +- .../v1beta3/prioritylevelconfiguration.go | 48 +- .../prioritylevelconfigurationcondition.go | 20 +- .../prioritylevelconfigurationreference.go | 4 +- .../v1beta3/prioritylevelconfigurationspec.go | 10 +- .../prioritylevelconfigurationstatus.go | 4 +- .../v1beta3/queuingconfiguration.go | 4 +- .../flowcontrol/v1beta3/resourcepolicyrule.go | 4 +- .../v1beta3/serviceaccountsubject.go | 4 +- .../flowcontrol/v1beta3/subject.go | 10 +- .../flowcontrol/v1beta3/usersubject.go | 4 +- .../imagepolicy/v1alpha1/imagereview.go | 262 + .../v1alpha1/imagereviewcontainerspec.go | 39 + .../imagepolicy/v1alpha1/imagereviewspec.go | 68 + .../imagepolicy/v1alpha1/imagereviewstatus.go | 63 + .../applyconfigurations/internal/internal.go | 5245 ++-- .../applyconfigurations/meta/v1/condition.go | 22 +- .../meta/v1/deleteoptions.go | 29 +- .../meta/v1/labelselector.go | 4 +- .../meta/v1/labelselectorrequirement.go | 14 +- .../meta/v1/managedfieldsentry.go | 26 +- .../applyconfigurations/meta/v1/objectmeta.go | 19 +- .../meta/v1/ownerreference.go | 4 +- .../meta/v1/preconditions.go | 4 +- .../applyconfigurations/meta/v1/typemeta.go | 4 +- .../networking/v1/httpingresspath.go | 10 +- .../networking/v1/httpingressrulevalue.go | 4 +- .../networking/v1/ingress.go | 76 +- .../networking/v1/ingressbackend.go | 4 +- .../networking/v1/ingressclass.go | 74 +- .../v1/ingressclassparametersreference.go | 4 +- .../networking/v1/ingressclassspec.go | 4 +- .../v1/ingressloadbalanceringress.go | 4 +- .../v1/ingressloadbalancerstatus.go | 4 +- .../networking/v1/ingressportstatus.go | 14 +- .../networking/v1/ingressrule.go | 8 +- .../networking/v1/ingressrulevalue.go | 4 +- .../networking/v1/ingressservicebackend.go | 4 +- .../networking/v1/ingressspec.go | 4 +- .../networking/v1/ingressstatus.go | 4 +- .../networking/v1/ingresstls.go | 4 +- .../networking/v1/ipblock.go | 4 +- .../networking/v1/networkpolicy.go | 74 +- .../networking/v1/networkpolicyegressrule.go | 4 +- .../networking/v1/networkpolicyingressrule.go | 4 +- .../networking/v1/networkpolicypeer.go | 16 +- .../networking/v1/networkpolicyport.go | 10 +- .../networking/v1/networkpolicyspec.go | 16 +- .../networking/v1/servicebackendport.go | 4 +- .../networking/v1alpha1/ipaddress.go | 48 +- .../networking/v1alpha1/ipaddressspec.go | 4 +- .../networking/v1alpha1/parentreference.go | 4 +- .../networking/v1alpha1/servicecidr.go | 48 +- 
.../networking/v1alpha1/servicecidrspec.go | 4 +- .../networking/v1alpha1/servicecidrstatus.go | 4 +- .../networking/v1beta1/httpingresspath.go | 10 +- .../v1beta1/httpingressrulevalue.go | 4 +- .../networking/v1beta1/ingress.go | 48 +- .../networking/v1beta1/ingressbackend.go | 4 +- .../networking/v1beta1/ingressclass.go | 48 +- .../ingressclassparametersreference.go | 4 +- .../networking/v1beta1/ingressclassspec.go | 4 +- .../v1beta1/ingressloadbalanceringress.go | 4 +- .../v1beta1/ingressloadbalancerstatus.go | 4 +- .../networking/v1beta1/ingressportstatus.go | 4 +- .../networking/v1beta1/ingressrule.go | 8 +- .../networking/v1beta1/ingressrulevalue.go | 4 +- .../networking/v1beta1/ingressspec.go | 4 +- .../networking/v1beta1/ingressstatus.go | 4 +- .../networking/v1beta1/ingresstls.go | 4 +- .../networking/v1beta1/ipaddress.go | 253 + .../networking/v1beta1/ipaddressspec.go | 39 + .../v1beta1/parentreference.go} | 50 +- .../v1beta1/servicecidr.go} | 136 +- .../networking/v1beta1/servicecidrspec.go | 41 + .../networking/v1beta1/servicecidrstatus.go | 48 + .../applyconfigurations/node/v1/overhead.go | 10 +- .../node/v1/runtimeclass.go | 78 +- .../applyconfigurations/node/v1/scheduling.go | 12 +- .../node/v1alpha1/overhead.go | 4 +- .../node/v1alpha1/runtimeclass.go | 48 +- .../node/v1alpha1/runtimeclassspec.go | 4 +- .../node/v1alpha1/scheduling.go | 4 +- .../node/v1beta1/overhead.go | 4 +- .../node/v1beta1/runtimeclass.go | 48 +- .../node/v1beta1/scheduling.go | 4 +- .../applyconfigurations/policy/v1/eviction.go | 68 +- .../policy/v1/poddisruptionbudget.go | 76 +- .../policy/v1/poddisruptionbudgetspec.go | 10 +- .../policy/v1/poddisruptionbudgetstatus.go | 28 +- .../policy/v1beta1/eviction.go | 56 +- .../policy/v1beta1/poddisruptionbudget.go | 48 +- .../policy/v1beta1/poddisruptionbudgetspec.go | 16 +- .../v1beta1/poddisruptionbudgetstatus.go | 4 +- .../rbac/v1/aggregationrule.go | 10 +- .../rbac/v1/clusterrole.go | 76 +- .../rbac/v1/clusterrolebinding.go | 76 +- .../applyconfigurations/rbac/v1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1/role.go | 74 +- .../rbac/v1/rolebinding.go | 76 +- .../applyconfigurations/rbac/v1/roleref.go | 4 +- .../applyconfigurations/rbac/v1/subject.go | 4 +- .../rbac/v1alpha1/aggregationrule.go | 4 +- .../rbac/v1alpha1/clusterrole.go | 48 +- .../rbac/v1alpha1/clusterrolebinding.go | 48 +- .../rbac/v1alpha1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1alpha1/role.go | 48 +- .../rbac/v1alpha1/rolebinding.go | 48 +- .../rbac/v1alpha1/roleref.go | 4 +- .../rbac/v1alpha1/subject.go | 4 +- .../rbac/v1beta1/aggregationrule.go | 4 +- .../rbac/v1beta1/clusterrole.go | 48 +- .../rbac/v1beta1/clusterrolebinding.go | 48 +- .../rbac/v1beta1/policyrule.go | 4 +- .../applyconfigurations/rbac/v1beta1/role.go | 48 +- .../rbac/v1beta1/rolebinding.go | 48 +- .../rbac/v1beta1/roleref.go | 4 +- .../rbac/v1beta1/subject.go | 4 +- .../resource/v1alpha2/allocationresult.go | 66 - .../v1alpha2/podschedulingcontextspec.go | 50 - .../v1alpha2/podschedulingcontextstatus.go | 44 - .../resourceclaimparametersreference.go | 57 - .../resource/v1alpha2/resourceclaimspec.go | 61 - .../resource/v1alpha2/resourcehandle.go | 48 - .../v1alpha3/allocateddevicestatus.go | 94 + .../resource/v1alpha3/allocationresult.go | 52 + .../resource/v1alpha3/basicdevice.go | 65 + .../resource/v1alpha3/celdeviceselector.go | 39 + .../resource/v1alpha3/device.go | 48 + .../v1alpha3/deviceallocationconfiguration.go | 63 + .../v1alpha3/deviceallocationresult.go | 58 + 
.../resource/v1alpha3/deviceattribute.go | 66 + .../resource/v1alpha3/deviceclaim.go | 72 + .../v1alpha3/deviceclaimconfiguration.go | 50 + .../resource/v1alpha3/deviceclass.go | 253 + .../v1alpha3/deviceclassconfiguration.go | 39 + .../resource/v1alpha3/deviceclassspec.go | 58 + .../resource/v1alpha3/deviceconfiguration.go | 39 + .../resource/v1alpha3/deviceconstraint.go | 54 + .../resource/v1alpha3/devicerequest.go | 93 + .../v1alpha3/devicerequestallocationresult.go | 75 + .../resource/v1alpha3/deviceselector.go | 39 + .../resource/v1alpha3/networkdevicedata.go | 59 + .../v1alpha3/opaquedeviceconfiguration.go | 52 + .../{v1alpha2 => v1alpha3}/resourceclaim.go | 64 +- .../resourceclaimconsumerreference.go | 6 +- .../resource/v1alpha3/resourceclaimspec.go | 39 + .../resourceclaimstatus.go | 36 +- .../resourceclaimtemplate.go | 64 +- .../v1alpha3/resourceclaimtemplatespec.go | 194 + .../resource/v1alpha3/resourcepool.go | 57 + .../resourceslice.go} | 151 +- .../resource/v1alpha3/resourceslicespec.go | 93 + .../resource/v1beta1/allocateddevicestatus.go | 94 + .../resource/v1beta1/allocationresult.go | 52 + .../resource/v1beta1/basicdevice.go | 64 + .../resource/v1beta1/celdeviceselector.go | 39 + .../resource/v1beta1/device.go | 48 + .../v1beta1/deviceallocationconfiguration.go | 63 + .../v1beta1/deviceallocationresult.go | 58 + .../resource/v1beta1/deviceattribute.go | 66 + .../resource/v1beta1/devicecapacity.go | 43 + .../resource/v1beta1/deviceclaim.go | 72 + .../v1beta1/deviceclaimconfiguration.go | 50 + .../resource/v1beta1/deviceclass.go | 253 + .../v1beta1/deviceclassconfiguration.go | 39 + .../resource/v1beta1/deviceclassspec.go | 58 + .../resource/v1beta1/deviceconfiguration.go | 39 + .../resource/v1beta1/deviceconstraint.go | 54 + .../resource/v1beta1/devicerequest.go | 93 + .../v1beta1/devicerequestallocationresult.go | 75 + .../resource/v1beta1/deviceselector.go | 39 + .../resource/v1beta1/networkdevicedata.go | 59 + .../v1beta1/opaquedeviceconfiguration.go | 52 + .../resource/v1beta1/resourceclaim.go | 264 + .../v1beta1/resourceclaimconsumerreference.go | 70 + .../resource/v1beta1/resourceclaimspec.go | 39 + .../resource/v1beta1/resourceclaimstatus.go | 67 + .../resource/v1beta1/resourceclaimtemplate.go | 255 + .../resourceclaimtemplatespec.go | 46 +- .../resource/v1beta1/resourcepool.go | 57 + .../resource/v1beta1/resourceslice.go | 253 + .../resource/v1beta1/resourceslicespec.go | 93 + .../scheduling/v1/priorityclass.go | 72 +- .../scheduling/v1alpha1/priorityclass.go | 56 +- .../scheduling/v1beta1/priorityclass.go | 56 +- .../storage/v1/csidriver.go | 74 +- .../storage/v1/csidriverspec.go | 14 +- .../applyconfigurations/storage/v1/csinode.go | 74 +- .../storage/v1/csinodedriver.go | 4 +- .../storage/v1/csinodespec.go | 4 +- .../storage/v1/csistoragecapacity.go | 74 +- .../storage/v1/storageclass.go | 78 +- .../storage/v1/tokenrequest.go | 4 +- .../storage/v1/volumeattachment.go | 76 +- .../storage/v1/volumeattachmentsource.go | 12 +- .../storage/v1/volumeattachmentspec.go | 4 +- .../storage/v1/volumeattachmentstatus.go | 4 +- .../storage/v1/volumeerror.go | 12 +- .../storage/v1/volumenoderesources.go | 4 +- .../storage/v1alpha1/csistoragecapacity.go | 56 +- .../storage/v1alpha1/volumeattachment.go | 48 +- .../v1alpha1/volumeattachmentsource.go | 4 +- .../storage/v1alpha1/volumeattachmentspec.go | 4 +- .../v1alpha1/volumeattachmentstatus.go | 4 +- .../storage/v1alpha1/volumeattributesclass.go | 56 +- .../storage/v1alpha1/volumeerror.go | 4 +- .../storage/v1beta1/csidriver.go 
| 48 +- .../storage/v1beta1/csidriverspec.go | 26 +- .../storage/v1beta1/csinode.go | 48 +- .../storage/v1beta1/csinodedriver.go | 4 +- .../storage/v1beta1/csinodespec.go | 4 +- .../storage/v1beta1/csistoragecapacity.go | 56 +- .../storage/v1beta1/storageclass.go | 60 +- .../storage/v1beta1/tokenrequest.go | 4 +- .../storage/v1beta1/volumeattachment.go | 48 +- .../storage/v1beta1/volumeattachmentsource.go | 4 +- .../storage/v1beta1/volumeattachmentspec.go | 4 +- .../storage/v1beta1/volumeattachmentstatus.go | 4 +- .../storage/v1beta1/volumeattributesclass.go | 268 + .../storage/v1beta1/volumeerror.go | 4 +- .../storage/v1beta1/volumenoderesources.go | 4 +- .../v1alpha1/groupversionresource.go | 57 + .../v1alpha1/migrationcondition.go | 81 + .../v1alpha1/storageversionmigration.go | 262 + .../v1alpha1/storageversionmigrationspec.go | 48 + .../v1alpha1/storageversionmigrationstatus.go | 53 + .../client-go/applyconfigurations/utils.go | 1814 ++ .../discovery/aggregated_discovery.go | 124 +- .../client-go/discovery/discovery_client.go | 29 +- .../client-go/discovery/fake/discovery.go | 12 +- vendor/k8s.io/client-go/dynamic/scheme.go | 66 +- vendor/k8s.io/client-go/dynamic/simple.go | 223 +- vendor/k8s.io/client-go/features/envvar.go | 188 + vendor/k8s.io/client-go/features/features.go | 143 + .../client-go/features/known_features.go | 77 + vendor/k8s.io/client-go/gentype/fake.go | 304 + vendor/k8s.io/client-go/gentype/type.go | 387 + .../admissionregistration/v1/interface.go | 14 + .../v1/mutatingwebhookconfiguration.go | 16 +- .../v1/validatingadmissionpolicy.go | 89 + .../v1/validatingadmissionpolicybinding.go | 89 + .../v1/validatingwebhookconfiguration.go | 16 +- .../v1alpha1/interface.go | 14 + .../v1alpha1/mutatingadmissionpolicy.go | 89 + .../mutatingadmissionpolicybinding.go | 89 + .../v1alpha1/validatingadmissionpolicy.go | 16 +- .../validatingadmissionpolicybinding.go | 16 +- .../v1beta1/mutatingwebhookconfiguration.go | 16 +- .../v1beta1/validatingadmissionpolicy.go | 16 +- .../validatingadmissionpolicybinding.go | 16 +- .../v1beta1/validatingwebhookconfiguration.go | 16 +- .../v1alpha1/storageversion.go | 16 +- .../informers/apps/v1/controllerrevision.go | 16 +- .../client-go/informers/apps/v1/daemonset.go | 16 +- .../client-go/informers/apps/v1/deployment.go | 16 +- .../client-go/informers/apps/v1/replicaset.go | 16 +- .../informers/apps/v1/statefulset.go | 16 +- .../apps/v1beta1/controllerrevision.go | 16 +- .../informers/apps/v1beta1/deployment.go | 16 +- .../informers/apps/v1beta1/statefulset.go | 16 +- .../apps/v1beta2/controllerrevision.go | 16 +- .../informers/apps/v1beta2/daemonset.go | 16 +- .../informers/apps/v1beta2/deployment.go | 16 +- .../informers/apps/v1beta2/replicaset.go | 16 +- .../informers/apps/v1beta2/statefulset.go | 16 +- .../autoscaling/v1/horizontalpodautoscaler.go | 16 +- .../autoscaling/v2/horizontalpodautoscaler.go | 16 +- .../v2beta1/horizontalpodautoscaler.go | 16 +- .../v2beta2/horizontalpodautoscaler.go | 16 +- .../client-go/informers/batch/v1/cronjob.go | 16 +- .../client-go/informers/batch/v1/job.go | 16 +- .../informers/batch/v1beta1/cronjob.go | 16 +- .../v1/certificatesigningrequest.go | 16 +- .../v1alpha1/clustertrustbundle.go | 16 +- .../v1beta1/certificatesigningrequest.go | 16 +- .../informers/coordination/interface.go | 8 + .../informers/coordination/v1/lease.go | 16 +- .../coordination/v1alpha2/interface.go | 45 + .../coordination/v1alpha2/leasecandidate.go | 90 + .../informers/coordination/v1beta1/lease.go | 16 +- 
.../informers/core/v1/componentstatus.go | 16 +- .../client-go/informers/core/v1/configmap.go | 16 +- .../client-go/informers/core/v1/endpoints.go | 16 +- .../client-go/informers/core/v1/event.go | 16 +- .../client-go/informers/core/v1/limitrange.go | 16 +- .../client-go/informers/core/v1/namespace.go | 16 +- .../client-go/informers/core/v1/node.go | 16 +- .../informers/core/v1/persistentvolume.go | 16 +- .../core/v1/persistentvolumeclaim.go | 16 +- .../k8s.io/client-go/informers/core/v1/pod.go | 16 +- .../informers/core/v1/podtemplate.go | 16 +- .../core/v1/replicationcontroller.go | 16 +- .../informers/core/v1/resourcequota.go | 16 +- .../client-go/informers/core/v1/secret.go | 16 +- .../client-go/informers/core/v1/service.go | 16 +- .../informers/core/v1/serviceaccount.go | 16 +- .../informers/discovery/v1/endpointslice.go | 16 +- .../discovery/v1beta1/endpointslice.go | 16 +- vendor/k8s.io/client-go/informers/doc.go | 2 +- .../client-go/informers/events/v1/event.go | 16 +- .../informers/events/v1beta1/event.go | 16 +- .../informers/extensions/v1beta1/daemonset.go | 16 +- .../extensions/v1beta1/deployment.go | 16 +- .../informers/extensions/v1beta1/ingress.go | 16 +- .../extensions/v1beta1/networkpolicy.go | 16 +- .../extensions/v1beta1/replicaset.go | 16 +- vendor/k8s.io/client-go/informers/factory.go | 7 + .../informers/flowcontrol/v1/flowschema.go | 16 +- .../v1/prioritylevelconfiguration.go | 16 +- .../flowcontrol/v1beta1/flowschema.go | 16 +- .../v1beta1/prioritylevelconfiguration.go | 16 +- .../flowcontrol/v1beta2/flowschema.go | 16 +- .../v1beta2/prioritylevelconfiguration.go | 16 +- .../flowcontrol/v1beta3/flowschema.go | 16 +- .../v1beta3/prioritylevelconfiguration.go | 16 +- vendor/k8s.io/client-go/informers/generic.go | 57 +- .../informers/networking/v1/ingress.go | 16 +- .../informers/networking/v1/ingressclass.go | 16 +- .../informers/networking/v1/networkpolicy.go | 16 +- .../networking/v1alpha1/ipaddress.go | 16 +- .../networking/v1alpha1/servicecidr.go | 16 +- .../informers/networking/v1beta1/ingress.go | 16 +- .../networking/v1beta1/ingressclass.go | 16 +- .../informers/networking/v1beta1/interface.go | 14 + .../informers/networking/v1beta1/ipaddress.go | 89 + .../networking/v1beta1/servicecidr.go | 89 + .../informers/node/v1/runtimeclass.go | 16 +- .../informers/node/v1alpha1/runtimeclass.go | 16 +- .../informers/node/v1beta1/runtimeclass.go | 16 +- .../policy/v1/poddisruptionbudget.go | 16 +- .../policy/v1beta1/poddisruptionbudget.go | 16 +- .../informers/rbac/v1/clusterrole.go | 16 +- .../informers/rbac/v1/clusterrolebinding.go | 16 +- .../client-go/informers/rbac/v1/role.go | 16 +- .../informers/rbac/v1/rolebinding.go | 16 +- .../informers/rbac/v1alpha1/clusterrole.go | 16 +- .../rbac/v1alpha1/clusterrolebinding.go | 16 +- .../client-go/informers/rbac/v1alpha1/role.go | 16 +- .../informers/rbac/v1alpha1/rolebinding.go | 16 +- .../informers/rbac/v1beta1/clusterrole.go | 16 +- .../rbac/v1beta1/clusterrolebinding.go | 16 +- .../client-go/informers/rbac/v1beta1/role.go | 16 +- .../informers/rbac/v1beta1/rolebinding.go | 16 +- .../client-go/informers/resource/interface.go | 20 +- .../resource/v1alpha3/deviceclass.go | 89 + .../{v1alpha2 => v1alpha3}/interface.go | 22 +- .../{v1alpha2 => v1alpha3}/resourceclaim.go | 22 +- .../resourceclaimtemplate.go | 22 +- .../resourceslice.go} | 46 +- .../informers/resource/v1beta1/deviceclass.go | 89 + .../informers/resource/v1beta1/interface.go | 66 + .../resource/v1beta1/resourceclaim.go | 90 + .../resourceclaimtemplate.go} | 46 +- 
.../resource/v1beta1/resourceslice.go | 89 + .../informers/scheduling/v1/priorityclass.go | 16 +- .../scheduling/v1alpha1/priorityclass.go | 16 +- .../scheduling/v1beta1/priorityclass.go | 16 +- .../informers/storage/v1/csidriver.go | 16 +- .../client-go/informers/storage/v1/csinode.go | 16 +- .../storage/v1/csistoragecapacity.go | 16 +- .../informers/storage/v1/storageclass.go | 16 +- .../informers/storage/v1/volumeattachment.go | 16 +- .../storage/v1alpha1/csistoragecapacity.go | 16 +- .../storage/v1alpha1/volumeattachment.go | 16 +- .../storage/v1alpha1/volumeattributesclass.go | 16 +- .../informers/storage/v1beta1/csidriver.go | 16 +- .../informers/storage/v1beta1/csinode.go | 16 +- .../storage/v1beta1/csistoragecapacity.go | 16 +- .../informers/storage/v1beta1/interface.go | 7 + .../informers/storage/v1beta1/storageclass.go | 16 +- .../storage/v1beta1/volumeattachment.go | 16 +- .../storage/v1beta1/volumeattributesclass.go | 89 + .../informers/storagemigration/interface.go | 46 + .../storagemigration/v1alpha1/interface.go | 45 + .../v1alpha1/storageversionmigration.go | 89 + .../k8s.io/client-go/kubernetes/clientset.go | 59 +- vendor/k8s.io/client-go/kubernetes/doc.go | 2 +- .../kubernetes/fake/clientset_generated.go | 70 +- .../client-go/kubernetes/fake/register.go | 10 +- .../client-go/kubernetes/scheme/register.go | 10 +- .../v1/admissionregistration_client.go | 20 +- .../fake/fake_admissionregistration_client.go | 12 +- .../fake/fake_mutatingwebhookconfiguration.go | 138 +- .../v1/fake/fake_validatingadmissionpolicy.go | 53 + .../fake_validatingadmissionpolicybinding.go | 53 + .../fake_validatingwebhookconfiguration.go | 138 +- .../v1/generated_expansion.go | 4 + .../v1/mutatingwebhookconfiguration.go | 170 +- .../v1/validatingadmissionpolicy.go | 79 + .../v1/validatingadmissionpolicybinding.go | 75 + .../v1/validatingwebhookconfiguration.go | 170 +- .../v1alpha1/admissionregistration_client.go | 20 +- .../fake/fake_admissionregistration_client.go | 12 +- .../fake/fake_mutatingadmissionpolicy.go | 53 + .../fake_mutatingadmissionpolicybinding.go | 55 + .../fake/fake_validatingadmissionpolicy.go | 171 +- .../fake_validatingadmissionpolicybinding.go | 140 +- .../v1alpha1/generated_expansion.go | 4 + .../v1alpha1/mutatingadmissionpolicy.go | 75 + .../mutatingadmissionpolicybinding.go | 75 + .../v1alpha1/validatingadmissionpolicy.go | 220 +- .../validatingadmissionpolicybinding.go | 170 +- .../v1beta1/admissionregistration_client.go | 10 +- .../fake/fake_admissionregistration_client.go | 8 +- .../fake/fake_mutatingwebhookconfiguration.go | 138 +- .../fake/fake_validatingadmissionpolicy.go | 171 +- .../fake_validatingadmissionpolicybinding.go | 140 +- .../fake_validatingwebhookconfiguration.go | 140 +- .../v1beta1/mutatingwebhookconfiguration.go | 170 +- .../v1beta1/validatingadmissionpolicy.go | 220 +- .../validatingadmissionpolicybinding.go | 170 +- .../v1beta1/validatingwebhookconfiguration.go | 170 +- .../v1alpha1/apiserverinternal_client.go | 10 +- .../fake/fake_apiserverinternal_client.go | 2 +- .../v1alpha1/fake/fake_storageversion.go | 171 +- .../v1alpha1/storageversion.go | 218 +- .../kubernetes/typed/apps/v1/apps_client.go | 10 +- .../typed/apps/v1/controllerrevision.go | 177 +- .../kubernetes/typed/apps/v1/daemonset.go | 229 +- .../kubernetes/typed/apps/v1/deployment.go | 249 +- .../typed/apps/v1/fake/fake_apps_client.go | 10 +- .../apps/v1/fake/fake_controllerrevision.go | 147 +- .../typed/apps/v1/fake/fake_daemonset.go | 178 +- .../typed/apps/v1/fake/fake_deployment.go | 
196 +- .../typed/apps/v1/fake/fake_replicaset.go | 196 +- .../typed/apps/v1/fake/fake_statefulset.go | 196 +- .../kubernetes/typed/apps/v1/replicaset.go | 249 +- .../kubernetes/typed/apps/v1/statefulset.go | 249 +- .../typed/apps/v1beta1/apps_client.go | 10 +- .../typed/apps/v1beta1/controllerrevision.go | 177 +- .../typed/apps/v1beta1/deployment.go | 229 +- .../apps/v1beta1/fake/fake_apps_client.go | 6 +- .../v1beta1/fake/fake_controllerrevision.go | 147 +- .../apps/v1beta1/fake/fake_deployment.go | 180 +- .../apps/v1beta1/fake/fake_statefulset.go | 180 +- .../typed/apps/v1beta1/statefulset.go | 229 +- .../typed/apps/v1beta2/apps_client.go | 10 +- .../typed/apps/v1beta2/controllerrevision.go | 177 +- .../typed/apps/v1beta2/daemonset.go | 229 +- .../typed/apps/v1beta2/deployment.go | 229 +- .../apps/v1beta2/fake/fake_apps_client.go | 10 +- .../v1beta2/fake/fake_controllerrevision.go | 147 +- .../typed/apps/v1beta2/fake/fake_daemonset.go | 180 +- .../apps/v1beta2/fake/fake_deployment.go | 180 +- .../apps/v1beta2/fake/fake_replicaset.go | 180 +- .../apps/v1beta2/fake/fake_statefulset.go | 198 +- .../typed/apps/v1beta2/replicaset.go | 229 +- .../typed/apps/v1beta2/statefulset.go | 269 +- .../v1/authentication_client.go | 10 +- .../v1/fake/fake_authentication_client.go | 4 +- .../v1/fake/fake_selfsubjectreview.go | 32 +- .../v1/fake/fake_tokenreview.go | 32 +- .../authentication/v1/selfsubjectreview.go | 31 +- .../typed/authentication/v1/tokenreview.go | 31 +- .../v1alpha1/authentication_client.go | 10 +- .../fake/fake_authentication_client.go | 2 +- .../v1alpha1/fake/fake_selfsubjectreview.go | 32 +- .../v1alpha1/selfsubjectreview.go | 31 +- .../v1beta1/authentication_client.go | 10 +- .../fake/fake_authentication_client.go | 4 +- .../v1beta1/fake/fake_selfsubjectreview.go | 32 +- .../v1beta1/fake/fake_tokenreview.go | 32 +- .../v1beta1/selfsubjectreview.go | 31 +- .../authentication/v1beta1/tokenreview.go | 31 +- .../authorization/v1/authorization_client.go | 10 +- .../v1/fake/fake_authorization_client.go | 8 +- .../v1/fake/fake_localsubjectaccessreview.go | 34 +- .../v1/fake/fake_selfsubjectaccessreview.go | 32 +- .../v1/fake/fake_selfsubjectrulesreview.go | 32 +- .../v1/fake/fake_subjectaccessreview.go | 32 +- .../v1/localsubjectaccessreview.go | 34 +- .../v1/selfsubjectaccessreview.go | 31 +- .../v1/selfsubjectrulesreview.go | 31 +- .../authorization/v1/subjectaccessreview.go | 31 +- .../v1beta1/authorization_client.go | 10 +- .../v1beta1/fake/fake_authorization_client.go | 8 +- .../fake/fake_localsubjectaccessreview.go | 34 +- .../fake/fake_selfsubjectaccessreview.go | 32 +- .../fake/fake_selfsubjectrulesreview.go | 32 +- .../v1beta1/fake/fake_subjectaccessreview.go | 32 +- .../v1beta1/localsubjectaccessreview.go | 36 +- .../v1beta1/selfsubjectaccessreview.go | 33 +- .../v1beta1/selfsubjectrulesreview.go | 33 +- .../v1beta1/subjectaccessreview.go | 31 +- .../autoscaling/v1/autoscaling_client.go | 10 +- .../v1/fake/fake_autoscaling_client.go | 2 +- .../v1/fake/fake_horizontalpodautoscaler.go | 182 +- .../autoscaling/v1/horizontalpodautoscaler.go | 229 +- .../autoscaling/v2/autoscaling_client.go | 10 +- .../v2/fake/fake_autoscaling_client.go | 2 +- .../v2/fake/fake_horizontalpodautoscaler.go | 182 +- .../autoscaling/v2/horizontalpodautoscaler.go | 229 +- .../autoscaling/v2beta1/autoscaling_client.go | 10 +- .../v2beta1/fake/fake_autoscaling_client.go | 2 +- .../fake/fake_horizontalpodautoscaler.go | 182 +- .../v2beta1/horizontalpodautoscaler.go | 233 +- 
.../autoscaling/v2beta2/autoscaling_client.go | 10 +- .../v2beta2/fake/fake_autoscaling_client.go | 2 +- .../fake/fake_horizontalpodautoscaler.go | 182 +- .../v2beta2/horizontalpodautoscaler.go | 233 +- .../kubernetes/typed/batch/v1/batch_client.go | 10 +- .../kubernetes/typed/batch/v1/cronjob.go | 229 +- .../typed/batch/v1/fake/fake_batch_client.go | 4 +- .../typed/batch/v1/fake/fake_cronjob.go | 178 +- .../typed/batch/v1/fake/fake_job.go | 178 +- .../kubernetes/typed/batch/v1/job.go | 229 +- .../typed/batch/v1beta1/batch_client.go | 10 +- .../kubernetes/typed/batch/v1beta1/cronjob.go | 229 +- .../batch/v1beta1/fake/fake_batch_client.go | 2 +- .../typed/batch/v1beta1/fake/fake_cronjob.go | 180 +- .../certificates/v1/certificates_client.go | 10 +- .../v1/certificatesigningrequest.go | 227 +- .../v1/fake/fake_certificates_client.go | 2 +- .../v1/fake/fake_certificatesigningrequest.go | 176 +- .../v1alpha1/certificates_client.go | 10 +- .../v1alpha1/clustertrustbundle.go | 168 +- .../v1alpha1/fake/fake_certificates_client.go | 2 +- .../v1alpha1/fake/fake_clustertrustbundle.go | 138 +- .../v1beta1/certificates_client.go | 10 +- .../v1beta1/certificatesigningrequest.go | 220 +- .../certificatesigningrequest_expansion.go | 2 +- .../v1beta1/fake/fake_certificates_client.go | 2 +- .../fake/fake_certificatesigningrequest.go | 171 +- ...ake_certificatesigningrequest_expansion.go | 4 +- .../coordination/v1/coordination_client.go | 10 +- .../v1/fake/fake_coordination_client.go | 2 +- .../typed/coordination/v1/fake/fake_lease.go | 143 +- .../kubernetes/typed/coordination/v1/lease.go | 177 +- .../v1alpha2/coordination_client.go | 107 + .../v1alpha2/doc.go | 0 .../v1alpha2/fake/doc.go | 0 .../v1alpha2/fake/fake_coordination_client.go | 40 + .../v1alpha2/fake/fake_leasecandidate.go | 53 + .../v1alpha2/generated_expansion.go | 21 + .../coordination/v1alpha2/leasecandidate.go | 71 + .../v1beta1/coordination_client.go | 10 +- .../v1beta1/fake/fake_coordination_client.go | 2 +- .../coordination/v1beta1/fake/fake_lease.go | 143 +- .../typed/coordination/v1beta1/lease.go | 177 +- .../typed/core/v1/componentstatus.go | 166 +- .../kubernetes/typed/core/v1/configmap.go | 177 +- .../kubernetes/typed/core/v1/core_client.go | 10 +- .../kubernetes/typed/core/v1/endpoints.go | 177 +- .../kubernetes/typed/core/v1/event.go | 177 +- .../typed/core/v1/event_expansion.go | 22 +- .../core/v1/fake/fake_componentstatus.go | 136 +- .../typed/core/v1/fake/fake_configmap.go | 143 +- .../typed/core/v1/fake/fake_core_client.go | 32 +- .../typed/core/v1/fake/fake_endpoints.go | 143 +- .../typed/core/v1/fake/fake_event.go | 143 +- .../core/v1/fake/fake_event_expansion.go | 36 +- .../typed/core/v1/fake/fake_limitrange.go | 143 +- .../typed/core/v1/fake/fake_namespace.go | 159 +- .../core/v1/fake/fake_namespace_expansion.go | 6 +- .../typed/core/v1/fake/fake_node.go | 167 +- .../typed/core/v1/fake/fake_node_expansion.go | 4 +- .../core/v1/fake/fake_persistentvolume.go | 169 +- .../v1/fake/fake_persistentvolumeclaim.go | 182 +- .../kubernetes/typed/core/v1/fake/fake_pod.go | 183 +- .../typed/core/v1/fake/fake_pod_expansion.go | 36 +- .../typed/core/v1/fake/fake_podtemplate.go | 143 +- .../v1/fake/fake_replicationcontroller.go | 194 +- .../typed/core/v1/fake/fake_resourcequota.go | 180 +- .../typed/core/v1/fake/fake_secret.go | 143 +- .../typed/core/v1/fake/fake_service.go | 170 +- .../core/v1/fake/fake_service_expansion.go | 4 +- .../typed/core/v1/fake/fake_serviceaccount.go | 150 +- .../kubernetes/typed/core/v1/limitrange.go | 177 +- 
.../kubernetes/typed/core/v1/namespace.go | 201 +- .../typed/core/v1/namespace_expansion.go | 2 +- .../kubernetes/typed/core/v1/node.go | 216 +- .../typed/core/v1/node_expansion.go | 2 +- .../typed/core/v1/persistentvolume.go | 216 +- .../typed/core/v1/persistentvolumeclaim.go | 229 +- .../client-go/kubernetes/typed/core/v1/pod.go | 245 +- .../kubernetes/typed/core/v1/pod_expansion.go | 14 +- .../kubernetes/typed/core/v1/podtemplate.go | 177 +- .../typed/core/v1/replicationcontroller.go | 239 +- .../kubernetes/typed/core/v1/resourcequota.go | 229 +- .../kubernetes/typed/core/v1/secret.go | 177 +- .../kubernetes/typed/core/v1/service.go | 213 +- .../typed/core/v1/service_expansion.go | 4 +- .../typed/core/v1/serviceaccount.go | 182 +- .../typed/discovery/v1/discovery_client.go | 10 +- .../typed/discovery/v1/endpointslice.go | 177 +- .../v1/fake/fake_discovery_client.go | 2 +- .../discovery/v1/fake/fake_endpointslice.go | 145 +- .../discovery/v1beta1/discovery_client.go | 10 +- .../typed/discovery/v1beta1/endpointslice.go | 177 +- .../v1beta1/fake/fake_discovery_client.go | 2 +- .../v1beta1/fake/fake_endpointslice.go | 147 +- .../kubernetes/typed/events/v1/event.go | 177 +- .../typed/events/v1/events_client.go | 10 +- .../typed/events/v1/fake/fake_event.go | 143 +- .../events/v1/fake/fake_events_client.go | 2 +- .../kubernetes/typed/events/v1beta1/event.go | 177 +- .../typed/events/v1beta1/event_expansion.go | 18 +- .../typed/events/v1beta1/events_client.go | 10 +- .../typed/events/v1beta1/fake/fake_event.go | 143 +- .../v1beta1/fake/fake_event_expansion.go | 24 +- .../events/v1beta1/fake/fake_events_client.go | 2 +- .../typed/extensions/v1beta1/daemonset.go | 229 +- .../typed/extensions/v1beta1/deployment.go | 269 +- .../v1beta1/deployment_expansion.go | 2 +- .../extensions/v1beta1/extensions_client.go | 10 +- .../extensions/v1beta1/fake/fake_daemonset.go | 180 +- .../v1beta1/fake/fake_deployment.go | 198 +- .../v1beta1/fake/fake_deployment_expansion.go | 4 +- .../v1beta1/fake/fake_extensions_client.go | 10 +- .../extensions/v1beta1/fake/fake_ingress.go | 180 +- .../v1beta1/fake/fake_networkpolicy.go | 147 +- .../v1beta1/fake/fake_replicaset.go | 198 +- .../typed/extensions/v1beta1/ingress.go | 229 +- .../typed/extensions/v1beta1/networkpolicy.go | 177 +- .../typed/extensions/v1beta1/replicaset.go | 269 +- .../v1/fake/fake_flowcontrol_client.go | 4 +- .../flowcontrol/v1/fake/fake_flowschema.go | 167 +- .../fake/fake_prioritylevelconfiguration.go | 171 +- .../flowcontrol/v1/flowcontrol_client.go | 10 +- .../typed/flowcontrol/v1/flowschema.go | 216 +- .../v1/prioritylevelconfiguration.go | 218 +- .../v1beta1/fake/fake_flowcontrol_client.go | 4 +- .../v1beta1/fake/fake_flowschema.go | 169 +- .../fake/fake_prioritylevelconfiguration.go | 171 +- .../flowcontrol/v1beta1/flowcontrol_client.go | 10 +- .../typed/flowcontrol/v1beta1/flowschema.go | 216 +- .../v1beta1/prioritylevelconfiguration.go | 220 +- .../v1beta2/fake/fake_flowcontrol_client.go | 4 +- .../v1beta2/fake/fake_flowschema.go | 169 +- .../fake/fake_prioritylevelconfiguration.go | 171 +- .../flowcontrol/v1beta2/flowcontrol_client.go | 10 +- .../typed/flowcontrol/v1beta2/flowschema.go | 216 +- .../v1beta2/prioritylevelconfiguration.go | 220 +- .../v1beta3/fake/fake_flowcontrol_client.go | 4 +- .../v1beta3/fake/fake_flowschema.go | 169 +- .../fake/fake_prioritylevelconfiguration.go | 171 +- .../flowcontrol/v1beta3/flowcontrol_client.go | 10 +- .../typed/flowcontrol/v1beta3/flowschema.go | 216 +- .../v1beta3/prioritylevelconfiguration.go | 
220 +- .../typed/networking/v1/fake/fake_ingress.go | 178 +- .../networking/v1/fake/fake_ingressclass.go | 136 +- .../v1/fake/fake_networking_client.go | 6 +- .../networking/v1/fake/fake_networkpolicy.go | 145 +- .../kubernetes/typed/networking/v1/ingress.go | 229 +- .../typed/networking/v1/ingressclass.go | 166 +- .../typed/networking/v1/networking_client.go | 10 +- .../typed/networking/v1/networkpolicy.go | 177 +- .../v1alpha1/fake/fake_ipaddress.go | 136 +- .../v1alpha1/fake/fake_networking_client.go | 4 +- .../v1alpha1/fake/fake_servicecidr.go | 171 +- .../typed/networking/v1alpha1/ipaddress.go | 166 +- .../networking/v1alpha1/networking_client.go | 10 +- .../typed/networking/v1alpha1/servicecidr.go | 216 +- .../networking/v1beta1/fake/fake_ingress.go | 180 +- .../v1beta1/fake/fake_ingressclass.go | 138 +- .../networking/v1beta1/fake/fake_ipaddress.go | 51 + .../v1beta1/fake/fake_networking_client.go | 12 +- .../v1beta1/fake/fake_servicecidr.go | 51 + .../networking/v1beta1/generated_expansion.go | 4 + .../typed/networking/v1beta1/ingress.go | 229 +- .../typed/networking/v1beta1/ingressclass.go | 166 +- .../typed/networking/v1beta1/ipaddress.go | 71 + .../networking/v1beta1/networking_client.go | 20 +- .../typed/networking/v1beta1/servicecidr.go | 75 + .../typed/node/v1/fake/fake_node_client.go | 2 +- .../typed/node/v1/fake/fake_runtimeclass.go | 136 +- .../kubernetes/typed/node/v1/node_client.go | 10 +- .../kubernetes/typed/node/v1/runtimeclass.go | 166 +- .../node/v1alpha1/fake/fake_node_client.go | 2 +- .../node/v1alpha1/fake/fake_runtimeclass.go | 138 +- .../typed/node/v1alpha1/node_client.go | 10 +- .../typed/node/v1alpha1/runtimeclass.go | 166 +- .../node/v1beta1/fake/fake_node_client.go | 2 +- .../node/v1beta1/fake/fake_runtimeclass.go | 138 +- .../typed/node/v1beta1/node_client.go | 10 +- .../typed/node/v1beta1/runtimeclass.go | 166 +- .../kubernetes/typed/policy/v1/eviction.go | 17 +- .../typed/policy/v1/eviction_expansion.go | 2 +- .../typed/policy/v1/fake/fake_eviction.go | 25 +- .../policy/v1/fake/fake_eviction_expansion.go | 4 +- .../v1/fake/fake_poddisruptionbudget.go | 182 +- .../policy/v1/fake/fake_policy_client.go | 4 +- .../typed/policy/v1/poddisruptionbudget.go | 229 +- .../typed/policy/v1/policy_client.go | 10 +- .../typed/policy/v1beta1/eviction.go | 17 +- .../policy/v1beta1/eviction_expansion.go | 2 +- .../policy/v1beta1/fake/fake_eviction.go | 25 +- .../v1beta1/fake/fake_eviction_expansion.go | 4 +- .../v1beta1/fake/fake_poddisruptionbudget.go | 182 +- .../policy/v1beta1/fake/fake_policy_client.go | 4 +- .../policy/v1beta1/poddisruptionbudget.go | 229 +- .../typed/policy/v1beta1/policy_client.go | 10 +- .../kubernetes/typed/rbac/v1/clusterrole.go | 166 +- .../typed/rbac/v1/clusterrolebinding.go | 166 +- .../typed/rbac/v1/fake/fake_clusterrole.go | 134 +- .../rbac/v1/fake/fake_clusterrolebinding.go | 138 +- .../typed/rbac/v1/fake/fake_rbac_client.go | 8 +- .../typed/rbac/v1/fake/fake_role.go | 143 +- .../typed/rbac/v1/fake/fake_rolebinding.go | 143 +- .../kubernetes/typed/rbac/v1/rbac_client.go | 10 +- .../kubernetes/typed/rbac/v1/role.go | 177 +- .../kubernetes/typed/rbac/v1/rolebinding.go | 177 +- .../typed/rbac/v1alpha1/clusterrole.go | 166 +- .../typed/rbac/v1alpha1/clusterrolebinding.go | 166 +- .../rbac/v1alpha1/fake/fake_clusterrole.go | 138 +- .../v1alpha1/fake/fake_clusterrolebinding.go | 138 +- .../rbac/v1alpha1/fake/fake_rbac_client.go | 8 +- .../typed/rbac/v1alpha1/fake/fake_role.go | 143 +- .../rbac/v1alpha1/fake/fake_rolebinding.go | 147 +- 
.../typed/rbac/v1alpha1/rbac_client.go | 10 +- .../kubernetes/typed/rbac/v1alpha1/role.go | 177 +- .../typed/rbac/v1alpha1/rolebinding.go | 177 +- .../typed/rbac/v1beta1/clusterrole.go | 166 +- .../typed/rbac/v1beta1/clusterrolebinding.go | 166 +- .../rbac/v1beta1/fake/fake_clusterrole.go | 136 +- .../v1beta1/fake/fake_clusterrolebinding.go | 138 +- .../rbac/v1beta1/fake/fake_rbac_client.go | 8 +- .../typed/rbac/v1beta1/fake/fake_role.go | 143 +- .../rbac/v1beta1/fake/fake_rolebinding.go | 145 +- .../typed/rbac/v1beta1/rbac_client.go | 10 +- .../kubernetes/typed/rbac/v1beta1/role.go | 177 +- .../typed/rbac/v1beta1/rolebinding.go | 177 +- .../fake/fake_podschedulingcontext.go | 189 - .../v1alpha2/fake/fake_resourceclaim.go | 189 - .../fake/fake_resourceclaimtemplate.go | 154 - .../v1alpha2/fake/fake_resourceclass.go | 145 - .../resource/v1alpha2/podschedulingcontext.go | 256 - .../typed/resource/v1alpha2/resourceclaim.go | 256 - .../v1alpha2/resourceclaimtemplate.go | 208 - .../typed/resource/v1alpha2/resourceclass.go | 197 - .../typed/resource/v1alpha3/deviceclass.go | 71 + .../typed/resource/v1alpha3}/doc.go | 9 +- .../typed/resource/v1alpha3/fake/doc.go | 20 + .../v1alpha3/fake/fake_deviceclass.go | 53 + .../fake/fake_resource_client.go | 22 +- .../v1alpha3/fake/fake_resourceclaim.go | 53 + .../fake/fake_resourceclaimtemplate.go | 53 + .../v1alpha3/fake/fake_resourceslice.go | 53 + .../generated_expansion.go | 6 +- .../{v1alpha2 => v1alpha3}/resource_client.go | 56 +- .../typed/resource/v1alpha3/resourceclaim.go | 75 + .../v1alpha3/resourceclaimtemplate.go | 73 + .../typed/resource/v1alpha3/resourceslice.go | 71 + .../typed/resource/v1beta1/deviceclass.go | 71 + .../kubernetes/typed/resource/v1beta1/doc.go | 20 + .../typed/resource/v1beta1/fake/doc.go | 20 + .../resource/v1beta1/fake/fake_deviceclass.go | 51 + .../v1beta1/fake/fake_resource_client.go | 52 + .../v1beta1/fake/fake_resourceclaim.go | 53 + .../fake/fake_resourceclaimtemplate.go | 53 + .../v1beta1/fake/fake_resourceslice.go | 53 + .../resource/v1beta1/generated_expansion.go | 27 + .../typed/resource/v1beta1/resource_client.go | 122 + .../typed/resource/v1beta1/resourceclaim.go | 75 + .../resource/v1beta1/resourceclaimtemplate.go | 71 + .../typed/resource/v1beta1/resourceslice.go | 71 + .../scheduling/v1/fake/fake_priorityclass.go | 136 +- .../v1/fake/fake_scheduling_client.go | 2 +- .../typed/scheduling/v1/priorityclass.go | 166 +- .../typed/scheduling/v1/scheduling_client.go | 10 +- .../v1alpha1/fake/fake_priorityclass.go | 138 +- .../v1alpha1/fake/fake_scheduling_client.go | 2 +- .../scheduling/v1alpha1/priorityclass.go | 166 +- .../scheduling/v1alpha1/scheduling_client.go | 10 +- .../v1beta1/fake/fake_priorityclass.go | 138 +- .../v1beta1/fake/fake_scheduling_client.go | 2 +- .../typed/scheduling/v1beta1/priorityclass.go | 166 +- .../scheduling/v1beta1/scheduling_client.go | 10 +- .../kubernetes/typed/storage/v1/csidriver.go | 166 +- .../kubernetes/typed/storage/v1/csinode.go | 166 +- .../typed/storage/v1/csistoragecapacity.go | 177 +- .../typed/storage/v1/fake/fake_csidriver.go | 134 +- .../typed/storage/v1/fake/fake_csinode.go | 134 +- .../v1/fake/fake_csistoragecapacity.go | 147 +- .../storage/v1/fake/fake_storage_client.go | 10 +- .../storage/v1/fake/fake_storageclass.go | 136 +- .../storage/v1/fake/fake_volumeattachment.go | 169 +- .../typed/storage/v1/storage_client.go | 10 +- .../typed/storage/v1/storageclass.go | 166 +- .../typed/storage/v1/volumeattachment.go | 216 +- .../storage/v1alpha1/csistoragecapacity.go | 
177 +- .../v1alpha1/fake/fake_csistoragecapacity.go | 147 +- .../v1alpha1/fake/fake_storage_client.go | 6 +- .../v1alpha1/fake/fake_volumeattachment.go | 171 +- .../fake/fake_volumeattributesclass.go | 138 +- .../typed/storage/v1alpha1/storage_client.go | 10 +- .../storage/v1alpha1/volumeattachment.go | 216 +- .../storage/v1alpha1/volumeattributesclass.go | 166 +- .../typed/storage/v1beta1/csidriver.go | 166 +- .../typed/storage/v1beta1/csinode.go | 166 +- .../storage/v1beta1/csistoragecapacity.go | 177 +- .../storage/v1beta1/fake/fake_csidriver.go | 136 +- .../storage/v1beta1/fake/fake_csinode.go | 136 +- .../v1beta1/fake/fake_csistoragecapacity.go | 147 +- .../v1beta1/fake/fake_storage_client.go | 14 +- .../storage/v1beta1/fake/fake_storageclass.go | 138 +- .../v1beta1/fake/fake_volumeattachment.go | 171 +- .../fake/fake_volumeattributesclass.go | 53 + .../storage/v1beta1/generated_expansion.go | 2 + .../typed/storage/v1beta1/storage_client.go | 15 +- .../typed/storage/v1beta1/storageclass.go | 166 +- .../typed/storage/v1beta1/volumeattachment.go | 216 +- .../storage/v1beta1/volumeattributesclass.go | 71 + .../typed/storagemigration/v1alpha1/doc.go | 20 + .../storagemigration/v1alpha1/fake/doc.go | 20 + .../fake/fake_storagemigration_client.go | 40 + .../fake/fake_storageversionmigration.go | 53 + .../v1alpha1/generated_expansion.go | 21 + .../v1alpha1/storagemigration_client.go | 107 + .../v1alpha1/storageversionmigration.go | 79 + .../v1/expansion_generated.go | 8 + .../v1/mutatingwebhookconfiguration.go | 36 +- .../v1/validatingadmissionpolicy.go | 48 + .../v1/validatingadmissionpolicybinding.go | 48 + .../v1/validatingwebhookconfiguration.go | 36 +- .../v1alpha1/expansion_generated.go | 8 + .../v1alpha1/mutatingadmissionpolicy.go | 48 + .../mutatingadmissionpolicybinding.go | 48 + .../v1alpha1/validatingadmissionpolicy.go | 36 +- .../validatingadmissionpolicybinding.go | 36 +- .../v1beta1/mutatingwebhookconfiguration.go | 36 +- .../v1beta1/validatingadmissionpolicy.go | 36 +- .../validatingadmissionpolicybinding.go | 36 +- .../v1beta1/validatingwebhookconfiguration.go | 36 +- .../v1alpha1/storageversion.go | 36 +- .../listers/apps/v1/controllerrevision.go | 51 +- .../client-go/listers/apps/v1/daemonset.go | 51 +- .../client-go/listers/apps/v1/deployment.go | 51 +- .../client-go/listers/apps/v1/replicaset.go | 51 +- .../client-go/listers/apps/v1/statefulset.go | 51 +- .../apps/v1beta1/controllerrevision.go | 51 +- .../listers/apps/v1beta1/deployment.go | 51 +- .../listers/apps/v1beta1/statefulset.go | 51 +- .../apps/v1beta2/controllerrevision.go | 51 +- .../listers/apps/v1beta2/daemonset.go | 51 +- .../listers/apps/v1beta2/deployment.go | 51 +- .../listers/apps/v1beta2/replicaset.go | 51 +- .../listers/apps/v1beta2/statefulset.go | 51 +- .../autoscaling/v1/horizontalpodautoscaler.go | 51 +- .../autoscaling/v2/horizontalpodautoscaler.go | 51 +- .../v2beta1/horizontalpodautoscaler.go | 51 +- .../v2beta2/horizontalpodautoscaler.go | 51 +- .../client-go/listers/batch/v1/cronjob.go | 51 +- .../k8s.io/client-go/listers/batch/v1/job.go | 51 +- .../listers/batch/v1beta1/cronjob.go | 51 +- .../v1/certificatesigningrequest.go | 36 +- .../v1alpha1/clustertrustbundle.go | 36 +- .../v1beta1/certificatesigningrequest.go | 36 +- .../listers/coordination/v1/lease.go | 51 +- .../v1alpha2/expansion_generated.go | 27 + .../coordination/v1alpha2/leasecandidate.go | 70 + .../listers/coordination/v1beta1/lease.go | 51 +- .../listers/core/v1/componentstatus.go | 36 +- 
.../client-go/listers/core/v1/configmap.go | 51 +- .../client-go/listers/core/v1/endpoints.go | 51 +- .../k8s.io/client-go/listers/core/v1/event.go | 51 +- .../client-go/listers/core/v1/limitrange.go | 51 +- .../client-go/listers/core/v1/namespace.go | 36 +- .../k8s.io/client-go/listers/core/v1/node.go | 36 +- .../listers/core/v1/persistentvolume.go | 36 +- .../listers/core/v1/persistentvolumeclaim.go | 51 +- .../k8s.io/client-go/listers/core/v1/pod.go | 51 +- .../client-go/listers/core/v1/podtemplate.go | 51 +- .../listers/core/v1/replicationcontroller.go | 51 +- .../listers/core/v1/resourcequota.go | 51 +- .../client-go/listers/core/v1/secret.go | 51 +- .../client-go/listers/core/v1/service.go | 51 +- .../listers/core/v1/serviceaccount.go | 51 +- .../listers/discovery/v1/endpointslice.go | 51 +- .../discovery/v1beta1/endpointslice.go | 51 +- vendor/k8s.io/client-go/listers/doc.go | 18 + .../client-go/listers/events/v1/event.go | 51 +- .../client-go/listers/events/v1beta1/event.go | 51 +- .../listers/extensions/v1beta1/daemonset.go | 51 +- .../listers/extensions/v1beta1/deployment.go | 51 +- .../listers/extensions/v1beta1/ingress.go | 51 +- .../extensions/v1beta1/networkpolicy.go | 51 +- .../listers/extensions/v1beta1/replicaset.go | 51 +- .../listers/flowcontrol/v1/flowschema.go | 36 +- .../v1/prioritylevelconfiguration.go | 36 +- .../listers/flowcontrol/v1beta1/flowschema.go | 36 +- .../v1beta1/prioritylevelconfiguration.go | 36 +- .../listers/flowcontrol/v1beta2/flowschema.go | 36 +- .../v1beta2/prioritylevelconfiguration.go | 36 +- .../listers/flowcontrol/v1beta3/flowschema.go | 36 +- .../v1beta3/prioritylevelconfiguration.go | 36 +- .../client-go/listers/generic_helpers.go | 72 + .../listers/networking/v1/ingress.go | 51 +- .../listers/networking/v1/ingressclass.go | 36 +- .../listers/networking/v1/networkpolicy.go | 51 +- .../listers/networking/v1alpha1/ipaddress.go | 36 +- .../networking/v1alpha1/servicecidr.go | 36 +- .../networking/v1beta1/expansion_generated.go | 8 + .../listers/networking/v1beta1/ingress.go | 51 +- .../networking/v1beta1/ingressclass.go | 36 +- .../listers/networking/v1beta1/ipaddress.go | 48 + .../listers/networking/v1beta1/servicecidr.go | 48 + .../client-go/listers/node/v1/runtimeclass.go | 36 +- .../listers/node/v1alpha1/runtimeclass.go | 36 +- .../listers/node/v1beta1/runtimeclass.go | 36 +- .../client-go/listers/policy/v1/eviction.go | 51 +- .../listers/policy/v1/poddisruptionbudget.go | 51 +- .../listers/policy/v1beta1/eviction.go | 51 +- .../policy/v1beta1/poddisruptionbudget.go | 51 +- .../client-go/listers/rbac/v1/clusterrole.go | 36 +- .../listers/rbac/v1/clusterrolebinding.go | 36 +- .../k8s.io/client-go/listers/rbac/v1/role.go | 51 +- .../client-go/listers/rbac/v1/rolebinding.go | 51 +- .../listers/rbac/v1alpha1/clusterrole.go | 36 +- .../rbac/v1alpha1/clusterrolebinding.go | 36 +- .../client-go/listers/rbac/v1alpha1/role.go | 51 +- .../listers/rbac/v1alpha1/rolebinding.go | 51 +- .../listers/rbac/v1beta1/clusterrole.go | 36 +- .../rbac/v1beta1/clusterrolebinding.go | 36 +- .../client-go/listers/rbac/v1beta1/role.go | 51 +- .../listers/rbac/v1beta1/rolebinding.go | 51 +- .../resource/v1alpha2/podschedulingcontext.go | 99 - .../resource/v1alpha2/resourceclass.go | 68 - .../listers/resource/v1alpha3/deviceclass.go | 48 + .../expansion_generated.go | 18 +- .../{v1alpha2 => v1alpha3}/resourceclaim.go | 53 +- .../v1alpha3/resourceclaimtemplate.go | 70 + .../resource/v1alpha3/resourceslice.go | 48 + .../listers/resource/v1beta1/deviceclass.go | 48 + 
.../resource/v1beta1/expansion_generated.go | 43 + .../listers/resource/v1beta1/resourceclaim.go | 70 + .../resourceclaimtemplate.go | 53 +- .../listers/resource/v1beta1/resourceslice.go | 48 + .../listers/scheduling/v1/priorityclass.go | 36 +- .../scheduling/v1alpha1/priorityclass.go | 36 +- .../scheduling/v1beta1/priorityclass.go | 36 +- .../client-go/listers/storage/v1/csidriver.go | 36 +- .../client-go/listers/storage/v1/csinode.go | 36 +- .../listers/storage/v1/csistoragecapacity.go | 51 +- .../listers/storage/v1/storageclass.go | 36 +- .../listers/storage/v1/volumeattachment.go | 36 +- .../storage/v1alpha1/csistoragecapacity.go | 51 +- .../storage/v1alpha1/volumeattachment.go | 36 +- .../storage/v1alpha1/volumeattributesclass.go | 36 +- .../listers/storage/v1beta1/csidriver.go | 36 +- .../listers/storage/v1beta1/csinode.go | 36 +- .../storage/v1beta1/csistoragecapacity.go | 51 +- .../storage/v1beta1/expansion_generated.go | 4 + .../listers/storage/v1beta1/storageclass.go | 36 +- .../storage/v1beta1/volumeattachment.go | 36 +- .../storage/v1beta1/volumeattributesclass.go | 48 + .../v1alpha1/expansion_generated.go | 23 + .../v1alpha1/storageversionmigration.go | 48 + vendor/k8s.io/client-go/metadata/metadata.go | 43 +- .../k8s.io/client-go/openapi/groupversion.go | 12 + vendor/k8s.io/client-go/rest/client.go | 159 +- vendor/k8s.io/client-go/rest/config.go | 22 + vendor/k8s.io/client-go/rest/request.go | 314 +- vendor/k8s.io/client-go/rest/url_utils.go | 2 +- vendor/k8s.io/client-go/rest/watch/decoder.go | 4 +- vendor/k8s.io/client-go/rest/watch/encoder.go | 2 +- .../k8s.io/client-go/restmapper/shortcut.go | 2 +- vendor/k8s.io/client-go/testing/actions.go | 239 +- vendor/k8s.io/client-go/testing/fixture.go | 671 +- vendor/k8s.io/client-go/tools/cache/OWNERS | 3 +- .../client-go/tools/cache/controller.go | 142 +- .../client-go/tools/cache/delta_fifo.go | 53 +- vendor/k8s.io/client-go/tools/cache/index.go | 3 +- .../k8s.io/client-go/tools/cache/listers.go | 6 +- .../k8s.io/client-go/tools/cache/listwatch.go | 4 + .../k8s.io/client-go/tools/cache/reflector.go | 275 +- .../reflector_data_consistency_detector.go | 94 +- .../client-go/tools/cache/shared_informer.go | 10 +- .../tools/cache/thread_safe_store.go | 92 +- .../client-go/tools/clientcmd/api/doc.go | 2 +- .../client-go/tools/clientcmd/api/helpers.go | 5 +- .../tools/clientcmd/api/latest/latest.go | 2 +- .../client-go/tools/clientcmd/api/v1/doc.go | 2 +- .../tools/clientcmd/client_config.go | 123 +- .../client-go/tools/clientcmd/config.go | 3 +- .../client-go/tools/clientcmd/loader.go | 17 +- .../k8s.io/client-go/tools/clientcmd/merge.go | 121 + .../client-go/tools/leaderelection/OWNERS | 2 + .../tools/leaderelection/leaderelection.go | 137 +- .../tools/leaderelection/leasecandidate.go | 202 + .../client-go/tools/leaderelection/metrics.go | 30 +- .../leaderelection/resourcelock/interface.go | 91 +- .../leaderelection/resourcelock/leaselock.go | 15 +- vendor/k8s.io/client-go/tools/record/event.go | 23 +- .../client-go/tools/record/events_cache.go | 8 +- .../k8s.io/client-go/transport/cache_go118.go | 24 +- .../client-go/transport/cert_rotation.go | 7 +- .../client-go/transport/round_trippers.go | 13 +- vendor/k8s.io/client-go/util/apply/apply.go | 49 + .../data_consistency_detector.go | 146 + .../list_data_consistency_detector.go | 76 + .../watch_list_data_consistency_detector.go | 54 + .../client-go/util/flowcontrol/backoff.go | 31 +- .../client-go/util/watchlist/watch_list.go | 82 + .../util/workqueue/default_rate_limiters.go | 139 
+- .../util/workqueue/delaying_queue.go | 105 +- .../client-go/util/workqueue/metrics.go | 67 +- .../k8s.io/client-go/util/workqueue/queue.go | 168 +- .../util/workqueue/rate_limiting_queue.go | 64 +- vendor/k8s.io/code-generator/OWNERS | 1 + .../generators/applyconfiguration.go | 102 +- .../generators/internal.go | 21 +- .../generators/openapi.go | 18 +- .../generators/targets.go | 23 +- .../generators/types.go | 25 +- .../applyconfiguration-gen/generators/util.go | 15 +- .../cmd/client-gen/args/args.go | 5 + .../client-gen/generators/client_generator.go | 15 +- .../generators/fake/fake_client_generator.go | 22 +- .../fake/generator_fake_for_clientset.go | 53 +- .../fake/generator_fake_for_group.go | 7 +- .../fake/generator_fake_for_type.go | 518 +- .../generators/generator_for_clientset.go | 6 +- .../generators/generator_for_group.go | 64 +- .../generators/generator_for_type.go | 657 +- .../cmd/client-gen/types/types.go | 16 +- .../cmd/conversion-gen/args/args.go | 21 +- .../conversion-gen/generators/conversion.go | 12 +- .../code-generator/cmd/conversion-gen/main.go | 2 +- .../cmd/deepcopy-gen/generators/deepcopy.go | 14 +- .../cmd/defaulter-gen/args/args.go | 13 +- .../cmd/defaulter-gen/generators/defaulter.go | 40 +- .../code-generator/cmd/defaulter-gen/main.go | 2 +- .../cmd/go-to-protobuf/protobuf/generator.go | 9 +- .../cmd/go-to-protobuf/protobuf/namer.go | 5 + .../cmd/informer-gen/generators/factory.go | 3 +- .../cmd/informer-gen/generators/generic.go | 4 +- .../cmd/informer-gen/generators/informer.go | 5 +- .../cmd/informer-gen/generators/targets.go | 12 +- .../cmd/informer-gen/generators/types.go | 2 + .../cmd/lister-gen/generators/lister.go | 92 +- .../cmd/register-gen/generators/targets.go | 2 +- vendor/k8s.io/code-generator/kube_codegen.sh | 117 +- vendor/k8s.io/code-generator/tools.go | 1 + vendor/k8s.io/component-base/config/OWNERS | 13 - vendor/k8s.io/component-base/config/types.go | 80 - .../config/v1alpha1/conversion.go | 53 - .../config/v1alpha1/defaults.go | 98 - .../config/v1alpha1/register.go | 31 - .../component-base/config/v1alpha1/types.go | 82 - .../v1alpha1/zz_generated.conversion.go | 133 - .../config/v1alpha1/zz_generated.deepcopy.go | 88 - .../config/zz_generated.deepcopy.go | 73 - vendor/k8s.io/gengo/v2/Makefile | 14 + vendor/k8s.io/gengo/v2/generator/execute.go | 9 +- .../gengo/v2/generator/import_tracker.go | 15 +- .../gengo/v2/generator/snippet_writer.go | 38 +- vendor/k8s.io/gengo/v2/namer/namer.go | 16 +- vendor/k8s.io/gengo/v2/parser/parse.go | 65 +- vendor/k8s.io/gengo/v2/parser/parse_122.go | 33 + .../k8s.io/gengo/v2/parser/parse_pre_122.go | 30 + vendor/k8s.io/gengo/v2/types/types.go | 20 +- vendor/k8s.io/klog/v2/klog.go | 76 +- .../kube-openapi/cmd/openapi-gen/args/args.go | 78 + .../cmd/openapi-gen/openapi-gen.go | 62 + .../kube-openapi/pkg/generators/README.md | 49 + .../kube-openapi/pkg/generators/api_linter.go | 219 + .../kube-openapi/pkg/generators/config.go | 84 + .../kube-openapi/pkg/generators/enum.go | 177 + .../kube-openapi/pkg/generators/extension.go | 203 + .../kube-openapi/pkg/generators/markers.go | 768 + .../kube-openapi/pkg/generators/openapi.go | 1137 + .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../kube-openapi/pkg/generators/rules/doc.go | 23 + .../pkg/generators/rules/idl_tag.go | 55 + .../pkg/generators/rules/names_match.go | 182 + .../generators/rules/omitempty_match_case.go | 64 + .../kube-openapi/pkg/generators/union.go | 208 + .../kube-openapi/pkg/util/proto/document.go | 2 +- 
.../kube-openapi/pkg/util/sets/empty.go | 27 + .../kube-openapi/pkg/util/sets/string.go | 207 + .../kubectl/pkg/util/podutils/podutils.go | 117 +- .../k8s.io/utils/clock/testing/fake_clock.go | 25 +- vendor/k8s.io/utils/integer/integer.go | 73 - .../forked/golang/golang-lru/lru.go | 133 + vendor/k8s.io/utils/lru/lru.go | 99 + vendor/k8s.io/utils/net/multi_listen.go | 195 + vendor/k8s.io/utils/trace/trace.go | 2 +- vendor/modules.txt | 431 +- .../sigs.k8s.io/controller-runtime/.gitignore | 5 +- .../controller-runtime/.golangci.yml | 27 +- .../controller-runtime/.gomodcheck.yaml | 17 + vendor/sigs.k8s.io/controller-runtime/FAQ.md | 4 +- .../sigs.k8s.io/controller-runtime/Makefile | 122 +- vendor/sigs.k8s.io/controller-runtime/OWNERS | 3 +- .../controller-runtime/OWNERS_ALIASES | 6 - .../sigs.k8s.io/controller-runtime/README.md | 21 + .../sigs.k8s.io/controller-runtime/RELEASE.md | 14 +- .../sigs.k8s.io/controller-runtime/alias.go | 12 +- .../pkg/builder/controller.go | 181 +- .../controller-runtime/pkg/builder/options.go | 10 +- .../controller-runtime/pkg/builder/webhook.go | 110 +- .../controller-runtime/pkg/cache/cache.go | 183 +- .../pkg/cache/delegating_by_gvk_cache.go | 15 +- .../pkg/cache/informer_cache.go | 11 + .../pkg/cache/internal/cache_reader.go | 57 +- .../pkg/cache/internal/informers.go | 92 +- .../pkg/cache/multi_namespace_cache.go | 49 +- .../pkg/certwatcher/certwatcher.go | 106 +- .../pkg/certwatcher/metrics/metrics.go | 1 + .../pkg/client/apiutil/apimachinery.go | 26 +- .../pkg/client/apiutil/errors.go | 54 + .../pkg/client/apiutil/restmapper.go | 187 +- .../controller-runtime/pkg/client/client.go | 59 +- .../pkg/client/fake/client.go | 564 +- .../controller-runtime/pkg/client/fake/doc.go | 2 +- .../pkg/client/fieldowner.go | 106 + .../pkg/client/fieldvalidation.go | 106 + .../pkg/client/interfaces.go | 15 +- .../controller-runtime/pkg/client/options.go | 104 +- .../controller-runtime/pkg/cluster/cluster.go | 18 +- .../controller-runtime/pkg/config/config.go | 112 - .../pkg/config/controller.go | 15 +- .../pkg/config/v1alpha1/register.go | 43 - .../pkg/config/v1alpha1/types.go | 179 - .../config/v1alpha1/zz_generated.deepcopy.go | 157 - .../pkg/controller/controller.go | 116 +- .../controllerutil/controllerutil.go | 129 +- .../v1alpha1/doc.go => controller/name.go} | 33 +- .../pkg/controller/priorityqueue/metrics.go | 146 + .../controller/priorityqueue/priorityqueue.go | 401 + .../controller-runtime/pkg/event/event.go | 51 +- .../controller-runtime/pkg/handler/enqueue.go | 43 +- .../pkg/handler/enqueue_mapped.go | 68 +- .../pkg/handler/enqueue_owner.go | 58 +- .../pkg/handler/eventhandler.go | 129 +- .../pkg/internal/controller/controller.go | 160 +- .../internal/controller/metrics/metrics.go | 8 + .../pkg/internal/field/selector/utils.go | 16 +- .../pkg/internal/metrics/workqueue.go | 131 + .../pkg/internal/source/event_handler.go | 38 +- .../pkg/internal/source/kind.go | 58 +- .../pkg/internal/syncs/syncs.go | 38 + .../pkg/leaderelection/leader_election.go | 25 +- .../pkg/manager/internal.go | 110 +- .../controller-runtime/pkg/manager/manager.go | 157 +- .../pkg/manager/runnable_group.go | 20 +- .../controller-runtime/pkg/manager/server.go | 74 +- .../pkg/metrics/leaderelection.go | 23 +- .../pkg/metrics/server/server.go | 34 +- .../pkg/metrics/workqueue.go | 101 - .../pkg/predicate/predicate.go | 196 +- .../pkg/ratelimiter/ratelimiter.go | 30 - .../pkg/reconcile/reconcile.go | 54 +- .../controller-runtime/pkg/source/source.go | 186 +- .../pkg/webhook/admission/decode.go 
| 25 +- .../pkg/webhook/admission/defaulter.go | 82 - .../pkg/webhook/admission/defaulter_custom.go | 83 +- .../pkg/webhook/admission/http.go | 52 +- .../pkg/webhook/admission/metrics/metrics.go | 39 + .../pkg/webhook/admission/validator.go | 125 - .../pkg/webhook/admission/validator_custom.go | 6 +- .../pkg/webhook/admission/webhook.go | 33 +- .../controller-runtime/pkg/webhook/alias.go | 6 - .../controller-runtime/pkg/webhook/server.go | 4 +- .../controller-tools/pkg/crd/flatten.go | 2 + .../controller-tools/pkg/crd/gen.go | 59 +- .../controller-tools/pkg/crd/known_types.go | 20 +- .../controller-tools/pkg/crd/markers/crd.go | 39 +- .../controller-tools/pkg/crd/markers/doc.go | 17 +- .../pkg/crd/markers/package.go | 2 +- .../pkg/crd/markers/priority.go | 37 + .../pkg/crd/markers/register.go | 24 +- .../pkg/crd/markers/topology.go | 4 +- .../pkg/crd/markers/validation.go | 128 +- .../crd/markers/zz_generated.markerhelp.go | 149 +- .../controller-tools/pkg/crd/schema.go | 134 +- .../pkg/crd/zz_generated.markerhelp.go | 19 +- .../controller-tools/pkg/deepcopy/gen.go | 2 - .../controller-tools/pkg/deepcopy/traverse.go | 14 +- .../pkg/deepcopy/zz_generated.markerhelp.go | 5 +- .../controller-tools/pkg/genall/genall.go | 10 +- .../controller-tools/pkg/genall/options.go | 4 +- .../controller-tools/pkg/genall/output.go | 5 +- .../pkg/genall/zz_generated.markerhelp.go | 13 +- .../controller-tools/pkg/loader/loader.go | 14 +- .../controller-tools/pkg/loader/refs.go | 3 +- .../controller-tools/pkg/markers/collect.go | 10 +- .../controller-tools/pkg/markers/parse.go | 37 +- .../controller-tools/pkg/markers/zip.go | 57 +- .../controller-tools/pkg/rbac/parser.go | 146 +- .../pkg/rbac/zz_generated.markerhelp.go | 9 +- .../controller-tools/pkg/schemapatcher/gen.go | 6 +- .../schemapatcher/zz_generated.markerhelp.go | 9 +- .../controller-tools/pkg/version/version.go | 10 + .../controller-tools/pkg/webhook/parser.go | 223 +- .../pkg/webhook/zz_generated.markerhelp.go | 69 +- vendor/sigs.k8s.io/json/Makefile | 2 +- vendor/sigs.k8s.io/json/OWNERS | 2 +- .../internal/golang/encoding/json/decode.go | 140 +- .../internal/golang/encoding/json/encode.go | 490 +- .../internal/golang/encoding/json/fold.go | 150 +- .../internal/golang/encoding/json/indent.go | 119 +- .../internal/golang/encoding/json/scanner.go | 4 +- .../internal/golang/encoding/json/stream.go | 41 +- .../structured-merge-diff/v4/fieldpath/set.go | 277 + .../structured-merge-diff/v4/merge/update.go | 34 +- .../structured-merge-diff/v4/typed/compare.go | 10 + .../structured-merge-diff/v4/typed/parser.go | 2 +- .../v4/value/reflectcache.go | 63 +- .../structured-merge-diff/v4/value/value.go | 2 +- vendor/sigs.k8s.io/yaml/LICENSE | 256 + vendor/sigs.k8s.io/yaml/OWNERS | 8 +- vendor/sigs.k8s.io/yaml/fields.go | 55 +- .../yaml/goyaml.v2}/LICENSE | 0 .../yaml/goyaml.v2/LICENSE.libyaml} | 18 +- .../yaml/goyaml.v2/NOTICE} | 8 +- vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS | 24 + vendor/sigs.k8s.io/yaml/goyaml.v2/README.md | 143 + vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go | 744 + vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go | 815 + vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go | 1685 ++ vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go | 390 + vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go | 1095 + vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go | 412 + vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go | 258 + vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go | 2711 ++ vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go | 113 + vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go | 26 + 
vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go | 478 + vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go | 739 + .../yaml/goyaml.v2/yamlprivateh.go | 173 + vendor/sigs.k8s.io/yaml/yaml.go | 145 +- vendor/sigs.k8s.io/yaml/yaml_go110.go | 17 + 3888 files changed, 345556 insertions(+), 128452 deletions(-) create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/attach_mode.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go delete mode 100644 vendor/github.com/cilium/cilium/api/v1/models/host_routing.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/label.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/label_array.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/routing.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/srv6.go create mode 100644 vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go create mode 100644 vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go create mode 100644 vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go create mode 100644 vendor/github.com/cilium/cilium/pkg/command/exec/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/command/exec/exec.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/components/components.go create mode 100644 vendor/github.com/cilium/cilium/pkg/container/cache/cache.go create mode 100644 vendor/github.com/cilium/cilium/pkg/container/cache/caches.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go create mode 100644 vendor/github.com/cilium/cilium/pkg/health/client/modules.go create mode 100644 vendor/github.com/cilium/cilium/pkg/health/client/tree.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/cell/config.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/cell/health.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/cell/module.go create mode 100644 vendor/github.com/cilium/cilium/pkg/hive/health/types/types.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnc_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go create mode 100644 
vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types_cilium.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.defaults.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go create mode 100644 vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go create mode 100644 vendor/github.com/cilium/cilium/pkg/labels/cidr.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go delete mode 100644 vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/lock/map.go create mode 100644 vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go create mode 100644 vendor/github.com/cilium/cilium/pkg/logging/slog.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/cmd.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/dump.html.tmpl create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/histogram.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/json.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/plot.go create mode 100644 vendor/github.com/cilium/cilium/pkg/metrics/sampler.go create mode 100644 vendor/github.com/cilium/cilium/pkg/netns/doc.go create mode 100644 vendor/github.com/cilium/cilium/pkg/netns/netns_linux.go create mode 100644 vendor/github.com/cilium/cilium/pkg/netns/netns_other.go create mode 100644 vendor/github.com/cilium/cilium/pkg/option/.gitignore create mode 100644 vendor/github.com/cilium/cilium/pkg/option/features.go create mode 100644 vendor/github.com/cilium/cilium/pkg/resiliency/error.go create mode 100644 vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go create mode 100644 vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go 
create mode 100644 vendor/github.com/cilium/cilium/pkg/resiliency/retry.go create mode 100644 vendor/github.com/cilium/cilium/pkg/source/source.go create mode 100644 vendor/github.com/cilium/cilium/pkg/time/time.go create mode 100644 vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go create mode 100644 vendor/github.com/cilium/ebpf/.clang-format create mode 100644 vendor/github.com/cilium/ebpf/.gitattributes create mode 100644 vendor/github.com/cilium/ebpf/.gitignore create mode 100644 vendor/github.com/cilium/ebpf/.golangci.yaml create mode 100644 vendor/github.com/cilium/ebpf/.vimto.toml create mode 100644 vendor/github.com/cilium/ebpf/CODEOWNERS create mode 100644 vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/cilium/ebpf/CONTRIBUTING.md create mode 100644 vendor/github.com/cilium/ebpf/LICENSE create mode 100644 vendor/github.com/cilium/ebpf/MAINTAINERS.md create mode 100644 vendor/github.com/cilium/ebpf/Makefile create mode 100644 vendor/github.com/cilium/ebpf/README.md create mode 100644 vendor/github.com/cilium/ebpf/asm/alu.go create mode 100644 vendor/github.com/cilium/ebpf/asm/alu_string.go create mode 100644 vendor/github.com/cilium/ebpf/asm/doc.go create mode 100644 vendor/github.com/cilium/ebpf/asm/func.go create mode 100644 vendor/github.com/cilium/ebpf/asm/func_string.go create mode 100644 vendor/github.com/cilium/ebpf/asm/instruction.go create mode 100644 vendor/github.com/cilium/ebpf/asm/jump.go create mode 100644 vendor/github.com/cilium/ebpf/asm/jump_string.go create mode 100644 vendor/github.com/cilium/ebpf/asm/load_store.go create mode 100644 vendor/github.com/cilium/ebpf/asm/load_store_string.go create mode 100644 vendor/github.com/cilium/ebpf/asm/metadata.go create mode 100644 vendor/github.com/cilium/ebpf/asm/opcode.go create mode 100644 vendor/github.com/cilium/ebpf/asm/opcode_string.go create mode 100644 vendor/github.com/cilium/ebpf/asm/register.go create mode 100644 vendor/github.com/cilium/ebpf/attachtype_string.go create mode 100644 vendor/github.com/cilium/ebpf/btf/btf.go create mode 100644 vendor/github.com/cilium/ebpf/btf/btf_types.go create mode 100644 vendor/github.com/cilium/ebpf/btf/btf_types_string.go create mode 100644 vendor/github.com/cilium/ebpf/btf/core.go create mode 100644 vendor/github.com/cilium/ebpf/btf/doc.go create mode 100644 vendor/github.com/cilium/ebpf/btf/ext_info.go create mode 100644 vendor/github.com/cilium/ebpf/btf/feature.go create mode 100644 vendor/github.com/cilium/ebpf/btf/format.go create mode 100644 vendor/github.com/cilium/ebpf/btf/handle.go create mode 100644 vendor/github.com/cilium/ebpf/btf/kernel.go create mode 100644 vendor/github.com/cilium/ebpf/btf/marshal.go create mode 100644 vendor/github.com/cilium/ebpf/btf/strings.go create mode 100644 vendor/github.com/cilium/ebpf/btf/traversal.go create mode 100644 vendor/github.com/cilium/ebpf/btf/types.go create mode 100644 vendor/github.com/cilium/ebpf/btf/workarounds.go create mode 100644 vendor/github.com/cilium/ebpf/collection.go create mode 100644 vendor/github.com/cilium/ebpf/cpu.go create mode 100644 vendor/github.com/cilium/ebpf/doc.go create mode 100644 vendor/github.com/cilium/ebpf/elf_reader.go create mode 100644 vendor/github.com/cilium/ebpf/elf_sections.go create mode 100644 vendor/github.com/cilium/ebpf/features/doc.go create mode 100644 vendor/github.com/cilium/ebpf/features/map.go create mode 100644 vendor/github.com/cilium/ebpf/features/misc.go create mode 100644 vendor/github.com/cilium/ebpf/features/prog.go create 
mode 100644 vendor/github.com/cilium/ebpf/features/version.go create mode 100644 vendor/github.com/cilium/ebpf/info.go create mode 100644 vendor/github.com/cilium/ebpf/internal/buffer.go create mode 100644 vendor/github.com/cilium/ebpf/internal/deque.go create mode 100644 vendor/github.com/cilium/ebpf/internal/elf.go create mode 100644 vendor/github.com/cilium/ebpf/internal/endian_be.go create mode 100644 vendor/github.com/cilium/ebpf/internal/endian_le.go create mode 100644 vendor/github.com/cilium/ebpf/internal/errors.go create mode 100644 vendor/github.com/cilium/ebpf/internal/feature.go create mode 100644 vendor/github.com/cilium/ebpf/internal/io.go create mode 100644 vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go create mode 100644 vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go create mode 100644 vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go create mode 100644 vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/auxv.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/doc.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/kconfig.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/platform.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/statfs.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/vdso.go create mode 100644 vendor/github.com/cilium/ebpf/internal/linux/version.go create mode 100644 vendor/github.com/cilium/ebpf/internal/math.go create mode 100644 vendor/github.com/cilium/ebpf/internal/output.go create mode 100644 vendor/github.com/cilium/ebpf/internal/prog.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/doc.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/fd.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/pinning.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/ptr.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/signals.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/syscall.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sys/types.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sysenc/doc.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sysenc/layout.go create mode 100644 vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go create mode 100644 vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/fd_trace.go create mode 100644 vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/main.go create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go create mode 100644 vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go create mode 100644 vendor/github.com/cilium/ebpf/internal/unix/doc.go create mode 100644 vendor/github.com/cilium/ebpf/internal/unix/types_linux.go create mode 100644 vendor/github.com/cilium/ebpf/internal/unix/types_other.go create mode 100644 vendor/github.com/cilium/ebpf/internal/version.go create mode 100644 vendor/github.com/cilium/ebpf/link/anchor.go create mode 100644 vendor/github.com/cilium/ebpf/link/cgroup.go create mode 100644 
vendor/github.com/cilium/ebpf/link/doc.go create mode 100644 vendor/github.com/cilium/ebpf/link/iter.go create mode 100644 vendor/github.com/cilium/ebpf/link/kprobe.go create mode 100644 vendor/github.com/cilium/ebpf/link/kprobe_multi.go create mode 100644 vendor/github.com/cilium/ebpf/link/link.go create mode 100644 vendor/github.com/cilium/ebpf/link/netfilter.go create mode 100644 vendor/github.com/cilium/ebpf/link/netkit.go create mode 100644 vendor/github.com/cilium/ebpf/link/netns.go create mode 100644 vendor/github.com/cilium/ebpf/link/perf_event.go create mode 100644 vendor/github.com/cilium/ebpf/link/program.go create mode 100644 vendor/github.com/cilium/ebpf/link/query.go create mode 100644 vendor/github.com/cilium/ebpf/link/raw_tracepoint.go create mode 100644 vendor/github.com/cilium/ebpf/link/socket_filter.go create mode 100644 vendor/github.com/cilium/ebpf/link/syscalls.go create mode 100644 vendor/github.com/cilium/ebpf/link/tcx.go create mode 100644 vendor/github.com/cilium/ebpf/link/tracepoint.go create mode 100644 vendor/github.com/cilium/ebpf/link/tracing.go create mode 100644 vendor/github.com/cilium/ebpf/link/uprobe.go create mode 100644 vendor/github.com/cilium/ebpf/link/uprobe_multi.go create mode 100644 vendor/github.com/cilium/ebpf/link/xdp.go create mode 100644 vendor/github.com/cilium/ebpf/linker.go create mode 100644 vendor/github.com/cilium/ebpf/map.go create mode 100644 vendor/github.com/cilium/ebpf/marshalers.go create mode 100644 vendor/github.com/cilium/ebpf/memory.go create mode 100644 vendor/github.com/cilium/ebpf/netlify.toml create mode 100644 vendor/github.com/cilium/ebpf/prog.go create mode 100644 vendor/github.com/cilium/ebpf/syscalls.go create mode 100644 vendor/github.com/cilium/ebpf/types.go create mode 100644 vendor/github.com/cilium/ebpf/types_string.go create mode 100644 vendor/github.com/cilium/ebpf/variable.go create mode 100644 vendor/github.com/cilium/hive/.gitignore create mode 100644 vendor/github.com/cilium/hive/CODEOWNERS rename vendor/{k8s.io/component-base => github.com/cilium/hive}/LICENSE (99%) create mode 100644 vendor/github.com/cilium/hive/Makefile create mode 100644 vendor/github.com/cilium/hive/README.md rename vendor/github.com/cilium/{cilium/pkg => }/hive/cell/cell.go (90%) create mode 100644 vendor/github.com/cilium/hive/cell/config.go rename vendor/github.com/cilium/{cilium/pkg => }/hive/cell/decorator.go (56%) rename vendor/github.com/cilium/{cilium/pkg => }/hive/cell/group.go (80%) create mode 100644 vendor/github.com/cilium/hive/cell/health.go rename vendor/github.com/cilium/{cilium/pkg => }/hive/cell/info.go (100%) create mode 100644 vendor/github.com/cilium/hive/cell/invoke.go rename vendor/github.com/cilium/{cilium/pkg/hive => hive/cell}/lifecycle.go (62%) create mode 100644 vendor/github.com/cilium/hive/cell/module.go rename vendor/github.com/cilium/{cilium/pkg => }/hive/cell/provide.go (67%) create mode 100644 vendor/github.com/cilium/hive/cell/simple_health.go rename vendor/github.com/cilium/{cilium/pkg => }/hive/command.go (80%) rename vendor/github.com/cilium/{cilium/pkg => }/hive/doc.go (100%) rename vendor/github.com/cilium/{cilium/pkg => }/hive/hive.go (52%) create mode 100644 vendor/github.com/cilium/hive/internal/map_string.go create mode 100644 vendor/github.com/cilium/hive/internal/reflect.go create mode 100644 vendor/github.com/cilium/hive/job/job.go create mode 100644 vendor/github.com/cilium/hive/job/metrics.go create mode 100644 vendor/github.com/cilium/hive/job/observer.go create mode 100644 
vendor/github.com/cilium/hive/job/oneshot.go create mode 100644 vendor/github.com/cilium/hive/job/timer.go create mode 100644 vendor/github.com/cilium/hive/script.go create mode 100644 vendor/github.com/cilium/hive/script/LICENSE create mode 100644 vendor/github.com/cilium/hive/script/README.md create mode 100644 vendor/github.com/cilium/hive/script/README.md.original create mode 100644 vendor/github.com/cilium/hive/script/cmds.go create mode 100644 vendor/github.com/cilium/hive/script/cmds_other.go create mode 100644 vendor/github.com/cilium/hive/script/cmds_posix.go create mode 100644 vendor/github.com/cilium/hive/script/conds.go create mode 100644 vendor/github.com/cilium/hive/script/engine.go create mode 100644 vendor/github.com/cilium/hive/script/errors.go create mode 100644 vendor/github.com/cilium/hive/script/internal/diff/diff.go create mode 100644 vendor/github.com/cilium/hive/script/makeraw_unix.go create mode 100644 vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go create mode 100644 vendor/github.com/cilium/hive/script/makeraw_unix_other.go create mode 100644 vendor/github.com/cilium/hive/script/makeraw_unsupported.go create mode 100644 vendor/github.com/cilium/hive/script/state.go rename vendor/github.com/cilium/{cilium/pkg => }/hive/shutdowner.go (100%) rename vendor/{google.golang.org/appengine => github.com/cilium/statedb}/LICENSE (99%) create mode 100644 vendor/github.com/cilium/statedb/index/bool.go create mode 100644 vendor/github.com/cilium/statedb/index/int.go create mode 100644 vendor/github.com/cilium/statedb/index/keyset.go create mode 100644 vendor/github.com/cilium/statedb/index/map.go create mode 100644 vendor/github.com/cilium/statedb/index/netip.go create mode 100644 vendor/github.com/cilium/statedb/index/seq.go create mode 100644 vendor/github.com/cilium/statedb/index/set.go create mode 100644 vendor/github.com/cilium/statedb/index/string.go create mode 100644 vendor/github.com/cilium/statedb/part/cache.go create mode 100644 vendor/github.com/cilium/statedb/part/iterator.go create mode 100644 vendor/github.com/cilium/statedb/part/map.go create mode 100644 vendor/github.com/cilium/statedb/part/node.go create mode 100644 vendor/github.com/cilium/statedb/part/ops.go create mode 100644 vendor/github.com/cilium/statedb/part/registry.go create mode 100644 vendor/github.com/cilium/statedb/part/set.go create mode 100644 vendor/github.com/cilium/statedb/part/tree.go create mode 100644 vendor/github.com/cilium/statedb/part/txn.go create mode 100644 vendor/github.com/cilium/stream/CODEOWNERS create mode 100644 vendor/github.com/cilium/stream/LICENSE create mode 100644 vendor/github.com/cilium/stream/Makefile create mode 100644 vendor/github.com/cilium/stream/README.md create mode 100644 vendor/github.com/cilium/stream/observable.go create mode 100644 vendor/github.com/cilium/stream/operators.go create mode 100644 vendor/github.com/cilium/stream/sinks.go create mode 100644 vendor/github.com/cilium/stream/sources.go rename vendor/github.com/{matttproud/golang_protobuf_extensions/pbutil/doc.go => containernetworking/cni/pkg/ns/ns_darwin.go} (76%) create mode 100644 vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go rename vendor/github.com/{petermattis/goid/goid_go1.5_amd64.go => containernetworking/cni/pkg/ns/ns_windows.go} (57%) delete mode 100644 vendor/github.com/emicklei/go-restful/v3/json.go delete mode 100644 vendor/github.com/emicklei/go-restful/v3/jsoniter.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go create 
mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fold.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/fuzz.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/indent.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/scanner.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tables.go create mode 100644 vendor/github.com/evanphx/json-patch/v5/internal/json/tags.go create mode 100644 vendor/github.com/fatih/color/color_windows.go delete mode 100644 vendor/github.com/felixge/httpsnoop/.travis.yml delete mode 100644 vendor/github.com/fsnotify/fsnotify/.gitattributes create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/freebsd.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/internal.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/unix2.go create mode 100644 vendor/github.com/fsnotify/fsnotify/internal/windows.go delete mode 100644 vendor/github.com/fsnotify/fsnotify/mkdoc.zsh rename vendor/github.com/{lufia/plan9stats => fxamacker/cbor/v2}/.gitignore (100%) create mode 100644 vendor/github.com/fxamacker/cbor/v2/.golangci.yml create mode 100644 vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md rename vendor/github.com/{spf13/jwalterweatherman => fxamacker/cbor/v2}/LICENSE (94%) create mode 100644 vendor/github.com/fxamacker/cbor/v2/README.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/SECURITY.md create mode 100644 vendor/github.com/fxamacker/cbor/v2/bytestring.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/cache.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/common.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/decode.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/diagnose.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/doc.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/simplevalue.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/stream.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/structfields.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/tag.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/valid.go delete mode 
100644 vendor/github.com/go-ole/go-ole/.travis.yml delete mode 100644 vendor/github.com/go-ole/go-ole/ChangeLog.md delete mode 100644 vendor/github.com/go-ole/go-ole/README.md delete mode 100644 vendor/github.com/go-ole/go-ole/appveyor.yml delete mode 100644 vendor/github.com/go-ole/go-ole/com.go delete mode 100644 vendor/github.com/go-ole/go-ole/com_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/connect.go delete mode 100644 vendor/github.com/go-ole/go-ole/constants.go delete mode 100644 vendor/github.com/go-ole/go-ole/error.go delete mode 100644 vendor/github.com/go-ole/go-ole/error_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/error_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/guid.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ole.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/go-get.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/oleutil.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayconversion.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayslices.go delete mode 100644 vendor/github.com/go-ole/go-ole/utility.go delete mode 100644 vendor/github.com/go-ole/go-ole/variables.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_386.go delete mode 100644 
vendor/github.com/go-ole/go-ole/variant_amd64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_arm.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_arm64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_386.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_amd64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_arm.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_arm64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_ppc64le.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_s390x.go delete mode 100644 vendor/github.com/go-ole/go-ole/vt_string.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt_doc.go delete mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml create mode 100644 vendor/github.com/go-openapi/runtime/csv_options.go delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/go18.go delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/pre_go18.go delete mode 100644 vendor/github.com/go-openapi/runtime/middleware/ui_defaults.go create mode 100644 vendor/github.com/go-openapi/runtime/middleware/ui_options.go create mode 100644 vendor/github.com/go-openapi/validate/BENCHMARK.md create mode 100644 vendor/github.com/go-openapi/validate/pools.go create mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go rename vendor/github.com/{tklauser/numcpus => google/btree}/LICENSE (100%) create mode 100644 vendor/github.com/google/btree/README.md create mode 100644 vendor/github.com/google/btree/btree.go create mode 100644 vendor/github.com/google/btree/btree_generic.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go create mode 100644 vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go create mode 100644 vendor/github.com/gopacket/gopacket/.gitignore create mode 100644 vendor/github.com/gopacket/gopacket/AUTHORS create mode 100644 vendor/github.com/gopacket/gopacket/CONTRIBUTING.md create mode 100644 vendor/github.com/gopacket/gopacket/LICENSE create mode 100644 vendor/github.com/gopacket/gopacket/README.md create mode 100644 vendor/github.com/gopacket/gopacket/SECURITY.md create mode 100644 vendor/github.com/gopacket/gopacket/base.go create mode 100644 vendor/github.com/gopacket/gopacket/checksum.go create mode 100644 vendor/github.com/gopacket/gopacket/decode.go create mode 100644 vendor/github.com/gopacket/gopacket/doc.go create mode 100644 vendor/github.com/gopacket/gopacket/flows.go create mode 100644 vendor/github.com/gopacket/gopacket/gc create mode 100644 vendor/github.com/gopacket/gopacket/layerclass.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/.lint_blacklist create mode 100644 vendor/github.com/gopacket/gopacket/layers/arp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/asf.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/asf_presencepong.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/base.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/bfd.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/bitfield.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/cdp.go create mode 100644 
vendor/github.com/gopacket/gopacket/layers/ctp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dhcpv4.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dhcpv6.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dhcpv6_options.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dns.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/doc.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dot11.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/dot1q.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/eap.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/eapol.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/endpoints.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/enums.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/enums_generated.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/erspan2.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/etherip.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ethernet.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/fddi.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/fuzz_layer.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/gen_linted.sh create mode 100644 vendor/github.com/gopacket/gopacket/layers/geneve.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/gre.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/gtp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/iana_ports.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/icmp4.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/icmp6.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/icmp6msg.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/igmp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ip4.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ip6.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ipsec.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/layertypes.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/lcm.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/linux_sll.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/linux_sll2.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/llc.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/lldp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/loopback.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/mdp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/mldv1.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/mldv2.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/modbustcp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/mpls.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/multipathtcp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ndp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ntp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ospf.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/pflog.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ports.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/ppp.go create mode 100644 
vendor/github.com/gopacket/gopacket/layers/pppoe.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/prism.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/radiotap.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/radius.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/rmcp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/rudp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/sctp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/sflow.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/sip.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/stp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tcp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tcpip.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/test_creator.py create mode 100644 vendor/github.com/gopacket/gopacket/layers/tls.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tls_alert.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tls_appdata.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tls_cipherspec.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/tls_handshake.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/udp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/udplite.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/usb.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/vrrp.go create mode 100644 vendor/github.com/gopacket/gopacket/layers/vxlan.go create mode 100644 vendor/github.com/gopacket/gopacket/layers_decoder.go create mode 100644 vendor/github.com/gopacket/gopacket/layertype.go create mode 100644 vendor/github.com/gopacket/gopacket/packet.go create mode 100644 vendor/github.com/gopacket/gopacket/parser.go create mode 100644 vendor/github.com/gopacket/gopacket/runtests.sh create mode 100644 vendor/github.com/gopacket/gopacket/time.go create mode 100644 vendor/github.com/gopacket/gopacket/writer.go create mode 100644 vendor/github.com/klauspost/compress/.gitattributes rename vendor/github.com/{spf13/jwalterweatherman => klauspost/compress}/.gitignore (70%) create mode 100644 vendor/github.com/klauspost/compress/.goreleaser.yml create mode 100644 vendor/github.com/klauspost/compress/README.md create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md create mode 100644 vendor/github.com/klauspost/compress/compressible.go create mode 100644 vendor/github.com/klauspost/compress/fse/README.md create mode 100644 vendor/github.com/klauspost/compress/fse/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/fse/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/fse/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/fse/compress.go create mode 100644 vendor/github.com/klauspost/compress/fse/decompress.go create mode 100644 vendor/github.com/klauspost/compress/fse/fse.go create mode 100644 vendor/github.com/klauspost/compress/gen.sh create mode 100644 vendor/github.com/klauspost/compress/huff0/.gitignore create mode 100644 vendor/github.com/klauspost/compress/huff0/README.md create mode 100644 vendor/github.com/klauspost/compress/huff0/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/huff0/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/huff0/compress.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress.go 
create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s create mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_generic.go create mode 100644 vendor/github.com/klauspost/compress/huff0/huff0.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go create mode 100644 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/LICENSE create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/decode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go create mode 100644 vendor/github.com/klauspost/compress/internal/snapref/snappy.go create mode 100644 vendor/github.com/klauspost/compress/s2sx.mod create mode 100644 vendor/github.com/klauspost/compress/s2sx.sum create mode 100644 vendor/github.com/klauspost/compress/zstd/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/bitreader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bitwriter.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blockenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/blocktype_string.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytebuf.go create mode 100644 vendor/github.com/klauspost/compress/zstd/bytereader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decodeheader.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/decoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/dict.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_base.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_best.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_better.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_dfast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/enc_fast.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/encoder_options.go create mode 100644 vendor/github.com/klauspost/compress/zstd/framedec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/frameenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_encoder.go create mode 100644 vendor/github.com/klauspost/compress/zstd/fse_predefined.go create mode 100644 vendor/github.com/klauspost/compress/zstd/hash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/history.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt create mode 100644 
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go create mode 100644 vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s create mode 100644 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go create mode 100644 vendor/github.com/klauspost/compress/zstd/seqenc.go create mode 100644 vendor/github.com/klauspost/compress/zstd/snappy.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zip.go create mode 100644 vendor/github.com/klauspost/compress/zstd/zstd.go delete mode 100644 vendor/github.com/lufia/plan9stats/LICENSE delete mode 100644 vendor/github.com/lufia/plan9stats/README.md delete mode 100644 vendor/github.com/lufia/plan9stats/cpu.go delete mode 100644 vendor/github.com/lufia/plan9stats/doc.go delete mode 100644 vendor/github.com/lufia/plan9stats/host.go delete mode 100644 vendor/github.com/lufia/plan9stats/int.go delete mode 100644 vendor/github.com/lufia/plan9stats/opts.go delete mode 100644 vendor/github.com/lufia/plan9stats/stats.go create mode 100644 vendor/github.com/mackerelio/go-osstat/LICENSE.txt create mode 100644 vendor/github.com/mackerelio/go-osstat/memory/memory_darwin.go create mode 100644 vendor/github.com/mackerelio/go-osstat/memory/memory_freebsd.go create mode 100644 vendor/github.com/mackerelio/go-osstat/memory/memory_linux.go create mode 100644 vendor/github.com/mackerelio/go-osstat/memory/memory_other.go create mode 100644 vendor/github.com/mackerelio/go-osstat/memory/memory_windows.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap.go create mode 100644 vendor/github.com/pelletier/go-toml/v2/unstable/unmarshaler.go rename vendor/github.com/petermattis/goid/{goid_go1.5_arm.go => goid_go1.5.go} (81%) rename vendor/github.com/petermattis/goid/{goid_go1.5_arm64.s => goid_go1.5.s} (68%) delete mode 100644 vendor/github.com/petermattis/goid/goid_go1.5_amd64.s delete mode 100644 vendor/github.com/petermattis/goid/goid_go1.5_arm.s create mode 100644 vendor/github.com/petermattis/goid/runtime_go1.23.go delete mode 
100644 vendor/github.com/pmezard/go-difflib/LICENSE delete mode 100644 vendor/github.com/pmezard/go-difflib/difflib/difflib.go delete mode 100644 vendor/github.com/power-devops/perfstat/c_helpers.c delete mode 100644 vendor/github.com/power-devops/perfstat/c_helpers.h delete mode 100644 vendor/github.com/power-devops/perfstat/config.go delete mode 100644 vendor/github.com/power-devops/perfstat/cpustat.go delete mode 100644 vendor/github.com/power-devops/perfstat/diskstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/doc.go delete mode 100644 vendor/github.com/power-devops/perfstat/fsstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/helpers.go delete mode 100644 vendor/github.com/power-devops/perfstat/lparstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/lvmstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/memstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/netstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/procstat.go delete mode 100644 vendor/github.com/power-devops/perfstat/sysconf.go delete mode 100644 vendor/github.com/power-devops/perfstat/systemcfg.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_cpu.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_disk.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_fs.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_lpar.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_lvm.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_memory.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_network.go delete mode 100644 vendor/github.com/power-devops/perfstat/types_process.go delete mode 100644 vendor/github.com/power-devops/perfstat/uptime.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt delete mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 vendor/github.com/prometheus/common/model/labelset_string.go create mode 100644 vendor/github.com/prometheus/common/model/metadata.go create mode 100644 vendor/github.com/prometheus/procfs/net_tls_stat.go create mode 100644 vendor/github.com/sagikazarmark/locafero/.editorconfig create mode 100644 vendor/github.com/sagikazarmark/locafero/.envrc create mode 100644 vendor/github.com/sagikazarmark/locafero/.gitignore create mode 100644 vendor/github.com/sagikazarmark/locafero/.golangci.yaml create mode 100644 vendor/github.com/sagikazarmark/locafero/LICENSE create mode 100644 vendor/github.com/sagikazarmark/locafero/README.md create mode 100644 vendor/github.com/sagikazarmark/locafero/file_type.go create mode 100644 vendor/github.com/sagikazarmark/locafero/finder.go create mode 100644 vendor/github.com/sagikazarmark/locafero/flake.lock create mode 100644 vendor/github.com/sagikazarmark/locafero/flake.nix create mode 100644 vendor/github.com/sagikazarmark/locafero/helpers.go create mode 100644 vendor/github.com/sagikazarmark/locafero/justfile 
rename vendor/github.com/{fsnotify/fsnotify => sagikazarmark/slog-shim}/.editorconfig (60%) create mode 100644 vendor/github.com/sagikazarmark/slog-shim/.envrc create mode 100644 vendor/github.com/sagikazarmark/slog-shim/.gitignore create mode 100644 vendor/github.com/sagikazarmark/slog-shim/LICENSE create mode 100644 vendor/github.com/sagikazarmark/slog-shim/README.md create mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/attr_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/flake.lock create mode 100644 vendor/github.com/sagikazarmark/slog-shim/flake.nix create mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/handler_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/json_handler_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/level.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/level_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/logger_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/record.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/record_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/text_handler_120.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/value.go create mode 100644 vendor/github.com/sagikazarmark/slog-shim/value_120.go create mode 100644 vendor/github.com/sasha-s/go-deadlock/trylock.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/LICENSE delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/binary.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_freebsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_linux.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_openbsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_unix.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/common_windows.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/endian.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/internal/common/warnings.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_aix.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_aix_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_bsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_cgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_darwin_nocgo.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_fallback.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_freebsd.go delete mode 100644 
vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_386.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_amd64.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_openbsd_arm64.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_plan9.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_solaris.go delete mode 100644 vendor/github.com/shirou/gopsutil/v3/mem/mem_windows.go create mode 100644 vendor/github.com/sourcegraph/conc/.golangci.yml rename vendor/github.com/{power-devops/perfstat => sourcegraph/conc}/LICENSE (96%) create mode 100644 vendor/github.com/sourcegraph/conc/README.md create mode 100644 vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go119.go create mode 100644 vendor/github.com/sourcegraph/conc/internal/multierror/multierror_go120.go create mode 100644 vendor/github.com/sourcegraph/conc/iter/iter.go create mode 100644 vendor/github.com/sourcegraph/conc/iter/map.go create mode 100644 vendor/github.com/sourcegraph/conc/panics/panics.go create mode 100644 vendor/github.com/sourcegraph/conc/panics/try.go create mode 100644 vendor/github.com/sourcegraph/conc/waitgroup.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/README.md delete mode 100644 vendor/github.com/spf13/jwalterweatherman/default_notepad.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/log_counter.go delete mode 100644 vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 vendor/github.com/spf13/pflag/ipnet_slice.go create mode 100644 vendor/github.com/spf13/viper/.envrc create mode 100644 vendor/github.com/spf13/viper/.yamlignore create mode 100644 vendor/github.com/spf13/viper/.yamllint.yaml delete mode 100644 vendor/github.com/spf13/viper/experimental_logger.go rename vendor/github.com/spf13/viper/{viper_go1_15.go => file.go} (94%) create mode 100644 vendor/github.com/spf13/viper/file_finder.go create mode 100644 vendor/github.com/spf13/viper/flake.lock create mode 100644 vendor/github.com/spf13/viper/flake.nix delete mode 100644 vendor/github.com/spf13/viper/fs.go create mode 100644 vendor/github.com/spf13/viper/internal/features/bind_struct.go create mode 100644 vendor/github.com/spf13/viper/internal/features/bind_struct_default.go delete mode 100644 vendor/github.com/spf13/viper/viper_go1_16.go delete mode 100644 vendor/github.com/spf13/viper/watch.go delete mode 100644 vendor/github.com/spf13/viper/watch_unsupported.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/.cirrus.yml delete mode 100644 vendor/github.com/tklauser/go-sysconf/.gitignore delete mode 100644 vendor/github.com/tklauser/go-sysconf/LICENSE delete mode 100644 vendor/github.com/tklauser/go-sysconf/README.md delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_dragonfly.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_freebsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_generic.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_linux.go delete mode 100644 
vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_openbsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_posix.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_solaris.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go delete mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go delete mode 100644 vendor/github.com/tklauser/numcpus/.cirrus.yml delete mode 100644 vendor/github.com/tklauser/numcpus/README.md delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus.go delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus_bsd.go delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus_linux.go delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus_solaris.go delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus_unsupported.go delete mode 100644 vendor/github.com/tklauser/numcpus/numcpus_windows.go create mode 100644 vendor/github.com/vishvananda/netlink/nl/ip6tnl_linux.go create mode 100644 
vendor/github.com/vishvananda/netlink/nl/vdpa_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/rule_nonlinux.go create mode 100644 vendor/github.com/vishvananda/netlink/socket_xdp_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/unix_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/vdpa_linux.go create mode 100644 vendor/github.com/vishvananda/netlink/virtio.go create mode 100644 vendor/github.com/vishvananda/netlink/xdp_diag.go create mode 100644 vendor/github.com/vishvananda/netlink/xdp_linux.go rename vendor/github.com/vishvananda/netlink/{xfrm.go => xfrm_linux.go} (95%) delete mode 100644 vendor/github.com/vishvananda/netlink/xfrm_policy.go delete mode 100644 vendor/github.com/vishvananda/netlink/xfrm_state.go create mode 100644 vendor/github.com/vishvananda/netlink/xfrm_unspecified.go create mode 100644 vendor/github.com/vishvananda/netns/.yamllint.yml create mode 100644 vendor/github.com/x448/float16/.travis.yml create mode 100644 vendor/github.com/x448/float16/LICENSE create mode 100644 vendor/github.com/x448/float16/README.md create mode 100644 vendor/github.com/x448/float16/float16.go delete mode 100644 vendor/github.com/yusufpapurcu/wmi/LICENSE delete mode 100644 vendor/github.com/yusufpapurcu/wmi/README.md delete mode 100644 vendor/github.com/yusufpapurcu/wmi/swbemservices.go delete mode 100644 vendor/github.com/yusufpapurcu/wmi/wmi.go delete mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules create mode 100644 vendor/go.opentelemetry.io/otel/renovate.json create mode 100644 vendor/go.opentelemetry.io/otel/sdk/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/doc.go (96%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/exception.go (98%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/metric.go (77%) rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/schema.go (85%) create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh create mode 100644 vendor/go.uber.org/atomic/pointer_go118_pre119.go create mode 100644 vendor/go.uber.org/zap/.golangci.yml rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) delete mode 100644 vendor/go.uber.org/zap/array_go118.go rename vendor/go.uber.org/zap/{stacktrace.go => internal/stacktrace/stack.go} (75%) create mode 100644 vendor/go.uber.org/zap/zapcore/lazy_with.go 
create mode 100644 vendor/go4.org/netipx/.gitignore create mode 100644 vendor/go4.org/netipx/.gitmodules create mode 100644 vendor/go4.org/netipx/AUTHORS create mode 100644 vendor/go4.org/netipx/LICENSE create mode 100644 vendor/go4.org/netipx/README.md create mode 100644 vendor/go4.org/netipx/ipset.go create mode 100644 vendor/go4.org/netipx/mask6.go create mode 100644 vendor/go4.org/netipx/netipx.go create mode 100644 vendor/go4.org/netipx/uint128.go delete mode 100644 vendor/golang.org/x/exp/maps/maps.go create mode 100644 vendor/golang.org/x/exp/slices/cmp.go rename vendor/golang.org/x/exp/slices/{zsortfunc.go => zsortanyfunc.go} (64%) create mode 100644 vendor/golang.org/x/exp/slog/attr.go create mode 100644 vendor/golang.org/x/exp/slog/doc.go create mode 100644 vendor/golang.org/x/exp/slog/handler.go create mode 100644 vendor/golang.org/x/exp/slog/internal/buffer/buffer.go create mode 100644 vendor/golang.org/x/exp/slog/internal/ignorepc.go create mode 100644 vendor/golang.org/x/exp/slog/json_handler.go create mode 100644 vendor/golang.org/x/exp/slog/level.go create mode 100644 vendor/golang.org/x/exp/slog/logger.go create mode 100644 vendor/golang.org/x/exp/slog/noplog.bench create mode 100644 vendor/golang.org/x/exp/slog/record.go create mode 100644 vendor/golang.org/x/exp/slog/text_handler.go create mode 100644 vendor/golang.org/x/exp/slog/value.go create mode 100644 vendor/golang.org/x/exp/slog/value_119.go create mode 100644 vendor/golang.org/x/exp/slog/value_120.go delete mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go create mode 100644 vendor/golang.org/x/text/cases/cases.go create mode 100644 vendor/golang.org/x/text/cases/context.go create mode 100644 vendor/golang.org/x/text/cases/fold.go create mode 100644 vendor/golang.org/x/text/cases/icu.go create mode 100644 vendor/golang.org/x/text/cases/info.go create mode 100644 vendor/golang.org/x/text/cases/map.go create mode 100644 vendor/golang.org/x/text/cases/tables10.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables13.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables15.0.0.go create mode 100644 vendor/golang.org/x/text/cases/tables9.0.0.go create mode 100644 vendor/golang.org/x/text/cases/trieval.go create mode 100644 vendor/golang.org/x/text/internal/internal.go create mode 100644 vendor/golang.org/x/text/internal/match.go create mode 100644 vendor/golang.org/x/tools/txtar/archive.go create mode 100644 vendor/golang.org/x/tools/txtar/fs.go delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 
vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100644 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto delete mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/.gitignore (100%) rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/LICENSE (100%) rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/README.md (100%) rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/errors.go (100%) rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/merge.go (100%) rename vendor/{github.com/evanphx/json-patch => gopkg.in/evanphx/json-patch.v4}/patch.go (95%) create mode 100644 vendor/k8s.io/api/admission/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/doc.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/generated.pb.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/generated.proto create mode 100644 vendor/k8s.io/api/apidiscovery/v2/register.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/types.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/autoscaling/v2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/certificates/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.prerelease-lifecycle.go create mode 100644 
vendor/k8s.io/api/coordination/v1alpha2/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/register.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/types.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1alpha2/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/discovery/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/events/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/flowcontrol/v1/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/{component-base/config => api/imagepolicy}/v1alpha1/doc.go (74%) create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_labels.go create mode 100644 vendor/k8s.io/api/node/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/policy/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.prerelease-lifecycle.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.pb.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/generated.proto delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/types.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/resource/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/doc.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/generated.proto rename vendor/k8s.io/api/resource/{v1alpha2 => v1alpha3}/register.go (89%) create mode 100644 vendor/k8s.io/api/resource/v1alpha3/types.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go rename vendor/k8s.io/api/resource/{v1alpha2 => v1beta1}/doc.go (81%) create mode 100644 vendor/k8s.io/api/resource/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/resource/v1beta1/register.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/types.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/resource/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 
vendor/k8s.io/api/scheduling/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.prerelease-lifecycle.go rename vendor/{sigs.k8s.io/controller-runtime/pkg/ratelimiter => k8s.io/api/storagemigration/v1alpha1}/doc.go (66%) create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storagemigration/v1alpha1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/testrestmapper/test_restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/cbor.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/direct/direct.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/framer.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/buffers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/decode.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/diagnostic.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/encode.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/raw.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/duration/duration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/OWNERS create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/auditannotation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/expressionwarning.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/matchresources.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/namedrulewithoperations.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramkind.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/paramref.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/typechecking.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingadmissionpolicystatus.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/variable.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/applyconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/jsonpatch.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicybindingspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutatingadmissionpolicyspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1/mutation.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicy.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/batch/v1/successpolicyrule.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/coordination/v1alpha2/leasecandidatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/apparmorprofile.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/claimsource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/containeruser.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/imagevolumesource.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/linuxcontaineruser.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/nodefeatures.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandler.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/noderuntimehandlerfeatures.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcehealth.go rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha2/resourceclaimschedulingstatus.go => core/v1/resourcestatus.go} (51%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/core/v1/volumemountstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/doc.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereview.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewcontainerspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1/imagereviewstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ipaddressspec.go rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha2/resourceclassparametersreference.go => networking/v1beta1/parentreference.go} (51%) rename vendor/k8s.io/client-go/applyconfigurations/{resource/v1alpha2/podschedulingcontext.go => networking/v1beta1/servicecidr.go} (59%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrspec.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/servicecidrstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/allocationresult.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/podschedulingcontextstatus.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimparametersreference.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourceclaimspec.go delete mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha2/resourcehandle.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocateddevicestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/basicdevice.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/celdeviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/device.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceattribute.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclaimconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceconstraint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/devicerequestallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/networkdevicedata.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/opaquedeviceconfiguration.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (84%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimconsumerreference.go (94%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimstatus.go (55%) rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplate.go (84%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimtemplatespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourcepool.go rename 
vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2/resourceclass.go => v1alpha3/resourceslice.go} (57%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceslicespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocateddevicestatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/allocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/basicdevice.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/celdeviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/device.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceattribute.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicecapacity.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclaimconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceclassspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceconstraint.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequest.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/devicerequestallocationresult.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/deviceselector.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/networkdevicedata.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/opaquedeviceconfiguration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimconsumerreference.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceclaimtemplate.go rename vendor/k8s.io/client-go/applyconfigurations/resource/{v1alpha2 => v1beta1}/resourceclaimtemplatespec.go (83%) create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourcepool.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslice.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/resource/v1beta1/resourceslicespec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/groupversionresource.go create mode 100644 
vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/migrationcondition.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationspec.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/storagemigration/v1alpha1/storageversionmigrationstatus.go create mode 100644 vendor/k8s.io/client-go/applyconfigurations/utils.go create mode 100644 vendor/k8s.io/client-go/features/envvar.go create mode 100644 vendor/k8s.io/client-go/features/features.go create mode 100644 vendor/k8s.io/client-go/features/known_features.go create mode 100644 vendor/k8s.io/client-go/gentype/fake.go create mode 100644 vendor/k8s.io/client-go/gentype/type.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1alpha2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/coordination/v1alpha2/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1beta1/servicecidr.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1alpha3/deviceclass.go rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/interface.go (74%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (83%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2 => v1alpha3}/resourceclaimtemplate.go (83%) rename vendor/k8s.io/client-go/informers/resource/{v1alpha2/resourceclass.go => v1alpha3/resourceslice.go} (62%) create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta1/deviceclass.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta1/resourceclaim.go rename vendor/k8s.io/client-go/informers/resource/{v1alpha2/podschedulingcontext.go => v1beta1/resourceclaimtemplate.go} (50%) create mode 100644 vendor/k8s.io/client-go/informers/resource/v1beta1/resourceslice.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/fake/fake_validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/fake/fake_mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/coordination_client.go rename vendor/k8s.io/client-go/kubernetes/typed/{resource => coordination}/v1alpha2/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{resource => coordination}/v1alpha2/fake/doc.go (100%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/fake/fake_leasecandidate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1alpha2/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake/fake_servicecidr.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_podschedulingcontext.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclaimtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/fake/fake_resourceclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/podschedulingcontext.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclaimtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/deviceclass.go rename vendor/{sigs.k8s.io/controller-runtime/pkg/config => k8s.io/client-go/kubernetes/typed/resource/v1alpha3}/doc.go (76%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_deviceclass.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/fake/fake_resource_client.go (54%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resourceslice.go rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/generated_expansion.go (87%) rename vendor/k8s.io/client-go/kubernetes/typed/resource/{v1alpha2 => v1alpha3}/resource_client.go (61%) create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/deviceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_deviceclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/fake/fake_resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resource_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/resource/v1beta1/resourceslice.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/fake/fake_volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storagemigration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/fake/fake_storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storagemigration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1/validatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/mutatingadmissionpolicybinding.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1alpha2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/coordination/v1alpha2/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/listers/doc.go create mode 100644 vendor/k8s.io/client-go/listers/generic_helpers.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/ipaddress.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1beta1/servicecidr.go delete mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/podschedulingcontext.go 
delete mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha2/resourceclass.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/deviceclass.go rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/expansion_generated.go (71%) rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1alpha3}/resourceclaim.go (56%) create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceclaimtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1alpha3/resourceslice.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta1/deviceclass.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta1/resourceclaim.go rename vendor/k8s.io/client-go/listers/resource/{v1alpha2 => v1beta1}/resourceclaimtemplate.go (57%) create mode 100644 vendor/k8s.io/client-go/listers/resource/v1beta1/resourceslice.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/volumeattributesclass.go create mode 100644 vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storagemigration/v1alpha1/storageversionmigration.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/merge.go create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/leasecandidate.go create mode 100644 vendor/k8s.io/client-go/util/apply/apply.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/list_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/consistencydetector/watch_list_data_consistency_detector.go create mode 100644 vendor/k8s.io/client-go/util/watchlist/watch_list.go delete mode 100644 vendor/k8s.io/component-base/config/OWNERS delete mode 100644 vendor/k8s.io/component-base/config/types.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/conversion.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/defaults.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/register.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/types.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/component-base/config/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/component-base/config/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/gengo/v2/Makefile create mode 100644 vendor/k8s.io/gengo/v2/parser/parse_122.go create mode 100644 vendor/k8s.io/gengo/v2/parser/parse_pre_122.go create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go create mode 100644 vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/config.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/enum.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/extension.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/markers.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/doc.go 
create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/union.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/empty.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/sets/string.go delete mode 100644 vendor/k8s.io/utils/integer/integer.go create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go create mode 100644 vendor/k8s.io/utils/lru/lru.go create mode 100644 vendor/k8s.io/utils/net/multi_listen.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/.gomodcheck.yaml create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/errors.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldowner.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fieldvalidation.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/config.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/register.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/types.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/config/v1alpha1/zz_generated.deepcopy.go rename vendor/sigs.k8s.io/controller-runtime/pkg/{config/v1alpha1/doc.go => controller/name.go} (52%) create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/metrics.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue/priorityqueue.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/metrics/workqueue.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/internal/syncs/syncs.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/ratelimiter/ratelimiter.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/defaulter.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/metrics/metrics.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/validator.go create mode 100644 vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/priority.go rename vendor/{github.com/matttproud/golang_protobuf_extensions => sigs.k8s.io/yaml/goyaml.v2}/LICENSE (100%) rename vendor/{github.com/go-ole/go-ole/LICENSE => sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml} (66%) rename vendor/{k8s.io/component-base/config/doc.go => sigs.k8s.io/yaml/goyaml.v2/NOTICE} (79%) create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/README.md create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go create mode 100644 
vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
diff --git a/go.mod b/go.mod
index 5dfd27f04d..be50a0eb38 100644
--- a/go.mod
+++ b/go.mod
@@ -5,20 +5,20 @@ go 1.24
 require (
 	github.com/agiledragon/gomonkey/v2 v2.11.0
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2
-	github.com/cilium/cilium v1.14.1
-	github.com/containernetworking/cni v1.1.2
+	github.com/cilium/cilium v1.17.1
+	github.com/containernetworking/cni v1.2.3
 	github.com/containernetworking/plugins v1.5.1
 	github.com/go-openapi/errors v0.22.0
-	github.com/go-openapi/loads v0.21.2
-	github.com/go-openapi/runtime v0.26.2
+	github.com/go-openapi/loads v0.22.0
+	github.com/go-openapi/runtime v0.28.0
 	github.com/go-openapi/spec v0.21.0
-	github.com/go-openapi/strfmt v0.21.8
+	github.com/go-openapi/strfmt v0.23.0
 	github.com/go-openapi/swag v0.23.0
-	github.com/go-openapi/validate v0.22.3
+	github.com/go-openapi/validate v0.24.0
 	github.com/go-swagger/go-swagger v0.30.4
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/mock v1.6.0
-	github.com/google/gops v0.3.27
+	github.com/google/gops v0.3.28
 	github.com/grafana/pyroscope-go v1.2.0
 	github.com/jessevdk/go-flags v1.5.0
 	github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.4.0
@@ -27,22 +27,22 @@ require (
 	github.com/onsi/ginkgo/v2 v2.22.1
 	github.com/onsi/gomega v1.36.2
 	github.com/openkruise/kruise-api v1.3.0
-	github.com/prometheus/client_golang v1.17.0
-	github.com/sasha-s/go-deadlock v0.3.1
+	github.com/prometheus/client_golang v1.20.5
+	github.com/sasha-s/go-deadlock v0.3.5
 	github.com/spf13/cobra v1.8.1
-	github.com/spf13/pflag v1.0.5
+	github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace
 	github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73
 	github.com/tigera/operator v1.33.0
-	github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739
-	go.opentelemetry.io/otel v1.25.0
+	github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81
+	go.opentelemetry.io/otel v1.32.0
 	go.opentelemetry.io/otel/exporters/prometheus v0.44.0
-	go.opentelemetry.io/otel/metric v1.25.0
-	go.opentelemetry.io/otel/sdk v1.24.0
+	go.opentelemetry.io/otel/metric v1.32.0
+	go.opentelemetry.io/otel/sdk v1.28.0
 	go.opentelemetry.io/otel/sdk/metric v1.24.0
-	go.opentelemetry.io/otel/trace v1.25.0 // indirect
-	go.uber.org/atomic v1.10.0
+	go.opentelemetry.io/otel/trace v1.32.0 // indirect
+	go.uber.org/atomic v1.11.0
 	go.uber.org/multierr v1.11.0
-	go.uber.org/zap v1.25.0
+	go.uber.org/zap v1.27.0
 	golang.org/x/net v0.33.0
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.28.0
@@ -50,33 +50,31 @@ require (
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.29.4
-	k8s.io/apiextensions-apiserver v0.29.4
-	k8s.io/apimachinery v0.30.0-beta.0
-	k8s.io/client-go v0.29.4
-	k8s.io/code-generator v0.30.0-beta.0
+	k8s.io/api v0.32.0
+	k8s.io/apiextensions-apiserver v0.32.0
+	k8s.io/apimachinery v0.32.0
+	k8s.io/client-go v0.32.0
+	k8s.io/code-generator v0.32.0
 	k8s.io/kubernetes v1.29.0
-	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
+	k8s.io/utils v0.0.0-20241210054802-24370beab758
 	kubevirt.io/api v1.2.0
-	sigs.k8s.io/controller-runtime v0.16.1
-	sigs.k8s.io/controller-tools v0.11.4
-	sigs.k8s.io/yaml v1.3.0
+	sigs.k8s.io/controller-runtime v0.20.1
+	sigs.k8s.io/controller-tools v0.16.5
+	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
 	github.com/go-logr/logr v1.4.2
-	k8s.io/klog/v2 v2.120.1 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
 )
 
 require github.com/google/go-cmp v0.6.0 // indirect
 
-require k8s.io/component-base v0.29.4 // indirect
-
 require (
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/safchain/ethtool v0.4.0
 	go.uber.org/automaxprocs v1.5.3
-	k8s.io/kubectl v0.26.3
+	k8s.io/kubectl v0.32.0
 )
 
 require (
@@ -85,53 +83,56 @@ require (
 	github.com/Masterminds/sprig/v3 v3.2.3 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c // indirect
-	github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect
+	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/cilium/ebpf v0.17.1 // indirect
+	github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375 // indirect
+	github.com/cilium/proxy v0.0.0-20241115112946-fb67566cbd95 // indirect
+	github.com/cilium/statedb v0.3.4 // indirect
+	github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744 // indirect
 	github.com/coreos/go-iptables v0.7.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
-	github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
-	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
-	github.com/fatih/color v1.13.0 // indirect
-	github.com/felixge/httpsnoop v1.0.3 // indirect
-	github.com/fsnotify/fsnotify v1.7.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.0 // indirect
+	github.com/evanphx/json-patch/v5 v5.9.0 // indirect
+	github.com/fatih/color v1.18.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.8.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
-	github.com/go-openapi/analysis v0.21.4 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/inflect v0.19.0 // indirect
 	github.com/go-openapi/jsonpointer v0.21.0 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
-	github.com/gobuffalo/flect v0.3.0 // indirect
-	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/gobuffalo/flect v1.0.3 // indirect
+	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
+	github.com/google/btree v1.1.3 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
 	github.com/google/uuid v1.6.0 // indirect
+	github.com/gopacket/gopacket v1.3.1 // indirect
 	github.com/gorilla/handlers v1.5.1 // indirect
 	github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
-	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
 	github.com/huandu/xstrings v1.3.3 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.8 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/kr/pretty 
v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mackerelio/go-osstat v0.2.5 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -142,50 +143,47 @@ require ( github.com/openshift/api v0.0.0-20230503133300-8bbcb7ca7183 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/shirou/gopsutil/v3 v3.23.5 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/afero v1.10.0 // indirect - github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.16.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/viper v1.19.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/tigera/api v0.0.0-20230406222214-ca74195900cb // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect - github.com/vishvananda/netns v0.0.4 // indirect - github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.mongodb.org/mongo-driver v1.13.1 // indirect - go.uber.org/dig v1.17.0 // indirect + github.com/vishvananda/netns v0.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp 
v0.0.0-20230522175609-2e198f4a06a1 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/term v0.27.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.8.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/protobuf v1.36.1 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) diff --git a/go.sum b/go.sum index 2d92cac42c..31e958bf51 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,10 @@ +cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= +cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -16,7 +17,6 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= @@ -37,10 +37,9 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod 
h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= @@ -62,11 +61,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/agiledragon/gomonkey/v2 v2.11.0 h1:5oxSgA+tC1xuGsrIorR+sYiziYltmJyEZ9qA25b6l5U= github.com/agiledragon/gomonkey/v2 v2.11.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -74,25 +70,31 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= -github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= -github.com/cilium/cilium v1.14.1 h1:8yj+DVgv7bvBkqiKL3F/nPB6ddNTnnnbye6gznAsXH4= 
-github.com/cilium/cilium v1.14.1/go.mod h1:ghd9LkTSbRPtJal0Bsdq1ise+j5Ezy14xgaM2o3XLCI= -github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c h1:/NqY4jLr92f7VcUJe1gHS6CgSGWFUCeD2f4QhxO8tgE= -github.com/cilium/proxy v0.0.0-20230623092907-8fddead4e52c/go.mod h1:iOlDXIgPGBabS7J0Npbq8MC5+gfvUGSBISnxXIJjfgs= +github.com/cilium/cilium v1.17.1 h1:ntPELVbdXCuenYm2qbzLGzHnrBZe8WYyRi6C952+hsc= +github.com/cilium/cilium v1.17.1/go.mod h1:RPlqqedvumcC6VKrDI0fsJ+O1NWWFXQo1Fx3BhtnjFQ= +github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0= +github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8= +github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375 h1:EhoCO0AI3qJavnhfAls4w7VpVVpAr12wIh293sNA0hQ= +github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI= +github.com/cilium/proxy v0.0.0-20241115112946-fb67566cbd95 h1:iMn0++U3CDqoDINY5JLOhlPcjj3kW/xCmse+d+EZkOM= +github.com/cilium/proxy v0.0.0-20241115112946-fb67566cbd95/go.mod h1:/UoCz3gByKwF5gCHFMUhwmIN5/Pgmb8LTIrfBlmjGCo= +github.com/cilium/statedb v0.3.4 h1:nb5qNntmtaNljJD1r2s5zGOs62LP87AqLhFKIZH2rRE= +github.com/cilium/statedb v0.3.4/go.mod h1:hpcYZXvrOhmdBd02/N/WqxSjbeO2HYG8l3Z2fGq6Ioo= +github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744 h1:f+CgYUy2YyZ2EX31QSqf3vwFiJJQSAMIQLn4d3QQYno= +github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= -github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= -github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= +github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ= github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM= github.com/coreos/go-iptables v0.7.0 h1:XWM3V+MPRr5/q51NuWSgU0fqMad64Zyxs8ZUoMsamr8= @@ -100,11 +102,10 @@ github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFE github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= -github.com/cyphar/filepath-securejoin v0.2.4/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= @@ -113,34 +114,36 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.11.2-0.20200112161605-a7c079c43d51+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch 
v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -154,13 +157,10 @@ github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod 
h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= @@ -173,28 +173,26 @@ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL9 github.com/go-openapi/jsonreference v0.19.4-0.20191224164422-1f9748e5f45e/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.2 h1:elWyB9MacRzvIVgAZCBJmqTi7hBzU0hlKD4IvfX0Zl0= -github.com/go-openapi/runtime v0.26.2/go.mod h1:O034jyRZ557uJKzngbMDJXkcKJVzXJiymdSfgejrcRw= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.8 h1:VYBUoKYRLAlgKDrIxR/I0lKrztDQ0tuTDrbhLVP8Erg= -github.com/go-openapi/strfmt v0.21.8/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= -github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= 
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= github.com/go-swagger/go-swagger v0.30.4 h1:cPrWLSXY6ZdcgfRicOj0lANg72TkTHz6uv/OlUdzO5U= github.com/go-swagger/go-swagger v0.30.4/go.mod h1:YM5D5kR9c1ft3ynMXvDk2uo/7UZHKFEqKXcAL9f4Phc= github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= @@ -202,16 +200,17 @@ github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.m github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= -github.com/gobuffalo/flect v0.3.0 h1:erfPWM+K1rFNIQeRPdeEXxo8yFr/PO17lhRnS8FUrtk= -github.com/gobuffalo/flect v0.3.0/go.mod h1:5pf3aGnsvqvCj50AVni7mJJF8ICxGZ8HomberC3pXLE= +github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4= +github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -241,10 +240,11 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -258,7 +258,6 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -266,8 +265,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI= -github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk= +github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark= +github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -280,7 +279,6 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -295,7 +293,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopacket/gopacket v1.3.1 h1:ZppWyLrOJNZPe5XkdjLbtuTkfQoxQ0xyMJzQCqtqaPU= +github.com/gopacket/gopacket v1.3.1/go.mod 
h1:3I13qcqSpB2R9fFQg866OOgzylYkZxLTmkvcXhvf6qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -313,8 +312,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -327,11 +326,14 @@ github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM= +github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -344,10 +346,8 @@ github.com/kdoctor-io/kdoctor v0.2.0 h1:X2XHrTZ+RDuAJ+HiKZ1UHzIvFM3/Al6JDyg2nxtz github.com/kdoctor-io/kdoctor v0.2.0/go.mod h1:TxkjBwM4sdnOTHABxgL1gO68tlzHUnbiuRYBHRLKYTc= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/klauspost/compress v1.17.9 
h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -356,10 +356,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o= +github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -368,22 +372,23 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -398,7 +403,6 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -418,7 +422,6 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -436,68 +439,65 @@ github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPf github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= 
github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21 h1:PfiCACRd+dzB+gLQAY3ZekMo/56XZ1haOzEguVZ1ZYE= -github.com/petermattis/goid v0.0.0-20221018141743-354ef7f2fd21/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7 h1:Dx7Ovyv/SFnMFw3fD4oEoeorXc6saIiQ23LrGLth0Gw= +github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b h1:dW+UhJMzusDO6hqVGuCYeDxXWAzc7HnA9CsPN+uHPnA= github.com/projectcalico/api v0.0.0-20220722155641-439a754a988b/go.mod h1:Avoy1rTN1GfeisnHGf3WhQNqR+BuGOcwfNFsdWX6OHE= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod 
h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.4.0 h1:vq1i2HCjshJNywOXFZ1BpwIjyeFR/kvNdHiRzqSElDI= github.com/safchain/ethtool v0.4.0/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y= -github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU= +github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= 
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= -github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= -github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= +github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace h1:9PNP1jnUjRhfmGMlkXHjYPishpcw4jpSt/V/xYY3FMA= +github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73 h1:KzfBFPaiBnT6LBVhwrabJ59o/0Vsv/9CKszUgaz1TIs= github.com/spidernet-io/e2eframework v0.0.0-20240816061218-9ba7f53b8c73/go.mod h1:k0KYxyNjZYyEG1bsGzSbMx5Q+Z1H6oOjEq5qz9UlBzY= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -514,33 +514,26 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 
h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb h1:Y7r5Al3V235KaEoAzGBz9RYXEbwDu8CPaZoCq2PlD8w= github.com/tigera/api v0.0.0-20230406222214-ca74195900cb/go.mod h1:ZZghiX3CUsBAc0osBjRvV6y/eun2ObYdvSbjqXAoj/w= github.com/tigera/operator v1.33.0 h1:ml2d8+eADJHMxenBcMlMpC4ZRZ0bgvXGx9i6fQsKje0= github.com/tigera/operator v1.33.0/go.mod h1:wNvPsEdBNdcVBC7pDKz+D8+2Jg/1mTK0bm0Ob/xT8Ho= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739 h1:mi+RH1U/MmAQvz2Ys7r1/8OWlGJoBvF8iCXRKk2uym4= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20230621221334-77712cff8739/go.mod h1:0BeLktV/jHb2/Hmw1yLD7+yaIB8PDy11RCty0tCPWZg= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 h1:9fkQcQYvtTr9ayFXuMfDMVuDt4+BYG9FwsGLnrBde0M= +github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= +github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -549,11 +542,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= -github.com/yusufpapurcu/wmi v1.2.3/go.mod 
h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -561,41 +551,40 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/otel v1.25.0 h1:gldB5FfhRl7OJQbUHt/8s0a7cE8fbsPAtdpRaApKy4k= -go.opentelemetry.io/otel v1.25.0/go.mod h1:Wa2ds5NOXEMkCmUou1WA7ZBfLTHWIsp034OVD7AO+Vg= +go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= +go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/exporters/prometheus v0.44.0 h1:08qeJgaPC0YEBu2PQMbqU3rogTlyzpjhCI2b58Yn00w= go.opentelemetry.io/otel/exporters/prometheus v0.44.0/go.mod h1:ERL2uIeBtg4TxZdojHUwzZfIFlUIjZtxubT5p4h1Gjg= -go.opentelemetry.io/otel/metric v1.25.0 h1:LUKbS7ArpFL/I2jJHdJcqMGxkRdxpPHE0VU/D4NuEwA= -go.opentelemetry.io/otel/metric v1.25.0/go.mod h1:rkDLUSd2lC5lq2dFNrX9LGAbINP5B7WBkC78RXCpH5s= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= +go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= go.opentelemetry.io/otel/sdk/metric v1.24.0 h1:yyMQrPzF+k88/DbH7o4FMAs80puqd+9osbiBrJrz/w8= go.opentelemetry.io/otel/sdk/metric v1.24.0/go.mod h1:I6Y5FjH6rvEnTTAYQz3Mmv2kl6Ek5IIrmwTLqMrrOE0= -go.opentelemetry.io/otel/trace v1.25.0 h1:tqukZGLwQYRIFtSQM2u2+yfMVTgGVeqRLPUYx1Dq6RM= -go.opentelemetry.io/otel/trace v1.25.0/go.mod h1:hCCs70XM/ljO+BeQkyFnbK28SBIJ/Emuha+ccrCRT7I= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= +go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= 
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= @@ -609,8 +598,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= -golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -672,7 +661,6 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -681,7 +669,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -700,8 +687,8 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -727,18 +714,15 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -753,34 +737,30 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.1-0.20230616193735-e0c3b6e6ae3b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -799,17 +779,15 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -860,7 +838,6 @@ golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -903,8 +880,6 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -940,18 +915,16 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= +google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -992,6 +965,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1009,7 +984,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1024,45 +998,43 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= -k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0= -k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk= -k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod 
h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.30.0-beta.0 h1:/gaNLWP5ynEG0ExJ+4w2YCj5/L4MU66RsWEAKciy0/g= -k8s.io/apimachinery v0.30.0-beta.0/go.mod h1:wEJvNDlfxMRaMhyv38SIHIEC9hah/xuzqUUhxIyUv7Y= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/client-go v0.23.2/go.mod h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c= -k8s.io/client-go v0.29.4 h1:79ytIedxVfyXV8rpH3jCBW0u+un0fxHDwX5F9K8dPR8= -k8s.io/client-go v0.29.4/go.mod h1:kC1thZQ4zQWYwldsfI088BbK6RkxK+aF5ebV8y9Q4tk= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/code-generator v0.30.0-beta.0 h1:p+51J7CG4i6Cu/cyRrpXU7zT/XaHIHv7NK/mujr0gdY= -k8s.io/code-generator v0.30.0-beta.0/go.mod h1:kvx3eylE/Y/Z2dj8ncw3CR/zjQ37ou9lc3A0Pt8xX54= -k8s.io/component-base v0.29.4 h1:xeKzuuHI/1tjleu5jycDAcYbhAxeGHCQBZUY2eRIkOo= -k8s.io/component-base v0.29.4/go.mod h1:pYjt+oEZP9gtmwSikwAJgfSBikqKX2gOqRat0QjmQt0= +k8s.io/code-generator v0.32.0 h1:s0lNN8VSWny8LBz5t5iy7MCdgwdOhdg7vAGVxvS+VWU= +k8s.io/code-generator v0.32.0/go.mod h1:b7Q7KMZkvsYFy72A79QYjiv4aTz3GvW0f1T3UfhFq4s= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 h1:si3PfKm8dDYxgfbeA6orqrtLkvvIeH8UqffFJDl0bz4= +k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.26.3 h1:bZ5SgFyeEXw6XTc1Qji0iNdtqAC76lmeIIQULg2wNXM= -k8s.io/kubectl v0.26.3/go.mod h1:02+gv7Qn4dupzN3fi/9OvqqdW+uG/4Zi56vc4Zmsp1g= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod 
h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kubectl v0.32.0 h1:rpxl+ng9qeG79YA4Em9tLSfX0G8W0vfaiPVrc/WR7Xw= +k8s.io/kubectl v0.32.0/go.mod h1:qIjSX+QgPQUgdy8ps6eKsYNF+YmFOAO3WygfucIqFiE= k8s.io/kubernetes v1.29.0 h1:DOLN7g8+nnAYBi8JHoW0+/MCrZKDPIqAxzLCXDXd0cg= k8s.io/kubernetes v1.29.0/go.mod h1:9kztbUQf9stVDcIYXx+BX3nuGCsAQDsuClkGMpPs3pA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/api v1.2.0 h1:1f8XQLPl4BuHPsc6SHTPnYSYeDxucKCQGa8CdrGJSRc= kubevirt.io/api v1.2.0/go.mod h1:SbeR9ma4EwnaOZEUkh/lNz0kzYm5LPpEDE30vKXC5Zg= kubevirt.io/containerized-data-importer-api v1.57.0-alpha1 h1:IWo12+ei3jltSN5jQN1xjgakfvRSF3G3Rr4GXVOOy2I= @@ -1072,17 +1044,18 @@ kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.16.1 h1:+15lzrmHsE0s2kNl0Dl8cTchI5Cs8qofo5PGcPrV9z0= -sigs.k8s.io/controller-runtime v0.16.1/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= -sigs.k8s.io/controller-tools v0.11.4 h1:jqXJ/Xb6yBgbgcBbw1YoC3rC+Bt1XZWiLjj0ZHv/GrU= -sigs.k8s.io/controller-tools v0.11.4/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= +sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= +sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/controller-tools v0.16.5 h1:5k9FNRqziBPwqr17AMEPPV/En39ZBplLAdOwwQHruP4= +sigs.k8s.io/controller-tools v0.16.5/go.mod h1:8vztuRVzs8IuuJqKqbXCSlXcw+lkAv/M2sTpg55qjMY= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 8bf0e5b781..33c88305c4 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) - [FastCache](https://github.com/VictoriaMetrics/fastcache) +- [Ristretto](https://github.com/dgraph-io/ristretto) +- [Badger](https://github.com/dgraph-io/badger) diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index a9e0d45c9d..78bddf1cee 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -19,10 +19,13 @@ const ( // Store the primes in an array as well. // // The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. +// contiguous array for the assembly code. var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. +// +// Note that a zero-valued Digest is not ready to receive writes. +// Call Reset or create a Digest using New before calling other methods. type Digest struct { v1 uint64 v2 uint64 @@ -33,19 +36,31 @@ type Digest struct { n int // how much of mem is used } -// New creates a new Digest that computes the 64-bit xxHash algorithm. +// New creates a new Digest with a zero seed. func New() *Digest { + return NewWithSeed(0) +} + +// NewWithSeed creates a new Digest with the given seed. +func NewWithSeed(seed uint64) *Digest { var d Digest - d.Reset() + d.ResetWithSeed(seed) return &d } // Reset clears the Digest's state so that it can be reused. +// It uses a seed value of zero. func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] + d.ResetWithSeed(0) +} + +// ResetWithSeed clears the Digest's state so that it can be reused. +// It uses the given seed to initialize the state. +func (d *Digest) ResetWithSeed(seed uint64) { + d.v1 = seed + prime1 + prime2 + d.v2 = seed + prime2 + d.v3 = seed + d.v4 = seed - prime1 d.total = 0 d.n = 0 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index 9216e0a40c..78f95f2561 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -6,7 +6,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. // //go:noescape func Sum64(b []byte) uint64 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 26df13bba4..118e49e819 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -3,7 +3,7 @@ package xxhash -// Sum64 computes the 64-bit xxHash digest of b. +// Sum64 computes the 64-bit xxHash digest of b with a zero seed. 
func Sum64(b []byte) uint64 { // A simpler version would be // d := New() diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index e86f1b5fd8..05f5e7dfe7 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -5,7 +5,7 @@ package xxhash -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. func Sum64String(s string) uint64 { return Sum64([]byte(s)) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 1c1638fd88..cf9d42aed5 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -33,7 +33,7 @@ import ( // // See https://github.com/golang/go/issues/42739 for discussion. -// Sum64String computes the 64-bit xxHash digest of s. +// Sum64String computes the 64-bit xxHash digest of s with a zero seed. // It may be faster than Sum64([]byte(s)) by avoiding a copy. func Sum64String(s string) uint64 { b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 743732d3fe..ab4d345a1b 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -1,6 +1,7 @@ The following people, in alphabetical order, have either authored or signed off on commits in the Cilium repository: +a5r0n a5r0n@users.noreply.github.com Aaron Ecay aaron.ecay@sky.uk Àbéjídé Àyodélé abejideayodele@gmail.com Abirdcfly fp544037857@gmail.com @@ -9,8 +10,10 @@ Adam Korcz adam@adalogics.com Adam Wolfe Gordon awg@digitalocean.com adamzhoul adamzhoul186@gmail.com Aditi Ghag aditi@cilium.io +Aditya Kumar aditya.kumar60@infosys.com Aditya Purandare aditya.p1993@hotmail.com Aditya Sharma aditya.sharma@shopify.com +Adrian Berger adrian.berger@bedag.ch Adrien Trouillaud adrienjt@users.noreply.github.com Ahmed Bebars 1381372+abebars@users.noreply.github.com Akhil Velagapudi 4@4khil.com @@ -20,32 +23,54 @@ Alban Crequy alban@kinvolk.io Aleksander Mistewicz amistewicz@google.com Aleksei Zakharov zakharov.a.g@yandex.ru Alexander Alemayhu alexander@alemayhu.com -Alexander Berger alex-berger@users.noreply.github.com +Alexander Berger alex-berger@gmx.ch Alexander Block ablock84@gmail.com +Alexander Demichev demichev.alexander@gmail.com +Alexandre Barone abalexandrebarone@gmail.com Alexandre Perrin alex@isovalent.com Alexei Starovoitov alexei.starovoitov@gmail.com Alexey Grevtsev alexey.grevtcev@gmail.com +Alexis La Goutte alexis.lagoutte@gmail.com Alex Katsman alexkats@google.com Alex Romanov alex@romanov.ws Alex Szakaly alex.szakaly@gmail.com -Alok Kumar Singh alokaks601@gmail.com +Alex Waring alex.waring@starlingbank.com +alisdairbr alisdairbr@users.noreply.github.com +Alkama Hasan gl3118@myamu.ac.in +Alois Petutschnig alois@petutschnig.net +Alvaro Aleman alvaroaleman@users.noreply.github.com +Alvaro Muñoz pwntester@github.com +Alvaro Uria alvaro.uria@isovalent.com Amey Bhide amey@covalent.io +Amir Kheirkhahan amir.kheirkhahan@dbschenker.com amitmavgupta 115551423+amitmavgupta@users.noreply.github.com Amol Ambekar ambekara@google.com Amre Shakimov amre@covalent.io Anderson, David L david.l.anderson@intel.com Andor Nemeth andor_nemeth@swissre.com +Andreas Mårtensson andreas@addem.se +André Costa ancosta@gmail.com Andree Klattenhoff mail@andr.ee +Andrei Kvapil 
kvapss@gmail.com André Martins andre@cilium.io Andrew Bulford andrew.bulford@form3.tech +Andrew E. Timmes atimmes@seatgeek.com Andrew Holt andrew.holt@utmost.co +Andrew Li hui0787411@163.com Andrew Sauber 2046750+asauber@users.noreply.github.com Andrew Sy Kim kim.andrewsy@gmail.com +Andrew Titmuss iandrewt@icloud.com Andrey Devyatkin andrey.devyatkin@fivexl.io Andrey Klimentyev andrey.klimentyev@flant.com +Andrey Maltsev maltsev.andrey@gmail.com Andrey Voronkov voronkovaa@gmail.com +Andrii Iuspin andrii.iuspin@isovalent.com Andrzej Mamak nqaegg@gmail.com Andy Allred andy@punasusi.com +andychuang andy.chuang@shoplineapp.com +Angelo Poerio apoerio@cuebiq.com +Angelos Kolaitis neoaggelos@gmail.com +Animesh Pathak 53110238+Sonichigo@users.noreply.github.com Aniruddha Amit Dutta duttaaniruddha31@gmail.com Anish Shah anishshah@google.com Anit Gandhi anitgandhi@gmail.com @@ -55,31 +80,49 @@ Anthony Rabbito hello@anthonyrabbito.com Antoine Coetsier acr@exoscale.ch Antoine Legrand 2t.antoine@gmail.com Antonio Ojea aojea@google.com +Anton Ippolitov anton.ippolitov@datadoghq.com +Antoni Zawodny zawodny@google.com Anton Protopopov aspsk@isovalent.com Anton Tykhyy atykhyy@gmail.com +Antony Reynaud antony.reynaud@isovalent.com +Anubhab Majumdar anmajumdar@microsoft.com Anurag Aggarwal anurag.aggarwal@flipkart.com Archana Shinde archana.m.shinde@intel.com +Archer Wu archerwu9425@icloud.com +Ardika Bagus me@ardikabs.com Arika Chen eaglesora@gmail.com +Arkadiusz Kaliwoda (akaliwod) akaliwod@cisco.com Arnaud Meukam ameukam@gmail.com +Arseniy Belorukov a.belorukov@team.bumble.com +Artem Tokarev enjoy1288@gmail.com Arthur Chiao arthurchiao@hotmail.com ArthurChiao arthurchiao@hotmail.com Arthur Evstifeev mail@ap4y.me Arthur Outhenin-Chalandre arthur@cri.epita.fr Arvind Soni arvind@covalent.io +Ashley Reese ashley.reese@firma.seznam.cz Ashray Jain ashrayj@palantir.com Ashwin Paranjpe ashwin@covalent.io Assiya Khuzyakhmetova assiya.khuzyakhmetova@nu.edu.kz Atkins Chang atkinschang@gmail.com Augustas Berneckas a.berneckas@gmail.com +Aurelien Benoist aurelien.benoist@sony.com Austin Cawley-Edwards austin.cawley@gmail.com +AwesomePatrol AwesomePatrol@users.noreply.github.com ayesha khaliq ayeshakhaliqrana@gmail.com Ayush Dwivedi ayush.dwivedi@accuknox.com +bakito github@bakito.ch +Barış Ekin Yıldırım 101638632+beyildirim@users.noreply.github.com Barun Acharya barun1024@gmail.com Basit Mustafa basit.mustafa@gmail.com Beatriz Martínez beatriz@isovalent.com +behren mobile.niclas@gmail.com +Benjamin Gentil benjamin.gentil@infomaniak.com Benjamin Leggett benjamin.leggett@solo.io Benjamin Pineau benjamin.pineau@datadoghq.com +Benoît Knecht bknecht@protonmail.ch Benoît Sauvère benoit.sauvere@backmarket.com +Bernard Halas bernard.halas@berops.com Bill Mulligan billmulligan516@gmail.com Bingshen Wang bingshen.wbs@alibaba-inc.com Bingwu Yang detailyang@gmail.com @@ -88,88 +131,125 @@ Bob Bouteillier bob.bouteillier@datadoghq.com Bokang Li libokang.dev@gmail.com Bolun Zhao blzhao@google.com Boran Car boran.car@gmail.com -Boris Petrovic carnerito.b@gmail.com +Boris Petrovic boris.petrovic@united.cloud +Bowei Du bowei@google.com Brad Whitfield bradswhitfield@gmail.com +Brandon Ewing brandon.ewing@imc.com Brandon McNama brandonmcnama@outlook.com +Brian Payne payne.in.the.brian@gmail.com Brian Topping brian@coglative.com Bruno Miguel Custódio brunomcustodio@gmail.com Bryan Stenson bryan.stenson@okta.com +bzsuni bingzhe.sun@daocloud.io Calum MacRae hi@cmacr.ae +Cameron McAvoy cmcavoy@indeed.com Camilo Schoeningh 
camilo.schoeningh@dunnhumby.com Canh Ngo canhnt@gmail.com Carlos Andrés Rocha rchalumeau@magicleap.com Carlos Castro carlos.castro@jumo.world +Carlos Rodríguez Hernández carlosrh@vmware.com Carson Anderson carson.anderson@goteleport.com +Carson Yang yangchuansheng33@gmail.com Casey Callendrello cdc@isovalent.com +cdtzabra 22188574+cdtzabra@users.noreply.github.com Cezary Zawadka czawadka@google.com Chance Zibolski chance.zibolski@gmail.com Changyu Wang changyuwang@tencent.com Charles-Edouard Brétéché charled.breteche@gmail.com Charles-Henri Guérin charles-henri.guerin@zenika.com +Charles Uneze charlesniklaus@gmail.com +chaunceyjiang chaunceyjiang@gmail.com Chen Kang kongchen28@gmail.com +chentanjun tanjunchen20@gmail.com chenyahui chenyahui9@jd.com Chen Yaqi chenyaqi01@baidu.com chenyuezhou zcy.chenyue.zhou@gmail.com +chez-shanpu tomoki-sugiura@cybozu.co.jp +Chris Bannister c.bannister@gmail.com Chris Tarazi chris@isovalent.com Christian Hörtnagl christian2@univie.ac.at Christian Hüning christian.huening@finleap.com Christine Chen christine.chen@datadoghq.com +Christine Kim xtineskim@gmail.com +Christophe Jauffret christophe.jauffret@nutanix.com Christopher Biscardi chris@christopherbiscardi.com Christopher Schmidt fakod666@gmail.com +Christoph Puhl cpu@isovalent.com Chris Werner Rau cwrau@cwrau.info ChrsMark chrismarkou92@gmail.com Cilium Imagebot noreply@cilium.io +Cilium Release Bot noreply@cilium.io Cintia Sanchez Garcia cynthiasg@icloud.com +CJ Virtucio cjv287@gmail.com Claudia J. Kang claudiajkang@gmail.com Clément Delzotti elk1ns@outlook.fr cleverhu shouping.hu@daocloud.io cndoit18 cndoit18@outlook.com Connor Jones cj@cjmakes.com Cookie Wang luckymrwang@163.com +cornfeedhobo cornfeedhobo@fuzzlabs.org +Cory Snyder csnyder@1111systems.com Craig Box craig.box@gmail.com +crashiura crashiura@gmail.com cui fliter imcusg@gmail.com +cx 1249843194@qq.com Cynthia Thomas cynthia@covalent.io Cyril Corbon corboncyril@gmail.com Cyril Scetbon cscetbon@gmail.com czybjtu smartczy@outlook.com Dale Ragan dale.ragan@sap.com Dalton Hubble dghubble@gmail.com +Damian Sawicki dsawicki@google.com +Dan Everton deverton@godaddy.com Daneyon Hansen daneyon.hansen@solo.io Đặng Minh Dũng dungdm93@live.com +Daniel Bodky daniel.bodky@netways.de Daniel Borkmann daniel@iogearbox.net Daniel Dao dqminh89@gmail.com Daniel Finneran dan@thebsdbox.co.uk Daniel Hawton daniel.hawton@solo.io Daniel Qian qsj.daniel@gmail.com Daniel T. 
Lee danieltimlee@gmail.com +Daniel Vos danielvos@outlook.com Danni Skov Høglund skuffe@pwnz.dk Dan Sexton dan.b.sexton@gmail.com Dan Wendlandt dan@covalent.io Dario Mader maderdario@gmail.com -darox maderdario@gmail.com Darren Foo darren.foo@shopify.com Darren Mackintosh unixdaddy@gmail.com Darshan Chaudhary deathbullet@gmail.com +DaShaun 826271+dashaun@users.noreply.github.com David Bimmler david.bimmler@isovalent.com David Birks davidebirks@gmail.com +David Boslee david@goteleport.com David Bouchare david.bouchare@datadoghq.com David Calvert david@0xdc.me -David Chen davidchen94@outlook.com +David Cheng david.cheng@shopline.com +David Chosrova dchosrova@gmail.com David Donchez donch@dailymotion.com David Korczynski david@adalogics.com +David Leadbeater dgl@dgl.cx David Schlosnagle davids@palantir.com +David Swafford dswafford@coreweave.com David Wolffberg 1350533+wolffberg@users.noreply.github.com Dawn lx1960753013@gmail.com +dddddai dddwq@foxmail.com +Dean 22192242+saintdle@users.noreply.github.com Deepesha Burse deepesha.3007@gmail.com Deepesh Pathak deepshpathak@gmail.com +Denis GERMAIN dgermain@deezer.com Denis Khachyan khachyanda.gmail.com +Derek Chen derek_chen@live.com Derek Gaffney 17263955+gaffneyd4@users.noreply.github.com Deshi Xiao xiaods@gmail.com +deterclosed fliter@outlook.com Devarshi Sathiya devarshisathiya5@gmail.com +dhamick dharmicksaik@gmail.com Dharma Bellamkonda dharma.bellamkonda@gmail.com Didier Durand durand.didier@gmail.com Diego Casati diego.casati@gmail.com Dima Pugachev krabradosty@gmail.com +Dipankar Das dipankardas0115@gmail.com Divine Odazie dodazie@gmail.com Divya Mohan divya.mohan0209@gmail.com Divyansh Kamboj divyansh.kamboj@accuknox.com @@ -183,13 +263,19 @@ Dmitry Shurupov dmitry.shurupov@palark.com Dom Del Nano ddelnano@gmail.com Dom Goodwin dom.goodwin@capgemini.com Donia Chaiehloudj donia.cld@isovalent.com +Donnie McMahan jmcmaha1@gmail.com Dorde Lapcevic dordel@google.com Duffie Cooley dcooley@isovalent.com +dwalker-sabiogroup 100362969+dwalker-sabiogroup@users.noreply.github.com Dylan Reimerink dylan.reimerink@isovalent.com +egoust ustinov16@gmail.com Ekene Nwobodo nwobodoe71@gmail.com +Electron alokaks601@gmail.com El-Fadel Bonfoh elfadel@accuknox.com +Elias Hernandez elirayhernandez@gmail.com +eliranw 39266788+eliranw@users.noreply.github.com Ellie Springsteen ellie.springsteen@appian.com -Eloy Coto eloy.coto@gmail.com +Eloy Coto eloy.coto@acalustra.com Emin Aktas eminaktas34@gmail.com Emmanuel T Odeke emmanuel@orijtech.com Emre Savcı emre.savci@trendyol.com @@ -198,47 +284,64 @@ Eohyung Lee liquidnuker@gmail.com Eric Bailey e.bailey@sportradar.com Eric Ferreira ericarlos23@gmail.com Eric Hausig 16280871+ehausig@users.noreply.github.com +Eric Mountain eric.mountain@datadoghq.com Eric M. 
Yanulis eric@eyanulis.net Eric Ripa eric@ripa.io Erik Chang erik.chang@nordstrom.com Eugene Starchenko 17835122+eugenestarchenko@users.noreply.github.com Ewout Prangsma ewout@prangsma.net +Fabian Fischer fabian.fischer@isovalent.com Fabio Falzoi fabio.falzoi@isovalent.com Faiyaz Ahmed faiyaza@gmail.com Fankaixi Li fankaixi.li@bytedance.com Federico Hernandez f@ederi.co +feifeifei wangyufeimoon@gamil.com Felix Färjsjö felix.farjsjo@gmail.com fengshunli 1171313930@qq.com +ferenets ferenets@nebius.com Fernand Galiana fernand.galiana@gmail.com Feroz Salam feroz.salam@isovalent.com FeynmanZhou pengfeizhou@yunify.com +Filip Nikolic oss.filipn@gmail.com Fish-pro zechun.chen@daocloud.io Florian Koch f0@users.noreply.github.com Florian Lehner dev@der-flo.net +Foyer Unix foyerunix@foyer.lu Francois Allard francois@breathelife.com François Joulaud francois.joulaud@radiofrance.com Frank Villaro-Dixon frank.villaro@infomaniak.com Frederic Branczyk fbranczyk@gmail.com +Frederic Giloux frederic.giloux@isovalent.com +Fred Heinecke fred.heinecke@yahoo.com Fred Hsu fredlhsu@gmail.com Fredrik Lönnegren fredrik.lonnegren@gmail.com Fulvio Risso fulvio.risso@polito.it +Gabe Conradi gconradi@seatgeek.com +gailsuccess 157372272+gailsuccess@users.noreply.github.com Gaurav Genani h3llix.pvt@gmail.com Gaurav Yadav gaurav.dev.iiitm@gmail.com Gavin McNair gavin.mcnair@kaluza.com George Gaál gb12335@gmail.com George Kontridze gkontridze@plaid.com +Gerald Pape gerald@giantswarm.io Geyslan G. Bem geyslan@gmail.com GH action ghabot@does.not.exist.cilium.org Gianluca Arbezzano gianarb92@gmail.com Gilberto Bertin jibi@cilium.io gjmzj jmgaozz@hotmail.com +Glen Yu glen.yu@gmail.com Glib Smaga code@gsmaga.com Gobinath Krishnamoorthy gobinathk@google.com +GoGstickGo janilution@gmail.com Gowtham Sundara gowtham.sundara@rapyuta-robotics.com -Gray Lian gray.liang@isovalent.com +Gray Liang gray.liang@isovalent.com +guangwu guoguangwug@gmail.com Guilherme Oki guilherme.oki@wildlifestudios.com Guilherme Souza 101073+guilhermef@users.noreply.github.com Gunju Kim gjkim042@gmail.com +Gyutae Bae gyu.8ae@gmail.com +hacktivist123 akintayoshedrack@gmail.com +Hadrien Patte hadrien.patte@datadoghq.com Haitao Li lihaitao@gmail.com Haiyue Wang haiyue.wang@intel.com Hang Yan hang.yan@hotmail.com @@ -248,41 +351,73 @@ Hao Zhang hao.zhang.am.i@gmail.com Harsh Modi harshmodi@google.com harsimran pabla hpabla@isovalent.com Hart Hoover hart.hoover@gmail.com +Hector Monsalve hmonsalv@gmail.com Heiko Rothe me@heikorothe.com Hemanth Malla hemanth.malla@datadoghq.com Hemslo Wang hemslo.wang@gmail.com +Hongbo Miao 3375461+hongbo-miao@users.noreply.github.com +Hong Chen hong.chen.7219@gmail.com Hrittik hrittikcom@gmail.com +Huagong Wang wanghuagong@kylinos.cn huangxuesen huangxuesen@kuaishou.com Hui Kong hui.kong@qunar.com +Hunter Gregory 42728408+huntergregory@users.noreply.github.com Hunter Massey hmassey@tradestation.com +Husni Alhamdani dhanielluis@gmail.com +Huweicai i@huweicai.com hxysayhi 51870525+hxysayhi@users.noreply.github.com Ian Vernon ian@cilium.io Ifeanyi Ubah ify1992@yahoo.com +Igor Klemenski igor.klemenski@microsoft.com +ii2day ii2day.zoro@gmail.com +Iiqbal2000 iqbalhafizh2000@gmail.com +Ilia Chernov cherno8.ilya@gmail.com Ilya Dmitrichenko errordeveloper@gmail.com Ilya Shaisultanov ilya.shaisultanov@gmail.com +Ioannis Androulidakis androulidakis.ioannis@gmail.com +Isala Piyarisi mail@isala.me +ishuar ishansharma887@gmail.com Ivan Makarychev i.makarychev@tinkoff.ru Ivar Lazzaro ivarlazzaro@gmail.com +JabJ sajjadjafaribojd@gmail.com 
Jack-R-lantern tjdfkr2421@gmail.com +Jacob Henner code@ventricle.us Jacopo Nardiello jnardiello@users.noreply.github.com Jaff Cheng jaff.cheng.sh@gmail.com Jaime Caamaño Ruiz jcaamano@suse.com +James Bodkin james.bodkin@amphora.net James Brookes jbrookes@confluent.io +James Harr james.harr@gmail.com James Laverack james@isovalent.com James McShane james.mcshane@superorbital.io +James Strong strong.james.e@gmail.com Jan-Erik Rediger janerik@fnordig.de +Jan Jansen jan.jansen@gdata.de Jan Mraz strudelpi@pm.me +Jan Unger jan-emanuel.unger@gmx.de +janvi01 janvibajo1@gmail.com +Jared Ledvina jared.ledvina@datadoghq.com Jarno Rajahalme jarno@isovalent.com +Jason Aliyetti jaliyetti@gmail.com +Javier Vela fjvela@gmail.com +Jayesh Kumar 57744184+k8s-dev@users.noreply.github.com +Jean-Benoit Paux 9682558+jbpaux@users.noreply.github.com Jean Raby jean@raby.sh Jed Salazar jedsalazar@gmail.com Jef Spaleta jspaleta@gmail.com Jerry J. Muzsik jerrymuzsik@icloud.com +Jesse Haka haka.jesse@gmail.com Jess Frazelle acidburn@microsoft.com Jiang Wang jiang.wang@bytedance.com Jianlin Lv Jianlin.Lv@arm.com Jian Zeng anonymousknight96@gmail.com JieJhih Jhang jiejhihjhang@gmail.com +jignyasamishra iamjignyasa@gmail.com Jim Angel jimangel@google.com.com +Jimmy Song rootsongjc@gmail.com Jim Ntosas ntosas@gmail.com +Jingyuan Liang jingyuanliang@google.com +jinjiadu jinjiadu@aliyun.com JinLin Fu withlin@apache.org Jiong Wang jiong.wang@netronome.com jiuker 2818723467@qq.com @@ -296,64 +431,105 @@ Joey Espinosa jlouis.espinosa@gmail.com Johannes Liebermann johanan.liebermann@gmail.com John Fastabend john.fastabend@gmail.com John Gardiner Myers jgmyers@proofpoint.com +John Howard howardjohn@google.com +John Karoyannis karoyannis@yahoo.com +john-r-swyftx john.roche@swyftx.com.au John Watson johnw@planetscale.com John Zheng johnzhengaz@gmail.com Jomen Xiao jomenxiao@gmail.com +Jonas Badstübner jonas@jb.software +Jonas Krüger Svensson jonas.svensson@intility.no Jonathan Davies jpds@protonmail.com +Jonathan Grahl jonathan@keyholders.io +Jonathan Siegel 248302+usiegj00@users.noreply.github.com Jones Shi shilei@hotstone.com.cn +Jonny jonny@linkpool.io +Jooho Lee jhlee@si-analytics.ai +Jordan Rife jrife@google.com Jorik Jonker jorik.jonker@eu.equinix.com Joseph-Irving joseph.irving500@gmail.com +Joseph Ligier joseph.ligier@accenture.com Joseph Sheng jiajun.sheng@microfocus.com Joseph Stevens thejosephstevens@gmail.com +Josh Soref 2119212+jsoref@users.noreply.github.com +joshua 54235339+sujoshua@users.noreply.github.com Joshua Roppo joshroppo@gmail.com +jshr-w shjayaraman@microsoft.com Juan Jimenez-Anca cortopy@users.noreply.github.com Juha Tiensyrjä juha.tiensyrja@ouraring.com Julian Wiedmann jwi@isovalent.com Julien Balestra julien.balestra@datadoghq.com +Julien D barajus@users.noreply.github.com Julien Kassar github@kassisol.com +Julius Hinze jhinze@cisco.com Jun Chen answer1991.chen@gmail.com Junli Ou oujunli306@gmail.com Jussi Maki jussi@isovalent.com +Jussi Mäki jussi.maki@isovalent.com +kachi-bits 76791974+kachi-bits@users.noreply.github.com +Kaczyniec kaczynska@google.com kahirokunn okinakahiro@gmail.com Kaito Ii kaitoii1111@gmail.com +Kaloyan Yordanov Kaloyan.Yordanov@starlizard.com Kamil Lach kamil.lach.rs@gmail.com Karim Naufal rimkashox@gmail.com Karl Heins karlheins@northwesternmutual.com Karsten Nielsen karsten.nielsen@ingka.ikea.com Katarzyna Borkmann kasia@iogearbox.net +Katarzyna Lach katarzynalach@google.com +Katie Struthers 99215338+katiestruthers@users.noreply.github.com Kazuki Suda kazuki.suda@gmail.com 
Keisuke Kondo k.gryphus@gmail.com Kenshin Chen smwyzi@qq.com kerthcet kerthcet@gmail.com Kevin Burke kevin@burke.dev Kevin Holditch 82885135+kevholditch-f3@users.noreply.github.com +Kevin Reeuwijk kevin.reeuwijk@spectrocloud.com Kiran Bondalapati kiran@bondalapati.com Kir Kolyshkin kolyshkin@gmail.com Koichiro Den den@klaipeden.com Konstantin Aksenov konstantin.aksenov@flant.com Kornilios Kourtis kornilios@isovalent.com +kwakubiney kebiney@hotmail.com +l1b0k libokang.lbk@alibaba-inc.com Laurent Bernaille laurent.bernaille@datadoghq.com +Lawrence Gadban lawrence.gadban@solo.io +ldelossa louis.delos@gmail.com Lehner Florian dev@der-flo.net +Leiw steven.l.wang@linux.intel.com Leonard Cohnen lc@edgeless.systems leonliao xiaobo.liao@gmail.com +Liam Parker liamchat500@gmail.com Liang Zhou zhoul110@chinatelecom.cn Li Chengyuan chengyuanli@hotmail.com +Li Chun lichun823@gmail.com LiHui andrewli@yunify.com Lin Dong lindongld@google.com Lin Sun lin.sun@solo.io +Lior Lieberman liorlieberman@google.com Lior Rozen liorr@tailorbrands.com Liu Qun qunliu@zyhx-group.com liuxu liuxu623@gmail.com Livingstone S E livingstone.s.e@gmail.com Li Yiheng lyhutopi@gmail.com +Liyi Huang pdshly@gmail.com Liz Rice liz@lizrice.com +log1cb0mb nabeelnrana@gmail.com +Loïc Blot nerzhul@users.noreply.github.com LongHui Li longhui.li@woqutech.com +loomkoom 29258685+loomkoom@users.noreply.github.com Lorenz Bauer lmb@isovalent.com Lorenzo Fundaró lorenzofundaro@gmail.com -Louis DeLosSantos louis@isovalent.com +Louis DeLosSantos louis.delos@isovalent.com lou-lan loulan@loulan.me +Lucas Fernando Cardoso Nunes lucasfc.nunes@gmail.com +Lucas Leblow lucasleblow@mailbox.org +Lucas Rattz lucas.rattz@syself.com lucming 2876757716@qq.com -Maartje Eyskens maartje@eyskens.me +Ludovic Ortega ludovic.ortega@adminafk.fr +Lukas Stehlik stehlik.lukas@gmail.com +Luke Livingstone luke.livingstone@imaginecurve.com +Maartje Eyskens maartje.eyskens@isovalent.com Maciej Fijalkowski maciej.fijalkowski@intel.com Maciej Kwiek maciej@isovalent.com Maciej Skrocki maciejskrocki@google.com @@ -361,62 +537,87 @@ Madhu Challa madhu@cilium.io Madhusudan.C.S madhusudancs@gmail.com Mahadev Panchal mahadev.panchal@benisontech.com MaiReo sawako.saki@gmail.com +Mais mai.saleh@siemens.com Maksym Lushpenko iviakciivi@gmail.com Manali Bhutiyani manali@covalent.io Mandar U Jog mjog@google.com Manuel Buil mbuil@suse.com +Manuel Rüger manuel@rueg.eu Manuel Stößel manuel.stoessel@t-systems.com -Marcel Zieba marcel.zieba@isovalent.com +Marc Barry 4965634+marc-barry@users.noreply.github.com +Marcelo Moreira de Mello tchello.mello@gmail.com +Marcel Zięba marcel.zieba@isovalent.com Marcin Skarbek git@skarbek.name Marcin Swiderski forgems@gmail.com +Marco Aurelio Caldas Miranda 17923899+macmiranda@users.noreply.github.com Marco Hofstetter marco.hofstetter@isovalent.com Marco Iorio marco.iorio@isovalent.com Marco Kilchhofer mkilchhofer@users.noreply.github.com +Marc 'risson' Schmitt marc.schmitt@risson.space Marc Stulz m@footek.ch +Marc Suñé marc.sune@isovalent.com Marek Chodor mchodor@google.com Marga Manterola marga@isovalent.com +Marino Wijay 45947861+distributethe6ix@users.noreply.github.com Mario Constanti mario@constanti.de Marius Gerling marius.gerling@uniberg.com Mark deVilliers markdevilliers@gmail.com Mark Pashmfouroush mark@isovalent.com +Mark St John markstjohn@google.com Markus Blaschke mblaschke82@gmail.com +Markus Nilsson markus.nilsson@yubico.com Martin Charles martincharles07@gmail.com Martin Koppehel martin.koppehel@st.ovgu.de Martin Odstrcilik 
martin.odstrcilik@gmail.com -Martynas Pumputis m@lambda.lt +Martynas Pumputis martynas@isovalent.com Marvin Gaube dev@marvingaube.de Marwin Baumann 56264798+marwinbaumannsbp@users.noreply.github.com Matej Gera matejgera@gmail.com Mathias Herzog mathu@gmx.ch -Mathieu Parent math.parent@gmail.com +Mathieu Parent mathieu.parent@insee.fr Mathieu Tortuyaux mtortuyaux@microsoft.com +Mathis Joffre 51022808+Joffref@users.noreply.github.com Matt Anderson matanderson@equinix.com Matthew Fenwick mfenwick100@gmail.com Matthew Gumport me@gum.pt +Matthew Hembree 47449406+matthewhembree@users.noreply.github.com +Matthias Baur m.baur@syseleven.de Matthieu Antoine matthieu.antoine@jumo.world +Matthieu MOREL matthieu.morel35@gmail.com Matt Layher mdlayher@gmail.com +Matt Oswalt matt@oswalt.dev Matyáš Kroupa kroupa.matyas@gmail.com Mauricio Vásquez mauricio@kinvolk.io Maxime Brunet max@brnt.mx Maxime Visonneau maxime.visonneau@gmail.com Maximilian Bischoff maximilian.bischoff@inovex.de Maximilian Mack max@mack.io -Maxim Mikityanskiy maxim@isovalent.com +Maxim Krasilnikov m.krasilnikov@space307.com +Maxim Mikityanskiy maxtram95@gmail.com Max Körbächer 16919345+mkorbi@users.noreply.github.com +MeherRushi sudharushi0@gmail.com Melissa Peiffer mbp83@nau.edu +Mengxin Liu mengxin@alauda.io Michael Aspinwall maspinwall@google.com Michael Fischer fiscmi@amazon.com Michael Fornaro 20387402+xUnholy@users.noreply.github.com Michael Francis michael@melenion.com Michael Kashin mmkashin@gmail.com +Michael Mykhaylov 32168861+mikemykhaylov@users.noreply.github.com Michael Petrov michael@openai.com Michael Ryan Dempsey bluestealth@bluestealth.pw +michaelsaah michael.saah@segment.com +Michael Saah msaah@twilio.com Michael Schubert michael@kinvolk.io Michael Vorburger vorburger@redhat.com Michal Rostecki vadorovsky@gmail.com +Michal Siwinski siwy@google.com Michi Mutsuzaki michi@isovalent.com +Mikael Johansson mik.json@gmail.com Mike Fedosin mfedosin@gmail.com MikeLing sabergeass@gmail.com +Mike Mwanje mwanjemike767@gmail.com +Misha Bragin bangvalo@gmail.com Mitch Hulscher mitch.hulscher@lib.io Moh Ahmed moh.ahmed@cengn.ca Mohammad Yosefpor 47300215+m-yosefpor@users.noreply.github.com @@ -424,6 +625,9 @@ Mohit Marathe mohitmarathe23@gmail.com Moritz Eckert m1gh7ym0@gmail.com Moritz Johner beller.moritz@googlemail.com Moshe Immerman moshe.immerman@vitalitygroup.com +mrproliu 741550557@qq.com +mvtab mvtabilitas@protonmail.com +Natalia Reka Ivanko natalia@isovalent.com Nate Sweet nathanjsweet@pm.me Nate Taylor ntaylor1781@gmail.com Nathan Bird njbird@infiniteenergy.com @@ -431,147 +635,229 @@ nathannaveen 42319948+nathannaveen@users.noreply.gith Nathan Perkins nperkins487@gmail.com Nathan Taylor ntaylor1781@gmail.com Navin Kukreja navin.kukreja@isovalent.com +Nebula 40148908+nebula-it@users.noreply.github.com necatican necaticanyildirim@gmail.com Neela Jacques neela@isovalent.com Neil Seward neil.seward@elasticpath.com Neil Wilson neil@aldur.co.uk +Neutrollized glen.yu@gmail.com +Nicholas Lane nicklaneovi@gmail.com Nick M 4718+rkage@users.noreply.github.com Nick Young nick@isovalent.com Niclas Mietz solidnerd@users.noreply.github.com Nico Berlee nico.berlee@on2it.net Nicolas Busseneau nicolas@isovalent.com -Nico Vibert nicolas.vibert@isovalent.com +Nicolò Ciraci ciraci.nicolo@gmail.com +Nico Vibert nvibert@cisco.com Nikhil Jha nikhiljha@users.noreply.github.com Nikhil Sharma nikhilsharma230303@gmail.com Nikolay Aleksandrov nikolay@isovalent.com Nikolay Nikolaev nicknickolaev@gmail.com +Nimisha Mehta nimishamehta5@gmail.com Nirmoy 
Das ndas@suse.de Nishant Burte nburte@google.com Nitish Malhotra nitishm@microsoft.com +Nitish Tiwari nitish@parseable.io Noel Georgi git@frezbo.dev nrnrk noriki6t@gmail.com +nuwa nuwa@yannis.codes +nxyt lolnoxy@gmail.com Odin Ugedal ougedal@palantir.com Oilbeater mengxin@alauda.io Oksana Baranova oksana.baranova@intel.com +Olaf Klischat olaf.klischat@gmail.com Ole Markus With o.with@sportradar.com +Olga Mirensky 5200844+olga-mir@users.noreply.github.com Oliver Hofmann 91730056+olinux-dev@users.noreply.github.com Oliver Ni oliver.ni@gmail.com Oliver Wang a0924100192@gmail.com Omar Aloraini ooraini.dev@gmail.com Ondrej Blazek ondrej.blazek@firma.seznam.cz +Ondrej Sika ondrej@ondrejsika.com +oneumyvakin oneumyvaking@mail.ru +Oshan Galwaduge oshan304@gmail.com Osthues osthues.matthias@gmail.com +Ovidiu Tirla otirla@google.com Pablo Ruiz pablo.ruiz@gmail.com Paco Xu paco.xu@daocloud.io Parth Patel parth.psu@gmail.com Patrice Chalin chalin@cncf.io Patrice Peterson patrice.peterson@mailbox.org Patrick Mahoney pmahoney@greenkeytech.com +Patrick O’Brien patrick.obrien@thetradedesk.com +Patrick Pichler git@patrickpichler.dev +Patrick Reich patrick@neodyme.io Pat Riehecky riehecky@fnal.gov Patrik Cyvoct patrik@ptrk.io +Paul Arah paularah.self@gmail.com +Paul Bailey spacepants@users.noreply.github.com Paul Chaignon paul.chaignon@gmail.com +Paulo Castello da Costa pcastello@google.com Paulo Gomes pjbgf@linux.com Pavel Pavlov 40396270+PavelPavlov46@users.noreply.github.com +Pavel Tishkov pavel.tishkov@flant.com Paweł Prażak pawelprazak@users.noreply.github.com +Pedro Ignacio pedroig100.pi@gmail.com Peiqi Shi uestc.shi@gmail.com +Pelle van Gils pelle@vangils.dev pengbinbin1 pengbiny@163.com Pengfei Song pengfei.song@daocloud.io Peter Jausovec peter.jausovec@solo.io +Peter Matulis pmatulis@gmail.com Peter Slovak slovak.peto@gmail.com +Petr Baloun petr.baloun@firma.seznam.cz Philippe Lafoucrière philippe.lafoucriere@gmail.com Philipp Gniewosz philipp.gniewosz@daimlertruck.com -Philip Schmid philip.schmid@isovalent.com +Philip Schmid phisch@cisco.com Pierre-Yves Aillet pyaillet@gmail.com +Pieter van der Giessen pieter@pionative.com +Pooja Trivedi poojatrivedi@gmail.com +Prabhakhar Kaliyamurthy (PK) prabhakhar@gmail.com Pranavi Roy pranvyr@gmail.com Prashanth.B beeps@google.com +Pratyay Banerjee putubanerjee23@gmail.com Pratyush Singhal psinghal20@gmail.com +Praveen Krishna pkrishn@google.com Priya Sharma Priya.Sharma6693@gmail.com Qasim Sarfraz qasim.sarfraz@esailors.de Qifeng Guo qifeng.guo@daocloud.io Qingchuan Hao qinhao@microsoft.com -Quentin Monnet quentin@isovalent.com +Quang Nguyen nguyenquang@microsoft.com +Quan Wei quanwei.153@bytedance.com +Quentin Monnet qmo@qmon.net Raam ram29@bskyb.com Rachid Zarouali rachid.zarouali@sevensphere.io +Rafael da Fonseca rafael.fonseca@wildlifestudios.com +Raffael Sahli raffael.sahli@doodle.com Raghu Gyambavantha raghug@bld-ml-loan4.olympus.f5net.com Rahul Jadhav nyrahul@gmail.com Rahul Joshi rkjoshi@google.com +rahulk789 rahul.u.india@gmail.com Rajat Jindal rajatjindal83@gmail.com +Ralph Bankston ralph@isovalent.com +Ramses Rodriguez Martinez ramses@nextdigital.es Raphael Campos raphael@accuknox.com Raphaël Pinson raphael@isovalent.com Rastislav Szabo rastislav.szabo@isovalent.com Rauan Mayemir rauan@mayemir.io +rawmind0 rawmind@gmail.com Ray Bejjani ray.bejjani@gmail.com Raymond de Jong raymond.dejong@isovalent.com Reilly Brogan reilly@reillybrogan.com Rei Shimizu Shikugawa@gmail.com +Remi Gelinas mail@remigelin.as Rémy Léone rleone@scaleway.com Renat Tuktarov 
yandzeek@gmail.com +Renaud Gaubert renaud@openai.com Rene Luria rene@luria.ch +René Veenhuis re.veenhuis@gmail.com Rene Zbinden rene.zbinden@postfinance.ch +renyunkang rykren1998@gmail.com +Richard Lavoie richard.lavoie@logmein.com +Richard Tweed RichardoC@users.noreply.github.com Ricky Ho horicky78@gmail.com Rio Kierkels riokierkels@gmail.com -Robin Gögge r.goegge@isovalent.com +Robin Elfrink robin@15augustus.nl +Robin Gögge r.goegge@gmail.com Robin Hahling robin.hahling@gw-computing.net +Rob Scott robertjscott@google.com Rocky Chen 40374064+rockc2020@users.noreply.github.com Rodrigo Chacon rochacon@gmail.com +Rohan George 83759161+rohan-changejar@users.noreply.github.com Romain Lenglet rlenglet@google.com Roman Ptitcyn romanspb@yahoo.com +Romuald Zdebskiy zdebskiy@hotmail.com Ronald van Zantvoort the.loeki@gmail.com Ross Guarino rssguar@gmail.com +roykharman roykharman@gmail.com +Rudrakh Panigrahi rudrakh97@gmail.com +Rui Cao caorui.io@bytedance.com +Rui Chen rui@chenrui.dev Rui Gu rui@covalent.io Rushikesh Butley rushikeshbutley@gmail.com Russell Bryant russell@russellbryant.net +rusttech gopher@before.tech Ryan Drew ryan.drew@isovalent.com Ryan McNamara rmcnamara@palantir.com +ryebridge 88094554+ryebridge@users.noreply.github.com Sachin Maurya sachin.maurya7666@gmail.com Sadik Kuzu sadikkuzu@hotmail.com Sahid Orentino Ferdjaoui sahid.ferdjaoui@industrialdiscipline.com +saiaunghlyanhtet saiaunghlyanhtet2003@gmail.com Saikrishna Edupuganti saikrishna.edupuganti@intel.com Saim Safdar 59512053+Saim-Safdar@users.noreply.github.com Saiyam Pathak saiyam@civo.com Salvatore Mazzarino salvatore@accuknox.com +Sam Day me@samcday.com Sami Yessou fnzv@users.noreply.github.com +Samuel Lang gh@lang-sam.de Samuel Torres samuelpirestorres@gmail.com Sander Timmerman stimmerman@schubergphilis.com Sandipan Panda samparksandipan@gmail.com Sarah Corleissen sarah.corleissen@isovalent.com Sarvesh Rangnekar sarveshr@google.com +Sascha Grunert sgrunert@redhat.com +Satish Matti smatti@google.com Scott Albertson ascottalbertson@gmail.com Sean Winn sean@isovalent.com +Sebastian Gaiser sebastiangaiser@users.noreply.github.com Sebastian Nickel nick@nine.ch Sebastian Rojo arpagon@gmail.com Sebastian Wicki sebastian@isovalent.com +Sebastien Lafond sebastien.lafond@cdiscount.com Sebastien Thomas prune@lecentre.net Sergey Generalov sergey@isovalent.com Sergey Monakhov monakhov@puzl.ee Sergey Shevchenko sergeyshevchdevelop@gmail.com Sergio Ballesteros snaker@locolandia.net +sh2 shawnhxh@outlook.com Shane Utt shaneutt@linux.com +shankeerthan-kasilingam shankeerthan1995@gmail.com Shantanu Deshpande shantanud106@gmail.com +Shardul Srivastava shardul.srivastava007@gmail.com Shunpoco tkngsnsk313320@gmail.com Sigurd Spieckermann sigurd.spieckermann@gmail.com +Simone Magnani simone.magnani@isovalent.com Simone Sciarrati s.sciarrati@gmail.com +Simon Felding 45149055+simonfelding@users.noreply.github.com +Simon Gerber simon.gerber@vshn.ch +Simon Lackerbauer mail@ciil.io Simon Pasquier spasquier@mirantis.com +Sjouke de Vries info@sdvservices.nl +SkalaNetworks contact@skala.network +sknop 118932232+sknop-cgn@users.noreply.github.com Smaine Kahlouch smainklh@gmail.com +soggiest nicholas@isovalent.com +Song 1120344670@qq.com spacewander spacewanderlzx@gmail.com +Sridhar K N Rao sridharkn@u.nus.edu +ssttehrani ssttehrani@gmail.com Stacy Kim stacy.kim@ucla.edu +Stefan Zwanenburg stefan@zwanenburg.info Stephen Martin lockwood@opperline.com +Steve Gargan sgargan@qualtrics.com +Steven Armstrong steven.armstrong@id.ethz.ch Steven Ceuppens 
steven.ceuppens@icloud.com Steven Dake steven.dake@gmail.com Steven Johnson sjdot@protonmail.com +Steven Kreitzer skre@skre.me Steven Normore snormore@digitalocean.com Steven Shuang stevenshuang521@gmail.com Stevo Slavić sslavic@gmail.com Stijn Smits stijn@stijn98s.nl Strukov Anton anstrukov@luxoft.com Stuart Preston mail@stuartpreston.net +Su Fei sofat1989@126.com Sugang Li sugangli@google.com Sven Haardiek sven.haardiek@uni-muenster.de Swaminathan Vasudevan svasudevan@suse.com Taeung Song treeze.taeung@gmail.com +Taizeng Wu wutaizeng@gmail.com Takayoshi Nishida takayoshi.nishida@gmail.com -Tamilmani tamanoha@microsoft.comwq +Tamilmani tamanoha@microsoft.com Tam Mach tam.mach@cilium.io Tasdik Rahman prodicus@outlook.com +Taylor tskinn12@gmail.com Te-Yu Chang dale.teyuchang@gmail.com Thales Paiva thales@accuknox.com TheAifam5 theaifam5@gmail.com @@ -580,35 +866,54 @@ Thiago Navarro navarro@accuknox.com Thi Van Le vannnyle@gmail.com Thomas Bachman tbachman@yahoo.com Thomas Balthazar thomas@balthazar.info +thomas.chen thomas.chen@trustasia.com Thomas Gosteli thomas.gosteli@protonmail.com Thomas Graf thomas@cilium.io +Thorben von Hacht tvonhacht@apple.com +Thorsten Pfister thorsten.pfister@form3.tech tigerK yanru.lv@daocloud.io +Tilusch til.heini@swisscom.com Tim Horner timothy.horner@isovalent.com Timo Beckers timo@isovalent.com Timo Reimann ttr314@googlemail.com Timur Solodovnikov timur.solodovnikov@clickhouse.com +tkna naoki-take@cybozu.co.jp Tobias Brunner tobias.brunner@vshn.ch Tobias Klauser tobias@cilium.io Tobias Kohlbau tobias@kohlbau.de Tobias Mose mosetobias@gmail.com +Tomas Leypold tomas@leypold.cz Tom Hadlaw tom.hadlaw@isovalent.com -Tomoki Sugiura cheztomo513@gmail.com +Tommo Cowling 952241+tlcowling@users.noreply.github.com +Tomoki Sugiura tomoki-sugiura@cybozu.co.jp Tomoya Fujita Tomoya.Fujita@sony.com -Tom Payne twpayne@gmail.com +Tom Payne tom@isovalent.com +Toni Tauro toni.tauro@adfinis.com Tony Lambiris tony@criticalstack.com Tony Lu tonylu@linux.alibaba.com Tony Norlin tony.norlin@localdomain.se +Torben Tretau torben@tretau.net Tore S. 
Loenoey tore.lonoy@gmail.com +ToroNZ tomas-github@maggio.nz toVersus toversus2357@gmail.com Travis Glenn Hansen travisghansen@yahoo.com Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com Trevor Tao trevor.tao@arm.com +Tyler Auerbeck tylerauerbeck@users.noreply.github.com +u5surf u5.horie@gmail.com +Ubuntu ubuntu@ip-172-31-10-3.eu-west-3.compute.internal Umesh Keerthy B S umesh.freelance@gmail.com +Umesh Keerthy umesh.freelance@gmail.com +usiegl00 50933431+usiegl00@users.noreply.github.com Vadim Ponomarev velizarx@gmail.com +vakr vakr@microsoft.com Valas Valancius valas@google.com Vance Li vanceli@tencent.com +Vanilla osu_Vanilla@126.com +Vasu Dasari vasudasari@google.com +verysonglaa 39988258+verysonglaa@users.noreply.github.com Vigneshwaren Sunder vickymailed@gmail.com -viktor-kurchenko 69600804+viktor-kurchenko@users.noreply.github.com +Viktor Kurchenko viktor.kurchenko@isovalent.com Viktor Kuzmin kvaster@gmail.com Viktor Oreshkin imselfish@stek29.rocks Ville Ojamo bluikko@users.noreply.github.com @@ -624,15 +929,18 @@ Vlad Ungureanu vladu@palantir.com Wang Dong xdragon007@gmail.com Wang Li wangli09@kuaishou.com Wang Zhen lazybetrayer@gmail.com +Wanlin Du wanlindu@google.com Wayne Haber whaber@gitlab.com Wazir Ahmed wazir@accuknox.com Weilong Cui cuiwl@google.com Wei Yang wei.yang@daocloud.io Weizhou Lan weizhou.lan@daocloud.io Wenhu Wang wang15691700816@gmail.com +wenlxie xwlpt@126.com Wenxian Li wofanli@gmail.com Will Daly widaly@microsoft.com Will Deuschle wdeuschle@palantir.com +Willi Eggeling willi.eggeling@cloutomate.de Will Stewart will@northflank.com Wojtek Czekalski me@wczekalski.com Wongyu Lee kyu21@outlook.com @@ -641,12 +949,16 @@ Xiaoqing xiaoqingnb@gmail.com Xiaoyang Zhu zhuxiaoyang1996@gmail.com XiaozhiD-web chuanzhi.dai@daocloud.io Xin Li xin.li@daocloud.io +xinwenqiang xinwenqiang@bytedance.com Xinyuan Zhang zhangxinyuan@google.com yanggang gang.yang@daocloud.io yanhongchang yanhongchang@100tal.com +Yann ILAS yann.ilas@gmail.com Yash Shetty yashshetty@google.com Ye Sijun junnplus@gmail.com Yiannis Yiakoumis yiannis@selfienetworks.com +Yingnan Zhang 342144303@qq.com +yogesh1801 yogeshsingla481@gmail.com Yongkun Gui ygui@google.com Yosh de Vos yosh@elzorro.nl youhonglian honglian.you@daocloud.io @@ -657,16 +969,23 @@ Yugo Kobayashi kobdotsh@gmail.com yulng wei.yang@daocloud.io Yurii Dzobak yurii.dzobak@lotusflare.com Yurii Komar Subreptivus@gmail.com -Yusuke Suzuki yusuke-suzuki@cybozu.co.jp +Yusho Yamaguchi yusho.yamaguchi@sony.com +Yusuke Suzuki yusuke.suzuki@isovalent.com Yutaro Hayakawa yutaro.hayakawa@isovalent.com Yves Blusseau yves.blusseau@acoss.fr +yylt yang8518296@163.com Zang Li zangli@google.com zhanghe9702 zhanghe9702@163.com Zhang Qiang qiangzhang@qiyi.com +zhaojizhuang 571130360@qq.com +zhikuodu duzhk@qq.com Zhiyuan Hou zhiyuan2048@linux.alibaba.com zhouhaibing089 zhouhaibing089@gmail.com Zhu Yan hackzhuyan@gmail.com +Zijian Zhang zz2795@columbia.edu Zisis Lianas zl@consol.de +zufardhiyaulhaq zufardhiyaulhaq@gmail.com +尤理衡 (Li-Heng Yu) 007seadog@gmail.com The following additional people are mentioned in commit logs as having provided helpful bug reports, suggestions or have otherwise provided value to the diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go index 194e031d02..ad99fc8d06 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/bgp_client.go @@ -12,6 +12,7 @@ import ( "fmt" 
"github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new bgp API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new bgp API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for bgp API */ @@ -28,13 +54,17 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods type ClientService interface { GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (*GetBgpPeersOK, error) + GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) + + GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) + SetTransport(transport runtime.ClientTransport) } @@ -81,6 +111,86 @@ func (a *Client) GetBgpPeers(params *GetBgpPeersParams, opts ...ClientOption) (* panic(msg) } +/* +GetBgpRoutePolicies lists b g p route policies configured in b g p control plane + +Retrieves route policies from BGP Control Plane. 
+*/ +func (a *Client) GetBgpRoutePolicies(params *GetBgpRoutePoliciesParams, opts ...ClientOption) (*GetBgpRoutePoliciesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetBgpRoutePoliciesParams() + } + op := &runtime.ClientOperation{ + ID: "GetBgpRoutePolicies", + Method: "GET", + PathPattern: "/bgp/route-policies", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetBgpRoutePoliciesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetBgpRoutePoliciesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetBgpRoutePolicies: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + +/* +GetBgpRoutes lists b g p routes from b g p control plane r i b + +Retrieves routes from BGP Control Plane RIB filtered by parameters you specify +*/ +func (a *Client) GetBgpRoutes(params *GetBgpRoutesParams, opts ...ClientOption) (*GetBgpRoutesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewGetBgpRoutesParams() + } + op := &runtime.ClientOperation{ + ID: "GetBgpRoutes", + Method: "GET", + PathPattern: "/bgp/routes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &GetBgpRoutesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*GetBgpRoutesOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for GetBgpRoutes: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) + panic(msg) +} + // SetTransport changes the transport on the client func (a *Client) SetTransport(transport runtime.ClientTransport) { a.transport = transport diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go index f06304ce69..23bf551240 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_peers_responses.go @@ -9,6 +9,7 @@ package bgp // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -32,8 +33,20 @@ func (o *GetBgpPeersReader) ReadResponse(response runtime.ClientResponse, consum return nil, err } return result, nil + case 500: + result := NewGetBgpPeersInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpPeersDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /bgp/peers] GetBgpPeers", response, response.Code()) } } @@ -76,12 +89,19 @@ func (o *GetBgpPeersOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get bgp peers o k response +func (o *GetBgpPeersOK) Code() int { + return 200 +} + func (o *GetBgpPeersOK) Error() string { - return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %s", 200, payload) } func (o *GetBgpPeersOK) String() string { - return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersOK %s", 200, payload) } func (o *GetBgpPeersOK) GetPayload() []*models.BgpPeer { @@ -97,3 +117,139 @@ func (o *GetBgpPeersOK) readResponse(response runtime.ClientResponse, consumer r return nil } + +// NewGetBgpPeersInternalServerError creates a GetBgpPeersInternalServerError with default headers values +func NewGetBgpPeersInternalServerError() *GetBgpPeersInternalServerError { + return &GetBgpPeersInternalServerError{} +} + +/* +GetBgpPeersInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpPeersInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp peers internal server error response has a 2xx status code +func (o *GetBgpPeersInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp peers internal server error response has a 3xx status code +func (o *GetBgpPeersInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp peers internal server error response has a 4xx status code +func (o *GetBgpPeersInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp peers internal server error response has a 5xx status code +func (o *GetBgpPeersInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp peers internal server error response a status code equal to that given +func (o *GetBgpPeersInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get bgp peers internal server error response +func (o *GetBgpPeersInternalServerError) Code() int { + return 500 +} + +func (o *GetBgpPeersInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %s", 500, payload) +} + +func (o *GetBgpPeersInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersInternalServerError %s", 500, payload) +} + +func (o *GetBgpPeersInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpPeersInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpPeersDisabled creates a GetBgpPeersDisabled with default headers values +func NewGetBgpPeersDisabled() *GetBgpPeersDisabled { + return &GetBgpPeersDisabled{} +} + +/* +GetBgpPeersDisabled describes a response with status code 501, with default header values. 
+ +BGP Control Plane disabled +*/ +type GetBgpPeersDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp peers disabled response has a 2xx status code +func (o *GetBgpPeersDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp peers disabled response has a 3xx status code +func (o *GetBgpPeersDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp peers disabled response has a 4xx status code +func (o *GetBgpPeersDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp peers disabled response has a 5xx status code +func (o *GetBgpPeersDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp peers disabled response a status code equal to that given +func (o *GetBgpPeersDisabled) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the get bgp peers disabled response +func (o *GetBgpPeersDisabled) Code() int { + return 501 +} + +func (o *GetBgpPeersDisabled) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %s", 501, payload) +} + +func (o *GetBgpPeersDisabled) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/peers][%d] getBgpPeersDisabled %s", 501, payload) +} + +func (o *GetBgpPeersDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpPeersDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go new file mode 100644 index 0000000000..db2421c1a4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetBgpRoutePoliciesParams creates a new GetBgpRoutePoliciesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetBgpRoutePoliciesParams() *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithTimeout creates a new GetBgpRoutePoliciesParams object +// with the ability to set a timeout on a request. 
+func NewGetBgpRoutePoliciesParamsWithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + timeout: timeout, + } +} + +// NewGetBgpRoutePoliciesParamsWithContext creates a new GetBgpRoutePoliciesParams object +// with the ability to set a context for a request. +func NewGetBgpRoutePoliciesParamsWithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + Context: ctx, + } +} + +// NewGetBgpRoutePoliciesParamsWithHTTPClient creates a new GetBgpRoutePoliciesParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetBgpRoutePoliciesParamsWithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + return &GetBgpRoutePoliciesParams{ + HTTPClient: client, + } +} + +/* +GetBgpRoutePoliciesParams contains all the parameters to send to the API endpoint + + for the get bgp route policies operation. + + Typically these are written to a http.Request. +*/ +type GetBgpRoutePoliciesParams struct { + + /* RouterAsn. + + Autonomous System Number (ASN) identifying a BGP virtual router instance. + If not specified, all virtual router instances are selected. + + */ + RouterAsn *int64 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutePoliciesParams) WithDefaults() *GetBgpRoutePoliciesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get bgp route policies params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutePoliciesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithTimeout(timeout time.Duration) *GetBgpRoutePoliciesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithContext(ctx context.Context) *GetBgpRoutePoliciesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithHTTPClient(client *http.Client) *GetBgpRoutePoliciesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutePoliciesParams { + o.SetRouterAsn(routerAsn) + return o +} + +// SetRouterAsn adds the routerAsn to the get bgp route policies params +func (o *GetBgpRoutePoliciesParams) SetRouterAsn(routerAsn *int64) { + o.RouterAsn = routerAsn +} + +// WriteToRequest writes these params to a swagger request +func (o *GetBgpRoutePoliciesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err 
:= r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RouterAsn != nil { + + // query param router_asn + var qrRouterAsn int64 + + if o.RouterAsn != nil { + qrRouterAsn = *o.RouterAsn + } + qRouterAsn := swag.FormatInt64(qrRouterAsn) + if qRouterAsn != "" { + + if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil { + return err + } + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go new file mode 100644 index 0000000000..1340be6e15 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_route_policies_responses.go @@ -0,0 +1,255 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetBgpRoutePoliciesReader is a Reader for the GetBgpRoutePolicies structure. +type GetBgpRoutePoliciesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetBgpRoutePoliciesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetBgpRoutePoliciesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetBgpRoutePoliciesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpRoutePoliciesDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[GET /bgp/route-policies] GetBgpRoutePolicies", response, response.Code()) + } +} + +// NewGetBgpRoutePoliciesOK creates a GetBgpRoutePoliciesOK with default headers values +func NewGetBgpRoutePoliciesOK() *GetBgpRoutePoliciesOK { + return &GetBgpRoutePoliciesOK{} +} + +/* +GetBgpRoutePoliciesOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetBgpRoutePoliciesOK struct { + Payload []*models.BgpRoutePolicy +} + +// IsSuccess returns true when this get bgp route policies o k response has a 2xx status code +func (o *GetBgpRoutePoliciesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get bgp route policies o k response has a 3xx status code +func (o *GetBgpRoutePoliciesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies o k response has a 4xx status code +func (o *GetBgpRoutePoliciesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies o k response has a 5xx status code +func (o *GetBgpRoutePoliciesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get bgp route policies o k response a status code equal to that given +func (o *GetBgpRoutePoliciesOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get bgp route policies o k response +func (o *GetBgpRoutePoliciesOK) Code() int { + return 200 +} + +func (o *GetBgpRoutePoliciesOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %s", 200, payload) +} + +func (o *GetBgpRoutePoliciesOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesOK %s", 200, payload) +} + +func (o *GetBgpRoutePoliciesOK) GetPayload() []*models.BgpRoutePolicy { + return o.Payload +} + +func (o *GetBgpRoutePoliciesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutePoliciesInternalServerError creates a GetBgpRoutePoliciesInternalServerError with default headers values +func NewGetBgpRoutePoliciesInternalServerError() *GetBgpRoutePoliciesInternalServerError { + return &GetBgpRoutePoliciesInternalServerError{} +} + +/* +GetBgpRoutePoliciesInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpRoutePoliciesInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies internal server error response has a 2xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies internal server error response has a 3xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies internal server error response has a 4xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies internal server error response has a 5xx status code +func (o *GetBgpRoutePoliciesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies internal server error response a status code equal to that given +func (o *GetBgpRoutePoliciesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get bgp route policies internal server error response +func (o *GetBgpRoutePoliciesInternalServerError) Code() int { + return 500 +} + +func (o *GetBgpRoutePoliciesInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %s", 500, payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesInternalServerError %s", 500, payload) +} + +func (o *GetBgpRoutePoliciesInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutePoliciesDisabled creates a GetBgpRoutePoliciesDisabled with default headers values +func NewGetBgpRoutePoliciesDisabled() *GetBgpRoutePoliciesDisabled { + return &GetBgpRoutePoliciesDisabled{} +} + +/* +GetBgpRoutePoliciesDisabled describes a response with status code 501, with default header values. 
+ +BGP Control Plane disabled +*/ +type GetBgpRoutePoliciesDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp route policies disabled response has a 2xx status code +func (o *GetBgpRoutePoliciesDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp route policies disabled response has a 3xx status code +func (o *GetBgpRoutePoliciesDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp route policies disabled response has a 4xx status code +func (o *GetBgpRoutePoliciesDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp route policies disabled response has a 5xx status code +func (o *GetBgpRoutePoliciesDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp route policies disabled response a status code equal to that given +func (o *GetBgpRoutePoliciesDisabled) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the get bgp route policies disabled response +func (o *GetBgpRoutePoliciesDisabled) Code() int { + return 501 +} + +func (o *GetBgpRoutePoliciesDisabled) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %s", 501, payload) +} + +func (o *GetBgpRoutePoliciesDisabled) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/route-policies][%d] getBgpRoutePoliciesDisabled %s", 501, payload) +} + +func (o *GetBgpRoutePoliciesDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutePoliciesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go new file mode 100644 index 0000000000..9fa279b03d --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_parameters.go @@ -0,0 +1,286 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGetBgpRoutesParams creates a new GetBgpRoutesParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewGetBgpRoutesParams() *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewGetBgpRoutesParamsWithTimeout creates a new GetBgpRoutesParams object +// with the ability to set a timeout on a request. 
+func NewGetBgpRoutesParamsWithTimeout(timeout time.Duration) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + timeout: timeout, + } +} + +// NewGetBgpRoutesParamsWithContext creates a new GetBgpRoutesParams object +// with the ability to set a context for a request. +func NewGetBgpRoutesParamsWithContext(ctx context.Context) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + Context: ctx, + } +} + +// NewGetBgpRoutesParamsWithHTTPClient creates a new GetBgpRoutesParams object +// with the ability to set a custom HTTPClient for a request. +func NewGetBgpRoutesParamsWithHTTPClient(client *http.Client) *GetBgpRoutesParams { + return &GetBgpRoutesParams{ + HTTPClient: client, + } +} + +/* +GetBgpRoutesParams contains all the parameters to send to the API endpoint + + for the get bgp routes operation. + + Typically these are written to a http.Request. +*/ +type GetBgpRoutesParams struct { + + /* Afi. + + Address Family Indicator (AFI) of a BGP route + */ + Afi string + + /* Neighbor. + + IP address specifying a BGP neighbor. + Has to be specified only when table type is adj-rib-in or adj-rib-out. + + */ + Neighbor *string + + /* RouterAsn. + + Autonomous System Number (ASN) identifying a BGP virtual router instance. + If not specified, all virtual router instances are selected. + + */ + RouterAsn *int64 + + /* Safi. + + Subsequent Address Family Indicator (SAFI) of a BGP route + */ + Safi string + + /* TableType. + + BGP Routing Information Base (RIB) table type + */ + TableType string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the get bgp routes params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *GetBgpRoutesParams) WithDefaults() *GetBgpRoutesParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the get bgp routes params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *GetBgpRoutesParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the get bgp routes params +func (o *GetBgpRoutesParams) WithTimeout(timeout time.Duration) *GetBgpRoutesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the get bgp routes params +func (o *GetBgpRoutesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the get bgp routes params +func (o *GetBgpRoutesParams) WithContext(ctx context.Context) *GetBgpRoutesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the get bgp routes params +func (o *GetBgpRoutesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the get bgp routes params +func (o *GetBgpRoutesParams) WithHTTPClient(client *http.Client) *GetBgpRoutesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the get bgp routes params +func (o *GetBgpRoutesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAfi adds the afi to the get bgp routes params +func (o *GetBgpRoutesParams) WithAfi(afi string) *GetBgpRoutesParams { + o.SetAfi(afi) + return o +} + +// SetAfi adds the afi to the get bgp routes params +func (o *GetBgpRoutesParams) SetAfi(afi string) { + o.Afi = afi +} + +// WithNeighbor adds the neighbor to the get bgp routes params +func (o *GetBgpRoutesParams) WithNeighbor(neighbor *string) *GetBgpRoutesParams { + o.SetNeighbor(neighbor) + return o +} + +// SetNeighbor adds the neighbor to the get bgp routes params +func (o *GetBgpRoutesParams) SetNeighbor(neighbor *string) { + o.Neighbor = neighbor +} + +// WithRouterAsn adds the routerAsn to the get bgp routes params +func (o *GetBgpRoutesParams) WithRouterAsn(routerAsn *int64) *GetBgpRoutesParams { + o.SetRouterAsn(routerAsn) + return o +} + +// SetRouterAsn adds the routerAsn to the get bgp routes params +func (o *GetBgpRoutesParams) SetRouterAsn(routerAsn *int64) { + o.RouterAsn = routerAsn +} + +// WithSafi adds the safi to the get bgp routes params +func (o *GetBgpRoutesParams) WithSafi(safi string) *GetBgpRoutesParams { + o.SetSafi(safi) + return o +} + +// SetSafi adds the safi to the get bgp routes params +func (o *GetBgpRoutesParams) SetSafi(safi string) { + o.Safi = safi +} + +// WithTableType adds the tableType to the get bgp routes params +func (o *GetBgpRoutesParams) WithTableType(tableType string) *GetBgpRoutesParams { + o.SetTableType(tableType) + return o +} + +// SetTableType adds the tableType to the get bgp routes params +func (o *GetBgpRoutesParams) SetTableType(tableType string) { + o.TableType = tableType +} + +// WriteToRequest writes these params to a swagger request +func (o *GetBgpRoutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param afi + qrAfi := o.Afi + qAfi := qrAfi + if qAfi != "" { + + if err := r.SetQueryParam("afi", qAfi); err != nil { + return err + } + } + + if o.Neighbor != nil { + + // query param neighbor + var qrNeighbor string + + if o.Neighbor != nil { + qrNeighbor = *o.Neighbor + } + qNeighbor := qrNeighbor + if qNeighbor != "" { + + if err := r.SetQueryParam("neighbor", qNeighbor); err != nil { + return err + } + } + } + + if o.RouterAsn != nil { + + // query param router_asn + var qrRouterAsn int64 + + if o.RouterAsn != nil { + 
qrRouterAsn = *o.RouterAsn + } + qRouterAsn := swag.FormatInt64(qrRouterAsn) + if qRouterAsn != "" { + + if err := r.SetQueryParam("router_asn", qRouterAsn); err != nil { + return err + } + } + } + + // query param safi + qrSafi := o.Safi + qSafi := qrSafi + if qSafi != "" { + + if err := r.SetQueryParam("safi", qSafi); err != nil { + return err + } + } + + // query param table_type + qrTableType := o.TableType + qTableType := qrTableType + if qTableType != "" { + + if err := r.SetQueryParam("table_type", qTableType); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go new file mode 100644 index 0000000000..8f26c2f219 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/bgp/get_bgp_routes_responses.go @@ -0,0 +1,255 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package bgp + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// GetBgpRoutesReader is a Reader for the GetBgpRoutes structure. +type GetBgpRoutesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GetBgpRoutesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGetBgpRoutesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 500: + result := NewGetBgpRoutesInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 501: + result := NewGetBgpRoutesDisabled() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[GET /bgp/routes] GetBgpRoutes", response, response.Code()) + } +} + +// NewGetBgpRoutesOK creates a GetBgpRoutesOK with default headers values +func NewGetBgpRoutesOK() *GetBgpRoutesOK { + return &GetBgpRoutesOK{} +} + +/* +GetBgpRoutesOK describes a response with status code 200, with default header values. 
+ +Success +*/ +type GetBgpRoutesOK struct { + Payload []*models.BgpRoute +} + +// IsSuccess returns true when this get bgp routes o k response has a 2xx status code +func (o *GetBgpRoutesOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this get bgp routes o k response has a 3xx status code +func (o *GetBgpRoutesOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes o k response has a 4xx status code +func (o *GetBgpRoutesOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes o k response has a 5xx status code +func (o *GetBgpRoutesOK) IsServerError() bool { + return false +} + +// IsCode returns true when this get bgp routes o k response a status code equal to that given +func (o *GetBgpRoutesOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the get bgp routes o k response +func (o *GetBgpRoutesOK) Code() int { + return 200 +} + +func (o *GetBgpRoutesOK) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %s", 200, payload) +} + +func (o *GetBgpRoutesOK) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesOK %s", 200, payload) +} + +func (o *GetBgpRoutesOK) GetPayload() []*models.BgpRoute { + return o.Payload +} + +func (o *GetBgpRoutesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutesInternalServerError creates a GetBgpRoutesInternalServerError with default headers values +func NewGetBgpRoutesInternalServerError() *GetBgpRoutesInternalServerError { + return &GetBgpRoutesInternalServerError{} +} + +/* +GetBgpRoutesInternalServerError describes a response with status code 500, with default header values. 
+ +Internal Server Error +*/ +type GetBgpRoutesInternalServerError struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp routes internal server error response has a 2xx status code +func (o *GetBgpRoutesInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp routes internal server error response has a 3xx status code +func (o *GetBgpRoutesInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes internal server error response has a 4xx status code +func (o *GetBgpRoutesInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes internal server error response has a 5xx status code +func (o *GetBgpRoutesInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp routes internal server error response a status code equal to that given +func (o *GetBgpRoutesInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the get bgp routes internal server error response +func (o *GetBgpRoutesInternalServerError) Code() int { + return 500 +} + +func (o *GetBgpRoutesInternalServerError) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %s", 500, payload) +} + +func (o *GetBgpRoutesInternalServerError) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesInternalServerError %s", 500, payload) +} + +func (o *GetBgpRoutesInternalServerError) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutesInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGetBgpRoutesDisabled creates a GetBgpRoutesDisabled with default headers values +func NewGetBgpRoutesDisabled() *GetBgpRoutesDisabled { + return &GetBgpRoutesDisabled{} +} + +/* +GetBgpRoutesDisabled describes a response with status code 501, with default header values. 
+ +BGP Control Plane disabled +*/ +type GetBgpRoutesDisabled struct { + Payload models.Error +} + +// IsSuccess returns true when this get bgp routes disabled response has a 2xx status code +func (o *GetBgpRoutesDisabled) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this get bgp routes disabled response has a 3xx status code +func (o *GetBgpRoutesDisabled) IsRedirect() bool { + return false +} + +// IsClientError returns true when this get bgp routes disabled response has a 4xx status code +func (o *GetBgpRoutesDisabled) IsClientError() bool { + return false +} + +// IsServerError returns true when this get bgp routes disabled response has a 5xx status code +func (o *GetBgpRoutesDisabled) IsServerError() bool { + return true +} + +// IsCode returns true when this get bgp routes disabled response a status code equal to that given +func (o *GetBgpRoutesDisabled) IsCode(code int) bool { + return code == 501 +} + +// Code gets the status code for the get bgp routes disabled response +func (o *GetBgpRoutesDisabled) Code() int { + return 501 +} + +func (o *GetBgpRoutesDisabled) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %s", 501, payload) +} + +func (o *GetBgpRoutesDisabled) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /bgp/routes][%d] getBgpRoutesDisabled %s", 501, payload) +} + +func (o *GetBgpRoutesDisabled) GetPayload() models.Error { + return o.Payload +} + +func (o *GetBgpRoutesDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go b/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go index 109454afa9..35b33e5386 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go @@ -17,12 +17,10 @@ import ( "github.com/cilium/cilium/api/v1/client/daemon" "github.com/cilium/cilium/api/v1/client/endpoint" "github.com/cilium/cilium/api/v1/client/ipam" - "github.com/cilium/cilium/api/v1/client/metrics" "github.com/cilium/cilium/api/v1/client/policy" "github.com/cilium/cilium/api/v1/client/prefilter" "github.com/cilium/cilium/api/v1/client/recorder" "github.com/cilium/cilium/api/v1/client/service" - "github.com/cilium/cilium/api/v1/client/statedb" ) // Default cilium API HTTP client. 
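
Stepping back from the generated files for a moment: the BGP endpoints above now surface their 500 and 501 failures as dedicated response types that the reader returns as errors, rather than a single generic API error. The following is a minimal sketch, not part of this patch — the package name, helper name and output are assumptions — of how a caller might tell the two apart with errors.As.

package bgpexample

import (
	"errors"
	"fmt"

	"github.com/cilium/cilium/api/v1/client/bgp"
)

// dumpBgpRoutes is a hypothetical helper; only the response and error types
// referenced here come from the generated client in this patch.
func dumpBgpRoutes(c bgp.ClientService, params *bgp.GetBgpRoutesParams) error {
	ok, err := c.GetBgpRoutes(params)
	if err != nil {
		var disabled *bgp.GetBgpRoutesDisabled
		var internal *bgp.GetBgpRoutesInternalServerError
		switch {
		case errors.As(err, &disabled):
			// 501: the agent runs without the BGP Control Plane enabled.
			return fmt.Errorf("BGP Control Plane disabled: %w", err)
		case errors.As(err, &internal):
			// 500: agent-side failure; internal.GetPayload() carries the agent's models.Error.
			return fmt.Errorf("agent error: %w", err)
		default:
			return err // transport, timeout or decoding failure
		}
	}
	fmt.Printf("received %d routes\n", len(ok.GetPayload()))
	return nil
}

GetBgpPeers and GetBgpRoutePolicies gain the same InternalServerError/Disabled pair in this update, so the identical pattern applies to them.
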
@@ -71,12 +69,10 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumAPI cli.Daemon = daemon.New(transport, formats) cli.Endpoint = endpoint.New(transport, formats) cli.Ipam = ipam.New(transport, formats) - cli.Metrics = metrics.New(transport, formats) cli.Policy = policy.New(transport, formats) cli.Prefilter = prefilter.New(transport, formats) cli.Recorder = recorder.New(transport, formats) cli.Service = service.New(transport, formats) - cli.Statedb = statedb.New(transport, formats) return cli } @@ -129,8 +125,6 @@ type CiliumAPI struct { Ipam ipam.ClientService - Metrics metrics.ClientService - Policy policy.ClientService Prefilter prefilter.ClientService @@ -139,8 +133,6 @@ type CiliumAPI struct { Service service.ClientService - Statedb statedb.ClientService - Transport runtime.ClientTransport } @@ -151,10 +143,8 @@ func (c *CiliumAPI) SetTransport(transport runtime.ClientTransport) { c.Daemon.SetTransport(transport) c.Endpoint.SetTransport(transport) c.Ipam.SetTransport(transport) - c.Metrics.SetTransport(transport) c.Policy.SetTransport(transport) c.Prefilter.SetTransport(transport) c.Recorder.SetTransport(transport) c.Service.SetTransport(transport) - c.Statedb.SetTransport(transport) } diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go index 0f824d32b6..8a5d78313b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go @@ -13,6 +13,7 @@ import ( "io" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -21,6 +22,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new daemon API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new daemon API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for daemon API */ @@ -29,7 +55,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. 
type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods @@ -174,7 +200,7 @@ func (a *Client) GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetC } /* -GetDebuginfo retrieves information about the agent and evironment for debugging +GetDebuginfo retrieves information about the agent and environment for debugging */ func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) { // TODO: Validate the params before sending diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go index 3c16b36970..dfbfabca5f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetCgroupDumpMetadataReader) ReadResponse(response runtime.ClientRespon } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /cgroup-dump-metadata] GetCgroupDumpMetadata", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetCgroupDumpMetadataOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get cgroup dump metadata o k response +func (o *GetCgroupDumpMetadataOK) Code() int { + return 200 +} + func (o *GetCgroupDumpMetadataOK) Error() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %s", 200, payload) } func (o *GetCgroupDumpMetadataOK) String() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %s", 200, payload) } func (o *GetCgroupDumpMetadataOK) GetPayload() *models.CgroupDumpMetadata { @@ -145,12 +153,19 @@ func (o *GetCgroupDumpMetadataFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get cgroup dump metadata failure response +func (o *GetCgroupDumpMetadataFailure) Code() int { + return 500 +} + func (o *GetCgroupDumpMetadataFailure) Error() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %s", 500, payload) } func (o *GetCgroupDumpMetadataFailure) String() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %s", 500, payload) } func (o *GetCgroupDumpMetadataFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go index 
2f767bbf53..fcbf069a66 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetClusterNodesReader) ReadResponse(response runtime.ClientResponse, co } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /cluster/nodes] GetClusterNodes", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetClusterNodesOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get cluster nodes o k response +func (o *GetClusterNodesOK) Code() int { + return 200 +} + func (o *GetClusterNodesOK) Error() string { - return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %s", 200, payload) } func (o *GetClusterNodesOK) String() string { - return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %s", 200, payload) } func (o *GetClusterNodesOK) GetPayload() *models.ClusterNodeStatus { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go index 10da9ace52..273684b620 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetConfigReader) ReadResponse(response runtime.ClientResponse, consumer } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /config] GetConfig", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetConfigOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get config o k response +func (o *GetConfigOK) Code() int { + return 200 +} + func (o *GetConfigOK) Error() string { - return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /config][%d] getConfigOK %s", 200, payload) } func (o *GetConfigOK) String() string { - return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /config][%d] getConfigOK %s", 200, payload) } func (o *GetConfigOK) GetPayload() *models.DaemonConfiguration { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go index a7fa07a316..8572b5b59a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go +++ 
b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetDebuginfoReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /debuginfo] GetDebuginfo", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetDebuginfoOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get debuginfo o k response +func (o *GetDebuginfoOK) Code() int { + return 200 +} + func (o *GetDebuginfoOK) Error() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %s", 200, payload) } func (o *GetDebuginfoOK) String() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %s", 200, payload) } func (o *GetDebuginfoOK) GetPayload() *models.DebugInfo { @@ -145,12 +153,19 @@ func (o *GetDebuginfoFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get debuginfo failure response +func (o *GetDebuginfoFailure) Code() int { + return 500 +} + func (o *GetDebuginfoFailure) Error() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %s", 500, payload) } func (o *GetDebuginfoFailure) String() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %s", 500, payload) } func (o *GetDebuginfoFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go index 235c69da35..c2c5b08f6d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go @@ -72,6 +72,15 @@ type GetHealthzParams struct { */ Brief *bool + /* RequireK8sConnectivity. + + If set to true, failure of the agent to connect to the Kubernetes control plane will cause the agent's health status to also fail. + + + Default: true + */ + RequireK8sConnectivity *bool + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -89,7 +98,18 @@ func (o *GetHealthzParams) WithDefaults() *GetHealthzParams { // // All values with no default are reset to their zero value. 
func (o *GetHealthzParams) SetDefaults() { - // no default values defined for this parameter + var ( + requireK8sConnectivityDefault = bool(true) + ) + + val := GetHealthzParams{ + RequireK8sConnectivity: &requireK8sConnectivityDefault, + } + + val.timeout = o.timeout + val.Context = o.Context + val.HTTPClient = o.HTTPClient + *o = val } // WithTimeout adds the timeout to the get healthz params @@ -136,6 +156,17 @@ func (o *GetHealthzParams) SetBrief(brief *bool) { o.Brief = brief } +// WithRequireK8sConnectivity adds the requireK8sConnectivity to the get healthz params +func (o *GetHealthzParams) WithRequireK8sConnectivity(requireK8sConnectivity *bool) *GetHealthzParams { + o.SetRequireK8sConnectivity(requireK8sConnectivity) + return o +} + +// SetRequireK8sConnectivity adds the requireK8sConnectivity to the get healthz params +func (o *GetHealthzParams) SetRequireK8sConnectivity(requireK8sConnectivity *bool) { + o.RequireK8sConnectivity = requireK8sConnectivity +} + // WriteToRequest writes these params to a swagger request func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -152,6 +183,14 @@ func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Re } } + if o.RequireK8sConnectivity != nil { + + // header param require-k8s-connectivity + if err := r.SetHeaderParam("require-k8s-connectivity", swag.FormatBool(*o.RequireK8sConnectivity)); err != nil { + return err + } + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go index c3b73feea0..51bff203a4 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consume } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /healthz] GetHealthz", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetHealthzOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get healthz o k response +func (o *GetHealthzOK) Code() int { + return 200 +} + func (o *GetHealthzOK) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload) } func (o *GetHealthzOK) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload) } func (o *GetHealthzOK) GetPayload() *models.StatusResponse { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go index 6dc187478a..74e04e4b07 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go +++ 
b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go @@ -38,7 +38,7 @@ func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, c } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /map/{name}/events] GetMapNameEvents", response, response.Code()) } } @@ -84,12 +84,17 @@ func (o *GetMapNameEventsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get map name events o k response +func (o *GetMapNameEventsOK) Code() int { + return 200 +} + func (o *GetMapNameEventsOK) Error() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload) + return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK", 200) } func (o *GetMapNameEventsOK) String() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload) + return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK", 200) } func (o *GetMapNameEventsOK) GetPayload() io.Writer { @@ -144,12 +149,17 @@ func (o *GetMapNameEventsNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get map name events not found response +func (o *GetMapNameEventsNotFound) Code() int { + return 404 +} + func (o *GetMapNameEventsNotFound) Error() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404) + return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound", 404) } func (o *GetMapNameEventsNotFound) String() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404) + return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound", 404) } func (o *GetMapNameEventsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go index f03de4ce30..9cb02fc764 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetMapNameReader) ReadResponse(response runtime.ClientResponse, consume } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /map/{name}] GetMapName", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetMapNameOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get map name o k response +func (o *GetMapNameOK) Code() int { + return 200 +} + func (o *GetMapNameOK) Error() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %s", 200, payload) } func (o *GetMapNameOK) String() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload) + payload, _ := 
json.Marshal(o.Payload) + return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %s", 200, payload) } func (o *GetMapNameOK) GetPayload() *models.BPFMap { @@ -144,12 +152,17 @@ func (o *GetMapNameNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get map name not found response +func (o *GetMapNameNotFound) Code() int { + return 404 +} + func (o *GetMapNameNotFound) Error() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404) + return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound", 404) } func (o *GetMapNameNotFound) String() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404) + return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound", 404) } func (o *GetMapNameNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go index dae2f2cf52..fee5e2b6fa 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetMapReader) ReadResponse(response runtime.ClientResponse, consumer ru } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /map] GetMap", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetMapOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get map o k response +func (o *GetMapOK) Code() int { + return 200 +} + func (o *GetMapOK) Error() string { - return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /map][%d] getMapOK %s", 200, payload) } func (o *GetMapOK) String() string { - return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /map][%d] getMapOK %s", 200, payload) } func (o *GetMapOK) GetPayload() *models.BPFMapList { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go index 1b4131485a..6528e7e35c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetNodeIdsReader) ReadResponse(response runtime.ClientResponse, consume } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /node/ids] GetNodeIds", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetNodeIdsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get node ids o k response +func (o 
*GetNodeIdsOK) Code() int { + return 200 +} + func (o *GetNodeIdsOK) Error() string { - return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %s", 200, payload) } func (o *GetNodeIdsOK) String() string { - return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %s", 200, payload) } func (o *GetNodeIdsOK) GetPayload() []*models.NodeID { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go index 87397f6750..179860ff83 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go @@ -9,6 +9,7 @@ package daemon // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *PatchConfigReader) ReadResponse(response runtime.ClientResponse, consum } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /config] PatchConfig", response, response.Code()) } } @@ -93,12 +94,17 @@ func (o *PatchConfigOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the patch config o k response +func (o *PatchConfigOK) Code() int { + return 200 +} + func (o *PatchConfigOK) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200) + return fmt.Sprintf("[PATCH /config][%d] patchConfigOK", 200) } func (o *PatchConfigOK) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200) + return fmt.Sprintf("[PATCH /config][%d] patchConfigOK", 200) } func (o *PatchConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -145,12 +151,19 @@ func (o *PatchConfigBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the patch config bad request response +func (o *PatchConfigBadRequest) Code() int { + return 400 +} + func (o *PatchConfigBadRequest) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %s", 400, payload) } func (o *PatchConfigBadRequest) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %s", 400, payload) } func (o *PatchConfigBadRequest) GetPayload() models.Error { @@ -205,12 +218,17 @@ func (o *PatchConfigForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the patch config forbidden response +func (o *PatchConfigForbidden) Code() int { + return 403 +} + func (o *PatchConfigForbidden) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden ", 403) + return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden", 403) } func (o *PatchConfigForbidden) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden ", 403) + return 
fmt.Sprintf("[PATCH /config][%d] patchConfigForbidden", 403) } func (o *PatchConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -257,12 +275,19 @@ func (o *PatchConfigFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the patch config failure response +func (o *PatchConfigFailure) Code() int { + return 500 +} + func (o *PatchConfigFailure) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %s", 500, payload) } func (o *PatchConfigFailure) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %s", 500, payload) } func (o *PatchConfigFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go index c2e2ea7930..28d2c62375 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go @@ -74,9 +74,11 @@ type DeleteEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go index b99123fecb..a71ed7bfef 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *DeleteEndpointIDReader) ReadResponse(response runtime.ClientResponse, c } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /endpoint/{id}] DeleteEndpointID", response, response.Code()) } } @@ -105,12 +106,17 @@ func (o *DeleteEndpointIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete endpoint Id o k response +func (o *DeleteEndpointIDOK) Code() int { + return 200 +} + func (o *DeleteEndpointIDOK) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK", 200) } func (o *DeleteEndpointIDOK) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK", 200) } func (o *DeleteEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -157,12 +163,19 @@ func (o *DeleteEndpointIDErrors) IsCode(code int) bool { return code == 206 } +// Code gets the status code for the delete endpoint Id errors response +func (o *DeleteEndpointIDErrors) Code() int { + return 206 +} + func (o *DeleteEndpointIDErrors) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %s", 206, payload) } func (o *DeleteEndpointIDErrors) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %s", 206, payload) } func (o *DeleteEndpointIDErrors) GetPayload() int64 { @@ -220,12 +233,19 @@ func (o *DeleteEndpointIDInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the delete endpoint Id invalid response +func (o *DeleteEndpointIDInvalid) Code() int { + return 400 +} + func (o *DeleteEndpointIDInvalid) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %s", 400, payload) } func (o *DeleteEndpointIDInvalid) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %s", 400, payload) } func (o *DeleteEndpointIDInvalid) GetPayload() models.Error { @@ -280,12 +300,17 @@ func (o *DeleteEndpointIDForbidden) IsCode(code 
int) bool { return code == 403 } +// Code gets the status code for the delete endpoint Id forbidden response +func (o *DeleteEndpointIDForbidden) Code() int { + return 403 +} + func (o *DeleteEndpointIDForbidden) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden ", 403) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden", 403) } func (o *DeleteEndpointIDForbidden) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden ", 403) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdForbidden", 403) } func (o *DeleteEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -331,12 +356,17 @@ func (o *DeleteEndpointIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete endpoint Id not found response +func (o *DeleteEndpointIDNotFound) Code() int { + return 404 +} + func (o *DeleteEndpointIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound", 404) } func (o *DeleteEndpointIDNotFound) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound", 404) } func (o *DeleteEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -382,12 +412,17 @@ func (o *DeleteEndpointIDTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the delete endpoint Id too many requests response +func (o *DeleteEndpointIDTooManyRequests) Code() int { + return 429 +} + func (o *DeleteEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests", 429) } func (o *DeleteEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests", 429) } func (o *DeleteEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go new file mode 100644 index 0000000000..8d28629fed --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_parameters.go @@ -0,0 +1,153 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package endpoint + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" +) + +// NewDeleteEndpointParams creates a new DeleteEndpointParams object, +// with the default timeout for this client. 
+// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewDeleteEndpointParams() *DeleteEndpointParams { + return &DeleteEndpointParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewDeleteEndpointParamsWithTimeout creates a new DeleteEndpointParams object +// with the ability to set a timeout on a request. +func NewDeleteEndpointParamsWithTimeout(timeout time.Duration) *DeleteEndpointParams { + return &DeleteEndpointParams{ + timeout: timeout, + } +} + +// NewDeleteEndpointParamsWithContext creates a new DeleteEndpointParams object +// with the ability to set a context for a request. +func NewDeleteEndpointParamsWithContext(ctx context.Context) *DeleteEndpointParams { + return &DeleteEndpointParams{ + Context: ctx, + } +} + +// NewDeleteEndpointParamsWithHTTPClient creates a new DeleteEndpointParams object +// with the ability to set a custom HTTPClient for a request. +func NewDeleteEndpointParamsWithHTTPClient(client *http.Client) *DeleteEndpointParams { + return &DeleteEndpointParams{ + HTTPClient: client, + } +} + +/* +DeleteEndpointParams contains all the parameters to send to the API endpoint + + for the delete endpoint operation. + + Typically these are written to a http.Request. +*/ +type DeleteEndpointParams struct { + + // Endpoint. + Endpoint *models.EndpointBatchDeleteRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the delete endpoint params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *DeleteEndpointParams) WithDefaults() *DeleteEndpointParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the delete endpoint params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *DeleteEndpointParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the delete endpoint params +func (o *DeleteEndpointParams) WithTimeout(timeout time.Duration) *DeleteEndpointParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the delete endpoint params +func (o *DeleteEndpointParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the delete endpoint params +func (o *DeleteEndpointParams) WithContext(ctx context.Context) *DeleteEndpointParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the delete endpoint params +func (o *DeleteEndpointParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the delete endpoint params +func (o *DeleteEndpointParams) WithHTTPClient(client *http.Client) *DeleteEndpointParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the delete endpoint params +func (o *DeleteEndpointParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEndpoint adds the endpoint to the delete endpoint params +func (o *DeleteEndpointParams) WithEndpoint(endpoint *models.EndpointBatchDeleteRequest) *DeleteEndpointParams { + o.SetEndpoint(endpoint) + return o +} + +// SetEndpoint adds the endpoint to the delete endpoint params +func (o *DeleteEndpointParams) SetEndpoint(endpoint *models.EndpointBatchDeleteRequest) { + o.Endpoint = endpoint +} + +// WriteToRequest writes these params to a swagger request +func (o *DeleteEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Endpoint != nil { + if err := r.SetBodyParam(o.Endpoint); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go new file mode 100644 index 0000000000..e39171fe7a --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_responses.go @@ -0,0 +1,353 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package endpoint + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// DeleteEndpointReader is a Reader for the DeleteEndpoint structure. +type DeleteEndpointReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *DeleteEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewDeleteEndpointOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 206: + result := NewDeleteEndpointErrors() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewDeleteEndpointInvalid() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 404: + result := NewDeleteEndpointNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 429: + result := NewDeleteEndpointTooManyRequests() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[DELETE /endpoint] DeleteEndpoint", response, response.Code()) + } +} + +// NewDeleteEndpointOK creates a DeleteEndpointOK with default headers values +func NewDeleteEndpointOK() *DeleteEndpointOK { + return &DeleteEndpointOK{} +} + +/* +DeleteEndpointOK describes a response with status code 200, with default header values. + +Success +*/ +type DeleteEndpointOK struct { +} + +// IsSuccess returns true when this delete endpoint o k response has a 2xx status code +func (o *DeleteEndpointOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete endpoint o k response has a 3xx status code +func (o *DeleteEndpointOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint o k response has a 4xx status code +func (o *DeleteEndpointOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete endpoint o k response has a 5xx status code +func (o *DeleteEndpointOK) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint o k response a status code equal to that given +func (o *DeleteEndpointOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the delete endpoint o k response +func (o *DeleteEndpointOK) Code() int { + return 200 +} + +func (o *DeleteEndpointOK) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK", 200) +} + +func (o *DeleteEndpointOK) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointOK", 200) +} + +func (o *DeleteEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointErrors creates a DeleteEndpointErrors with default headers values +func NewDeleteEndpointErrors() *DeleteEndpointErrors { + return &DeleteEndpointErrors{} +} + +/* +DeleteEndpointErrors describes a response with status code 206, with default header values. 
+ +Deleted with a number of errors encountered +*/ +type DeleteEndpointErrors struct { + Payload int64 +} + +// IsSuccess returns true when this delete endpoint errors response has a 2xx status code +func (o *DeleteEndpointErrors) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this delete endpoint errors response has a 3xx status code +func (o *DeleteEndpointErrors) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint errors response has a 4xx status code +func (o *DeleteEndpointErrors) IsClientError() bool { + return false +} + +// IsServerError returns true when this delete endpoint errors response has a 5xx status code +func (o *DeleteEndpointErrors) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint errors response a status code equal to that given +func (o *DeleteEndpointErrors) IsCode(code int) bool { + return code == 206 +} + +// Code gets the status code for the delete endpoint errors response +func (o *DeleteEndpointErrors) Code() int { + return 206 +} + +func (o *DeleteEndpointErrors) Error() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %s", 206, payload) +} + +func (o *DeleteEndpointErrors) String() string { + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointErrors %s", 206, payload) +} + +func (o *DeleteEndpointErrors) GetPayload() int64 { + return o.Payload +} + +func (o *DeleteEndpointErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewDeleteEndpointInvalid creates a DeleteEndpointInvalid with default headers values +func NewDeleteEndpointInvalid() *DeleteEndpointInvalid { + return &DeleteEndpointInvalid{} +} + +/* +DeleteEndpointInvalid describes a response with status code 400, with default header values. 
+ +Invalid endpoint delete request +*/ +type DeleteEndpointInvalid struct { +} + +// IsSuccess returns true when this delete endpoint invalid response has a 2xx status code +func (o *DeleteEndpointInvalid) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint invalid response has a 3xx status code +func (o *DeleteEndpointInvalid) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint invalid response has a 4xx status code +func (o *DeleteEndpointInvalid) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint invalid response has a 5xx status code +func (o *DeleteEndpointInvalid) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint invalid response a status code equal to that given +func (o *DeleteEndpointInvalid) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the delete endpoint invalid response +func (o *DeleteEndpointInvalid) Code() int { + return 400 +} + +func (o *DeleteEndpointInvalid) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid", 400) +} + +func (o *DeleteEndpointInvalid) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointInvalid", 400) +} + +func (o *DeleteEndpointInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointNotFound creates a DeleteEndpointNotFound with default headers values +func NewDeleteEndpointNotFound() *DeleteEndpointNotFound { + return &DeleteEndpointNotFound{} +} + +/* +DeleteEndpointNotFound describes a response with status code 404, with default header values. 
+ +No endpoints with provided parameters found +*/ +type DeleteEndpointNotFound struct { +} + +// IsSuccess returns true when this delete endpoint not found response has a 2xx status code +func (o *DeleteEndpointNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint not found response has a 3xx status code +func (o *DeleteEndpointNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint not found response has a 4xx status code +func (o *DeleteEndpointNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint not found response has a 5xx status code +func (o *DeleteEndpointNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint not found response a status code equal to that given +func (o *DeleteEndpointNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the delete endpoint not found response +func (o *DeleteEndpointNotFound) Code() int { + return 404 +} + +func (o *DeleteEndpointNotFound) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound", 404) +} + +func (o *DeleteEndpointNotFound) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointNotFound", 404) +} + +func (o *DeleteEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewDeleteEndpointTooManyRequests creates a DeleteEndpointTooManyRequests with default headers values +func NewDeleteEndpointTooManyRequests() *DeleteEndpointTooManyRequests { + return &DeleteEndpointTooManyRequests{} +} + +/* +DeleteEndpointTooManyRequests describes a response with status code 429, with default header values. 
+ +Rate-limiting too many requests in the given time frame +*/ +type DeleteEndpointTooManyRequests struct { +} + +// IsSuccess returns true when this delete endpoint too many requests response has a 2xx status code +func (o *DeleteEndpointTooManyRequests) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this delete endpoint too many requests response has a 3xx status code +func (o *DeleteEndpointTooManyRequests) IsRedirect() bool { + return false +} + +// IsClientError returns true when this delete endpoint too many requests response has a 4xx status code +func (o *DeleteEndpointTooManyRequests) IsClientError() bool { + return true +} + +// IsServerError returns true when this delete endpoint too many requests response has a 5xx status code +func (o *DeleteEndpointTooManyRequests) IsServerError() bool { + return false +} + +// IsCode returns true when this delete endpoint too many requests response a status code equal to that given +func (o *DeleteEndpointTooManyRequests) IsCode(code int) bool { + return code == 429 +} + +// Code gets the status code for the delete endpoint too many requests response +func (o *DeleteEndpointTooManyRequests) Code() int { + return 429 +} + +func (o *DeleteEndpointTooManyRequests) Error() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests", 429) +} + +func (o *DeleteEndpointTooManyRequests) String() string { + return fmt.Sprintf("[DELETE /endpoint][%d] deleteEndpointTooManyRequests", 429) +} + +func (o *DeleteEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go index 34d3500071..080c296c3b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new endpoint API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new endpoint API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. 
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for endpoint API */ @@ -28,11 +54,13 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods type ClientService interface { + DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) + DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) @@ -58,6 +86,47 @@ type ClientService interface { SetTransport(transport runtime.ClientTransport) } +/* +DeleteEndpoint deletes a list of endpoints + +Deletes a list of endpoints that have endpoints matching the provided properties +*/ +func (a *Client) DeleteEndpoint(params *DeleteEndpointParams, opts ...ClientOption) (*DeleteEndpointOK, *DeleteEndpointErrors, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewDeleteEndpointParams() + } + op := &runtime.ClientOperation{ + ID: "DeleteEndpoint", + Method: "DELETE", + PathPattern: "/endpoint", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &DeleteEndpointReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, nil, err + } + switch value := result.(type) { + case *DeleteEndpointOK: + return value, nil, nil + case *DeleteEndpointErrors: + return nil, value, nil + } + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* DeleteEndpointID deletes endpoint diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go index 027970d697..971bf69f06 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDConfigParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. 
container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go index e16cdfbf56..a6b577b284 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint/{id}/config] GetEndpointIDConfig", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetEndpointIDConfigOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint Id config o k response +func (o *GetEndpointIDConfigOK) Code() int { + return 200 +} + func (o *GetEndpointIDConfigOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %s", 200, payload) } func (o *GetEndpointIDConfigOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %s", 200, payload) } func (o *GetEndpointIDConfigOK) GetPayload() *models.EndpointConfigurationStatus { @@ -150,12 +158,17 @@ func (o *GetEndpointIDConfigNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint Id config not found response +func (o *GetEndpointIDConfigNotFound) Code() int { + return 404 +} + func (o *GetEndpointIDConfigNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound", 404) } func (o *GetEndpointIDConfigNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound", 404) } func (o *GetEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -201,12 +214,17 @@ func (o *GetEndpointIDConfigTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the get endpoint Id config too many requests response +func (o *GetEndpointIDConfigTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointIDConfigTooManyRequests) Error() string { - return 
fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests", 429) } func (o *GetEndpointIDConfigTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests", 429) } func (o *GetEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go index 11b97e7d96..b211692ce6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDHealthzParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go index a7e976534c..9728660e73 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *GetEndpointIDHealthzReader) ReadResponse(response runtime.ClientRespons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint/{id}/healthz] GetEndpointIDHealthz", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *GetEndpointIDHealthzOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint Id healthz o k response +func (o *GetEndpointIDHealthzOK) Code() int { + return 200 +} + func (o *GetEndpointIDHealthzOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %s", 200, payload) } func (o *GetEndpointIDHealthzOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %s", 200, payload) } func (o *GetEndpointIDHealthzOK) GetPayload() *models.EndpointHealth { @@ -156,12 +164,17 @@ func (o *GetEndpointIDHealthzInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get endpoint Id healthz invalid response +func (o *GetEndpointIDHealthzInvalid) Code() int { + return 400 +} + func (o *GetEndpointIDHealthzInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid", 400) } func (o *GetEndpointIDHealthzInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid", 400) } func (o *GetEndpointIDHealthzInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -207,12 +220,17 @@ func (o *GetEndpointIDHealthzNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint Id healthz not found response +func (o *GetEndpointIDHealthzNotFound) Code() int { + return 404 +} + func (o *GetEndpointIDHealthzNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound", 404) } func (o *GetEndpointIDHealthzNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound", 404) } func (o 
*GetEndpointIDHealthzNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -258,12 +276,17 @@ func (o *GetEndpointIDHealthzTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the get endpoint Id healthz too many requests response +func (o *GetEndpointIDHealthzTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointIDHealthzTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests", 429) } func (o *GetEndpointIDHealthzTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests", 429) } func (o *GetEndpointIDHealthzTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go index 9ed2d6d6bf..27dc404abd 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDLabelsParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go index 3f4e5138d9..bf101beb3a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint/{id}/labels] GetEndpointIDLabels", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetEndpointIDLabelsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint Id labels o k response +func (o *GetEndpointIDLabelsOK) Code() int { + return 200 +} + func (o *GetEndpointIDLabelsOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %s", 200, payload) } func (o *GetEndpointIDLabelsOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %s", 200, payload) } func (o *GetEndpointIDLabelsOK) GetPayload() *models.LabelConfiguration { @@ -150,12 +158,17 @@ func (o *GetEndpointIDLabelsNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint Id labels not found response +func (o *GetEndpointIDLabelsNotFound) Code() int { + return 404 +} + func (o *GetEndpointIDLabelsNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound", 404) } func (o *GetEndpointIDLabelsNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound", 404) } func (o *GetEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -201,12 +214,17 @@ func (o *GetEndpointIDLabelsTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the get endpoint Id labels too many requests response +func (o *GetEndpointIDLabelsTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointIDLabelsTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests", 429) } func (o *GetEndpointIDLabelsTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests", 
429) } func (o *GetEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go index 32bec87cc1..f93b295ca9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDLogParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go index db7227d1dd..e880b3413f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *GetEndpointIDLogReader) ReadResponse(response runtime.ClientResponse, c } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint/{id}/log] GetEndpointIDLog", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *GetEndpointIDLogOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint Id log o k response +func (o *GetEndpointIDLogOK) Code() int { + return 200 +} + func (o *GetEndpointIDLogOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %s", 200, payload) } func (o *GetEndpointIDLogOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %s", 200, payload) } func (o *GetEndpointIDLogOK) GetPayload() models.EndpointStatusLog { @@ -154,12 +162,17 @@ func (o *GetEndpointIDLogInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the 
get endpoint Id log invalid response +func (o *GetEndpointIDLogInvalid) Code() int { + return 400 +} + func (o *GetEndpointIDLogInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid", 400) } func (o *GetEndpointIDLogInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid", 400) } func (o *GetEndpointIDLogInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -205,12 +218,17 @@ func (o *GetEndpointIDLogNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint Id log not found response +func (o *GetEndpointIDLogNotFound) Code() int { + return 404 +} + func (o *GetEndpointIDLogNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound", 404) } func (o *GetEndpointIDLogNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound", 404) } func (o *GetEndpointIDLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -256,12 +274,17 @@ func (o *GetEndpointIDLogTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the get endpoint Id log too many requests response +func (o *GetEndpointIDLogTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointIDLogTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests", 429) } func (o *GetEndpointIDLogTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests", 429) } func (o *GetEndpointIDLogTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go index d43637631f..4bad76a970 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go @@ -74,9 +74,11 @@ type GetEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. 
container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go index 4d07e3aa4d..8da57f2db6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *GetEndpointIDReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint/{id}] GetEndpointID", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *GetEndpointIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint Id o k response +func (o *GetEndpointIDOK) Code() int { + return 200 +} + func (o *GetEndpointIDOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %s", 200, payload) } func (o *GetEndpointIDOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %s", 200, payload) } func (o *GetEndpointIDOK) GetPayload() *models.Endpoint { @@ -157,12 +165,19 @@ func (o *GetEndpointIDInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get endpoint Id invalid response +func (o *GetEndpointIDInvalid) Code() int { + return 400 +} + func (o *GetEndpointIDInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %s", 400, payload) } func (o *GetEndpointIDInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %s", 400, payload) } func (o *GetEndpointIDInvalid) GetPayload() models.Error { @@ -217,12 +232,17 @@ func (o *GetEndpointIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint Id not found response +func (o *GetEndpointIDNotFound) Code() int { + return 404 +} + func (o *GetEndpointIDNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound", 404) } func (o *GetEndpointIDNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound", 404) } 
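Editorial aside (not part of the vendored patch): the hunks in this file add a Code() accessor to every generated response type and switch Error()/String() to JSON-encode the payload. As a rough sketch of what the new accessor enables for callers, an error returned by the endpoint client can be branched on by HTTP status without type-switching over each response struct. The statusCoder interface and handleEndpointError helper below are hypothetical names introduced only for this illustration.

// Editorial sketch only; not part of the cilium vendor tree.
package example

import (
	"errors"
	"log"
	"net/http"
)

// statusCoder matches any generated response or error type that now exposes
// the Code() accessor added in this patch.
type statusCoder interface {
	Code() int
}

// handleEndpointError branches on the HTTP status carried by a typed error
// returned from the endpoint client (for example GetEndpointIDNotFound or
// GetEndpointIDTooManyRequests), falling back for transport-level errors.
func handleEndpointError(err error) {
	var sc statusCoder
	if errors.As(err, &sc) {
		switch sc.Code() {
		case http.StatusNotFound: // 404
			log.Println("endpoint not found")
		case http.StatusTooManyRequests: // 429
			log.Println("rate limited by the agent, retry later")
		default:
			log.Printf("endpoint API error (HTTP %d): %v", sc.Code(), err)
		}
		return
	}
	log.Printf("transport error: %v", err)
}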
func (o *GetEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -268,12 +288,17 @@ func (o *GetEndpointIDTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the get endpoint Id too many requests response +func (o *GetEndpointIDTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests", 429) } func (o *GetEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests", 429) } func (o *GetEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go index 5193537ea8..f7cd5ec56f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetEndpointReader) ReadResponse(response runtime.ClientResponse, consum } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /endpoint] GetEndpoint", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetEndpointOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get endpoint o k response +func (o *GetEndpointOK) Code() int { + return 200 +} + func (o *GetEndpointOK) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %s", 200, payload) } func (o *GetEndpointOK) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %s", 200, payload) } func (o *GetEndpointOK) GetPayload() []*models.Endpoint { @@ -148,12 +156,17 @@ func (o *GetEndpointNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get endpoint not found response +func (o *GetEndpointNotFound) Code() int { + return 404 +} + func (o *GetEndpointNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound", 404) } func (o *GetEndpointNotFound) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound", 404) } func (o *GetEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -199,12 +212,17 @@ func (o *GetEndpointTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the 
get endpoint too many requests response +func (o *GetEndpointTooManyRequests) Code() int { + return 429 +} + func (o *GetEndpointTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests", 429) } func (o *GetEndpointTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429) + return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests", 429) } func (o *GetEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go index a1b33a28d6..9ecd8ef8a1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDConfigParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. 
docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go index fc2c4883a5..59fcf10da0 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *PatchEndpointIDConfigReader) ReadResponse(response runtime.ClientRespon } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /endpoint/{id}/config] PatchEndpointIDConfig", response, response.Code()) } } @@ -105,12 +106,17 @@ func (o *PatchEndpointIDConfigOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the patch endpoint Id config o k response +func (o *PatchEndpointIDConfigOK) Code() int { + return 200 +} + func (o *PatchEndpointIDConfigOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK", 200) } func (o *PatchEndpointIDConfigOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK", 200) } func (o *PatchEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -156,12 +162,17 @@ func (o *PatchEndpointIDConfigInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the patch endpoint Id config invalid response +func (o *PatchEndpointIDConfigInvalid) Code() int { + return 400 +} + func (o *PatchEndpointIDConfigInvalid) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid", 400) } func (o *PatchEndpointIDConfigInvalid) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid", 400) } func (o *PatchEndpointIDConfigInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -207,12 +218,17 @@ func (o *PatchEndpointIDConfigForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the patch endpoint Id config forbidden response +func (o *PatchEndpointIDConfigForbidden) Code() int { + return 403 +} + func (o *PatchEndpointIDConfigForbidden) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden", 403) } func (o *PatchEndpointIDConfigForbidden) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigForbidden", 403) } func (o 
*PatchEndpointIDConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -258,12 +274,17 @@ func (o *PatchEndpointIDConfigNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the patch endpoint Id config not found response +func (o *PatchEndpointIDConfigNotFound) Code() int { + return 404 +} + func (o *PatchEndpointIDConfigNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound", 404) } func (o *PatchEndpointIDConfigNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound", 404) } func (o *PatchEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -309,12 +330,17 @@ func (o *PatchEndpointIDConfigTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the patch endpoint Id config too many requests response +func (o *PatchEndpointIDConfigTooManyRequests) Code() int { + return 429 +} + func (o *PatchEndpointIDConfigTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests", 429) } func (o *PatchEndpointIDConfigTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests", 429) } func (o *PatchEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -361,12 +387,19 @@ func (o *PatchEndpointIDConfigFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the patch endpoint Id config failed response +func (o *PatchEndpointIDConfigFailed) Code() int { + return 500 +} + func (o *PatchEndpointIDConfigFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %s", 500, payload) } func (o *PatchEndpointIDConfigFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %s", 500, payload) } func (o *PatchEndpointIDConfigFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go index db93b22dd1..8091565f55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDLabelsParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. 
cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go index d16eec5c36..d499da83c7 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -57,7 +58,7 @@ func (o *PatchEndpointIDLabelsReader) ReadResponse(response runtime.ClientRespon } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /endpoint/{id}/labels] PatchEndpointIDLabels", response, response.Code()) } } @@ -99,12 +100,17 @@ func (o *PatchEndpointIDLabelsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the patch endpoint Id labels o k response +func (o *PatchEndpointIDLabelsOK) Code() int { + return 200 +} + func (o *PatchEndpointIDLabelsOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK", 200) } func (o *PatchEndpointIDLabelsOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK", 200) } func (o *PatchEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -150,12 +156,17 @@ func (o *PatchEndpointIDLabelsForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the patch endpoint Id labels forbidden response +func (o *PatchEndpointIDLabelsForbidden) Code() int { + return 403 +} + func (o *PatchEndpointIDLabelsForbidden) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden", 403) } func (o *PatchEndpointIDLabelsForbidden) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsForbidden", 403) } func (o *PatchEndpointIDLabelsForbidden) 
readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -201,12 +212,17 @@ func (o *PatchEndpointIDLabelsNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the patch endpoint Id labels not found response +func (o *PatchEndpointIDLabelsNotFound) Code() int { + return 404 +} + func (o *PatchEndpointIDLabelsNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound", 404) } func (o *PatchEndpointIDLabelsNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound", 404) } func (o *PatchEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -252,12 +268,17 @@ func (o *PatchEndpointIDLabelsTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the patch endpoint Id labels too many requests response +func (o *PatchEndpointIDLabelsTooManyRequests) Code() int { + return 429 +} + func (o *PatchEndpointIDLabelsTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests", 429) } func (o *PatchEndpointIDLabelsTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests", 429) } func (o *PatchEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -304,12 +325,19 @@ func (o *PatchEndpointIDLabelsUpdateFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the patch endpoint Id labels update failed response +func (o *PatchEndpointIDLabelsUpdateFailed) Code() int { + return 500 +} + func (o *PatchEndpointIDLabelsUpdateFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %s", 500, payload) } func (o *PatchEndpointIDLabelsUpdateFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %s", 500, payload) } func (o *PatchEndpointIDLabelsUpdateFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go index fc8495b4c1..f718d19823 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go @@ -79,9 +79,11 @@ type PatchEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. 
cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go index 7b28de272a..d7aa0edf29 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *PatchEndpointIDReader) ReadResponse(response runtime.ClientResponse, co } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /endpoint/{id}] PatchEndpointID", response, response.Code()) } } @@ -105,12 +106,17 @@ func (o *PatchEndpointIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the patch endpoint Id o k response +func (o *PatchEndpointIDOK) Code() int { + return 200 +} + func (o *PatchEndpointIDOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK", 200) } func (o *PatchEndpointIDOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK", 200) } func (o *PatchEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -157,12 +163,19 @@ func (o *PatchEndpointIDInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the patch endpoint Id invalid response +func (o *PatchEndpointIDInvalid) Code() int { + return 400 +} + func (o *PatchEndpointIDInvalid) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %s", 400, payload) } func (o *PatchEndpointIDInvalid) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %s", 400, payload) } func (o *PatchEndpointIDInvalid) GetPayload() models.Error { @@ -217,12 +230,17 @@ func (o *PatchEndpointIDForbidden) IsCode(code int) bool { return code 
== 403 } +// Code gets the status code for the patch endpoint Id forbidden response +func (o *PatchEndpointIDForbidden) Code() int { + return 403 +} + func (o *PatchEndpointIDForbidden) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden", 403) } func (o *PatchEndpointIDForbidden) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden ", 403) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdForbidden", 403) } func (o *PatchEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -268,12 +286,17 @@ func (o *PatchEndpointIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the patch endpoint Id not found response +func (o *PatchEndpointIDNotFound) Code() int { + return 404 +} + func (o *PatchEndpointIDNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound", 404) } func (o *PatchEndpointIDNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound", 404) } func (o *PatchEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -319,12 +342,17 @@ func (o *PatchEndpointIDTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the patch endpoint Id too many requests response +func (o *PatchEndpointIDTooManyRequests) Code() int { + return 429 +} + func (o *PatchEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests", 429) } func (o *PatchEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests", 429) } func (o *PatchEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -371,12 +399,19 @@ func (o *PatchEndpointIDFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the patch endpoint Id failed response +func (o *PatchEndpointIDFailed) Code() int { + return 500 +} + func (o *PatchEndpointIDFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %s", 500, payload) } func (o *PatchEndpointIDFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %s", 500, payload) } func (o *PatchEndpointIDFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go index 9692c00137..a7342cba0b 100644 --- 
a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go @@ -79,9 +79,11 @@ type PutEndpointIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go index 97147e563d..8f94ff0da7 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go @@ -9,6 +9,7 @@ package endpoint // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *PutEndpointIDReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PUT /endpoint/{id}] PutEndpointID", response, response.Code()) } } @@ -78,6 +79,7 @@ PutEndpointIDCreated describes a response with status code 201, with default hea Created */ type PutEndpointIDCreated struct { + Payload *models.Endpoint } // IsSuccess returns true when this put endpoint Id created response has a 2xx status code @@ -105,16 +107,34 @@ func (o *PutEndpointIDCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the put endpoint Id created response +func (o *PutEndpointIDCreated) Code() int { + return 201 +} + func (o *PutEndpointIDCreated) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated %s", 201, payload) } func (o *PutEndpointIDCreated) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated %s", 201, payload) +} + +func (o *PutEndpointIDCreated) GetPayload() *models.Endpoint { + return o.Payload } func (o *PutEndpointIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + o.Payload = new(models.Endpoint) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + 
return nil } @@ -157,12 +177,19 @@ func (o *PutEndpointIDInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the put endpoint Id invalid response +func (o *PutEndpointIDInvalid) Code() int { + return 400 +} + func (o *PutEndpointIDInvalid) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %s", 400, payload) } func (o *PutEndpointIDInvalid) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %s", 400, payload) } func (o *PutEndpointIDInvalid) GetPayload() models.Error { @@ -217,12 +244,17 @@ func (o *PutEndpointIDForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the put endpoint Id forbidden response +func (o *PutEndpointIDForbidden) Code() int { + return 403 +} + func (o *PutEndpointIDForbidden) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden ", 403) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden", 403) } func (o *PutEndpointIDForbidden) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden ", 403) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdForbidden", 403) } func (o *PutEndpointIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -268,12 +300,17 @@ func (o *PutEndpointIDExists) IsCode(code int) bool { return code == 409 } +// Code gets the status code for the put endpoint Id exists response +func (o *PutEndpointIDExists) Code() int { + return 409 +} + func (o *PutEndpointIDExists) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists", 409) } func (o *PutEndpointIDExists) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists", 409) } func (o *PutEndpointIDExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -319,12 +356,17 @@ func (o *PutEndpointIDTooManyRequests) IsCode(code int) bool { return code == 429 } +// Code gets the status code for the put endpoint Id too many requests response +func (o *PutEndpointIDTooManyRequests) Code() int { + return 429 +} + func (o *PutEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests", 429) } func (o *PutEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests", 429) } func (o *PutEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -371,12 +413,19 @@ func (o *PutEndpointIDFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the put endpoint Id failed response +func (o *PutEndpointIDFailed) Code() int { + return 500 +} + func (o *PutEndpointIDFailed) Error() string { - return fmt.Sprintf("[PUT 
/endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %s", 500, payload) } func (o *PutEndpointIDFailed) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %s", 500, payload) } func (o *PutEndpointIDFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go index ae095b6799..1d05d7ee55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go @@ -9,6 +9,7 @@ package ipam // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *DeleteIpamIPReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /ipam/{ip}] DeleteIpamIP", response, response.Code()) } } @@ -105,12 +106,17 @@ func (o *DeleteIpamIPOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete ipam Ip o k response +func (o *DeleteIpamIPOK) Code() int { + return 200 +} + func (o *DeleteIpamIPOK) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK", 200) } func (o *DeleteIpamIPOK) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK", 200) } func (o *DeleteIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -156,12 +162,17 @@ func (o *DeleteIpamIPInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the delete ipam Ip invalid response +func (o *DeleteIpamIPInvalid) Code() int { + return 400 +} + func (o *DeleteIpamIPInvalid) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid", 400) } func (o *DeleteIpamIPInvalid) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid", 400) } func (o *DeleteIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -207,12 +218,17 @@ func (o *DeleteIpamIPForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete ipam Ip forbidden response +func (o *DeleteIpamIPForbidden) Code() int { + return 403 +} + func (o *DeleteIpamIPForbidden) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden ", 403) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden", 403) } func (o *DeleteIpamIPForbidden) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden ", 403) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpForbidden", 403) } func 
(o *DeleteIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -258,12 +274,17 @@ func (o *DeleteIpamIPNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete ipam Ip not found response +func (o *DeleteIpamIPNotFound) Code() int { + return 404 +} + func (o *DeleteIpamIPNotFound) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound", 404) } func (o *DeleteIpamIPNotFound) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound", 404) } func (o *DeleteIpamIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -310,12 +331,19 @@ func (o *DeleteIpamIPFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete ipam Ip failure response +func (o *DeleteIpamIPFailure) Code() int { + return 500 +} + func (o *DeleteIpamIPFailure) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %s", 500, payload) } func (o *DeleteIpamIPFailure) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %s", 500, payload) } func (o *DeleteIpamIPFailure) GetPayload() models.Error { @@ -370,12 +398,17 @@ func (o *DeleteIpamIPDisabled) IsCode(code int) bool { return code == 501 } +// Code gets the status code for the delete ipam Ip disabled response +func (o *DeleteIpamIPDisabled) Code() int { + return 501 +} + func (o *DeleteIpamIPDisabled) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled", 501) } func (o *DeleteIpamIPDisabled) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501) + return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled", 501) } func (o *DeleteIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go index bff4193d66..f2adf861c0 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new ipam API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. 
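// Editorial sketch, not part of the upstream patch: the ipam error responses above now
// expose Code(), and Error() renders the models.Error payload as JSON rather than %+v.
// The type switch below only uses types and methods shown in this diff.
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/client/ipam"
)

func classifyDeleteErr(err error) {
	switch e := err.(type) {
	case *ipam.DeleteIpamIPNotFound:
		fmt.Println("ip was not allocated, status", e.Code())
	case *ipam.DeleteIpamIPFailure:
		fmt.Println("agent-side failure:", e.Code(), e.Error())
	default:
		fmt.Println("other error:", err)
	}
}

func main() {
	classifyDeleteErr(&ipam.DeleteIpamIPNotFound{})
}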
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new ipam API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for ipam API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go index 1c8cff834f..0e41391001 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go @@ -9,6 +9,7 @@ package ipam // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -63,7 +64,7 @@ func (o *PostIpamIPReader) ReadResponse(response runtime.ClientResponse, consume } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /ipam/{ip}] PostIpamIP", response, response.Code()) } } @@ -105,12 +106,17 @@ func (o *PostIpamIPOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the post ipam Ip o k response +func (o *PostIpamIPOK) Code() int { + return 200 +} + func (o *PostIpamIPOK) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK", 200) } func (o *PostIpamIPOK) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK", 200) } func (o *PostIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -156,12 +162,17 @@ func (o *PostIpamIPInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the post ipam Ip invalid response +func (o *PostIpamIPInvalid) Code() int { + return 400 +} + func (o *PostIpamIPInvalid) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid", 400) } func (o *PostIpamIPInvalid) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid", 400) } func (o *PostIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats 
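// Editorial sketch, not part of the upstream patch: using the constructors added above.
// Host, base path, scheme and token are placeholder values, not defaults shipped by
// Cilium (the agent API is normally reached over a local unix socket).
package main

import (
	"github.com/cilium/cilium/api/v1/client/ipam"
)

func main() {
	// Bearer-token transport; NewClientWithBasicAuth(host, basePath, scheme, user,
	// password) is the analogous helper for basic authentication.
	c := ipam.NewClientWithBearerToken("localhost:9234", "/v1", "http", "example-token")
	_ = c // satisfies ipam.ClientService (DeleteIpamIP, PostIpamIP, ...)
}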
strfmt.Registry) error { @@ -207,12 +218,17 @@ func (o *PostIpamIPForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the post ipam Ip forbidden response +func (o *PostIpamIPForbidden) Code() int { + return 403 +} + func (o *PostIpamIPForbidden) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden ", 403) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden", 403) } func (o *PostIpamIPForbidden) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden ", 403) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpForbidden", 403) } func (o *PostIpamIPForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -258,12 +274,17 @@ func (o *PostIpamIPExists) IsCode(code int) bool { return code == 409 } +// Code gets the status code for the post ipam Ip exists response +func (o *PostIpamIPExists) Code() int { + return 409 +} + func (o *PostIpamIPExists) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists", 409) } func (o *PostIpamIPExists) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists", 409) } func (o *PostIpamIPExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -310,12 +331,19 @@ func (o *PostIpamIPFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the post ipam Ip failure response +func (o *PostIpamIPFailure) Code() int { + return 500 +} + func (o *PostIpamIPFailure) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %s", 500, payload) } func (o *PostIpamIPFailure) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %s", 500, payload) } func (o *PostIpamIPFailure) GetPayload() models.Error { @@ -370,12 +398,17 @@ func (o *PostIpamIPDisabled) IsCode(code int) bool { return code == 501 } +// Code gets the status code for the post ipam Ip disabled response +func (o *PostIpamIPDisabled) Code() int { + return 501 +} + func (o *PostIpamIPDisabled) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled", 501) } func (o *PostIpamIPDisabled) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501) + return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled", 501) } func (o *PostIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go index 2f13cc2945..324c9efe5e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go @@ -9,6 +9,7 @@ package ipam // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func 
(o *PostIpamReader) ReadResponse(response runtime.ClientResponse, consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /ipam] PostIpam", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *PostIpamCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the post ipam created response +func (o *PostIpamCreated) Code() int { + return 201 +} + func (o *PostIpamCreated) Error() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %s", 201, payload) } func (o *PostIpamCreated) String() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %s", 201, payload) } func (o *PostIpamCreated) GetPayload() *models.IPAMResponse { @@ -150,12 +158,17 @@ func (o *PostIpamForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the post ipam forbidden response +func (o *PostIpamForbidden) Code() int { + return 403 +} + func (o *PostIpamForbidden) Error() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden ", 403) + return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden", 403) } func (o *PostIpamForbidden) String() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden ", 403) + return fmt.Sprintf("[POST /ipam][%d] postIpamForbidden", 403) } func (o *PostIpamForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -202,12 +215,19 @@ func (o *PostIpamFailure) IsCode(code int) bool { return code == 502 } +// Code gets the status code for the post ipam failure response +func (o *PostIpamFailure) Code() int { + return 502 +} + func (o *PostIpamFailure) Error() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %s", 502, payload) } func (o *PostIpamFailure) String() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %s", 502, payload) } func (o *PostIpamFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go deleted file mode 100644 index 7dd482af5b..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetMetricsParams creates a new GetMetricsParams object, -// with the default timeout for this client. 
-// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetMetricsParams() *GetMetricsParams { - return &GetMetricsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetMetricsParamsWithTimeout creates a new GetMetricsParams object -// with the ability to set a timeout on a request. -func NewGetMetricsParamsWithTimeout(timeout time.Duration) *GetMetricsParams { - return &GetMetricsParams{ - timeout: timeout, - } -} - -// NewGetMetricsParamsWithContext creates a new GetMetricsParams object -// with the ability to set a context for a request. -func NewGetMetricsParamsWithContext(ctx context.Context) *GetMetricsParams { - return &GetMetricsParams{ - Context: ctx, - } -} - -// NewGetMetricsParamsWithHTTPClient creates a new GetMetricsParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetMetricsParamsWithHTTPClient(client *http.Client) *GetMetricsParams { - return &GetMetricsParams{ - HTTPClient: client, - } -} - -/* -GetMetricsParams contains all the parameters to send to the API endpoint - - for the get metrics operation. - - Typically these are written to a http.Request. -*/ -type GetMetricsParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get metrics params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMetricsParams) WithDefaults() *GetMetricsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get metrics params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMetricsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get metrics params -func (o *GetMetricsParams) WithTimeout(timeout time.Duration) *GetMetricsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get metrics params -func (o *GetMetricsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get metrics params -func (o *GetMetricsParams) WithContext(ctx context.Context) *GetMetricsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get metrics params -func (o *GetMetricsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get metrics params -func (o *GetMetricsParams) WithHTTPClient(client *http.Client) *GetMetricsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get metrics params -func (o *GetMetricsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetMetricsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go deleted file mode 100644 index 766782c92a..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetMetricsReader is a Reader for the GetMetrics structure. -type GetMetricsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetMetricsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetMetricsInternalServerError() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetMetricsOK creates a GetMetricsOK with default headers values -func NewGetMetricsOK() *GetMetricsOK { - return &GetMetricsOK{} -} - -/* -GetMetricsOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetMetricsOK struct { - Payload []*models.Metric -} - -// IsSuccess returns true when this get metrics o k response has a 2xx status code -func (o *GetMetricsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get metrics o k response has a 3xx status code -func (o *GetMetricsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get metrics o k response has a 4xx status code -func (o *GetMetricsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get metrics o k response has a 5xx status code -func (o *GetMetricsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get metrics o k response a status code equal to that given -func (o *GetMetricsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetMetricsOK) Error() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload) -} - -func (o *GetMetricsOK) String() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload) -} - -func (o *GetMetricsOK) GetPayload() []*models.Metric { - return o.Payload -} - -func (o *GetMetricsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetMetricsInternalServerError creates a GetMetricsInternalServerError with default headers values -func NewGetMetricsInternalServerError() *GetMetricsInternalServerError { - return &GetMetricsInternalServerError{} -} - -/* -GetMetricsInternalServerError describes a response with status code 500, with default header values. 
- -Metrics cannot be retrieved -*/ -type GetMetricsInternalServerError struct { -} - -// IsSuccess returns true when this get metrics internal server error response has a 2xx status code -func (o *GetMetricsInternalServerError) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get metrics internal server error response has a 3xx status code -func (o *GetMetricsInternalServerError) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get metrics internal server error response has a 4xx status code -func (o *GetMetricsInternalServerError) IsClientError() bool { - return false -} - -// IsServerError returns true when this get metrics internal server error response has a 5xx status code -func (o *GetMetricsInternalServerError) IsServerError() bool { - return true -} - -// IsCode returns true when this get metrics internal server error response a status code equal to that given -func (o *GetMetricsInternalServerError) IsCode(code int) bool { - return code == 500 -} - -func (o *GetMetricsInternalServerError) Error() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500) -} - -func (o *GetMetricsInternalServerError) String() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500) -} - -func (o *GetMetricsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go deleted file mode 100644 index d51858ef4b..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new metrics API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for metrics API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -GetMetrics retrieves cilium metrics -*/ -func (a *Client) GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetMetricsParams() - } - op := &runtime.ClientOperation{ - ID: "GetMetrics", - Method: "GET", - PathPattern: "/metrics/", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetMetricsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetMetricsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetMetrics: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go index 10def651e0..ce1dd438ca 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *DeleteFqdnCacheReader) ReadResponse(response runtime.ClientResponse, co } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /fqdn/cache] DeleteFqdnCache", response, response.Code()) } } @@ -87,12 +88,17 @@ func (o *DeleteFqdnCacheOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete fqdn cache o k response +func (o *DeleteFqdnCacheOK) Code() int { + return 200 +} + func (o *DeleteFqdnCacheOK) Error() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK", 200) } func (o *DeleteFqdnCacheOK) String() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK", 200) } func (o *DeleteFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer 
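// Editorial note, not part of the upstream patch: the files deleted above drop the
// generated metrics client entirely, so code built on this vendor tree that called
// metrics.New(transport, formats).GetMetrics(...) no longer compiles after this bump;
// the patch does not show a replacement client, so any migration target has to come
// from the Cilium 1.17 release notes rather than from this diff.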
runtime.Consumer, formats strfmt.Registry) error { @@ -139,12 +145,19 @@ func (o *DeleteFqdnCacheBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the delete fqdn cache bad request response +func (o *DeleteFqdnCacheBadRequest) Code() int { + return 400 +} + func (o *DeleteFqdnCacheBadRequest) Error() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %s", 400, payload) } func (o *DeleteFqdnCacheBadRequest) String() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %s", 400, payload) } func (o *DeleteFqdnCacheBadRequest) GetPayload() models.Error { @@ -199,12 +212,17 @@ func (o *DeleteFqdnCacheForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete fqdn cache forbidden response +func (o *DeleteFqdnCacheForbidden) Code() int { + return 403 +} + func (o *DeleteFqdnCacheForbidden) Error() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden ", 403) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden", 403) } func (o *DeleteFqdnCacheForbidden) String() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden ", 403) + return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheForbidden", 403) } func (o *DeleteFqdnCacheForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go index 538a831e3b..051d3ce208 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -57,7 +58,7 @@ func (o *DeletePolicyReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /policy] DeletePolicy", response, response.Code()) } } @@ -100,12 +101,19 @@ func (o *DeletePolicyOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete policy o k response +func (o *DeletePolicyOK) Code() int { + return 200 +} + func (o *DeletePolicyOK) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %s", 200, payload) } func (o *DeletePolicyOK) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %s", 200, payload) } func (o *DeletePolicyOK) GetPayload() *models.Policy { @@ -163,12 +171,19 @@ func (o *DeletePolicyInvalid) IsCode(code int) bool { return code == 400 } +// Code gets the status code for 
the delete policy invalid response +func (o *DeletePolicyInvalid) Code() int { + return 400 +} + func (o *DeletePolicyInvalid) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %s", 400, payload) } func (o *DeletePolicyInvalid) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %s", 400, payload) } func (o *DeletePolicyInvalid) GetPayload() models.Error { @@ -223,12 +238,17 @@ func (o *DeletePolicyForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete policy forbidden response +func (o *DeletePolicyForbidden) Code() int { + return 403 +} + func (o *DeletePolicyForbidden) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden ", 403) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden", 403) } func (o *DeletePolicyForbidden) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden ", 403) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyForbidden", 403) } func (o *DeletePolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -274,12 +294,17 @@ func (o *DeletePolicyNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete policy not found response +func (o *DeletePolicyNotFound) Code() int { + return 404 +} + func (o *DeletePolicyNotFound) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound", 404) } func (o *DeletePolicyNotFound) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound", 404) } func (o *DeletePolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -326,12 +351,19 @@ func (o *DeletePolicyFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete policy failure response +func (o *DeletePolicyFailure) Code() int { + return 500 +} + func (o *DeletePolicyFailure) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %s", 500, payload) } func (o *DeletePolicyFailure) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %s", 500, payload) } func (o *DeletePolicyFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go index ea3b520e56..e4da10fcae 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go @@ -80,9 +80,11 @@ type GetFqdnCacheIDParams struct { Supported endpoint id prefixes: - cilium-local: Local Cilium endpoint UUID, e.g. 
cilium-local:3389595 - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar + - cni-attachment-id: CNI attachment ID, e.g. cni-attachment-id:22222:eth0 + - container-id: Container runtime ID, e.g. container-id:22222 (deprecated, may not be unique) + - container-name: Container name, e.g. container-name:foobar (deprecated, may not be unique) + - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar (deprecated, may not be unique) + - cep-name: cep name for this container if K8s is enabled, e.g. pod-name:default:foobar-net1 - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 */ diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go index 7d42975239..9e52b3d0f1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetFqdnCacheIDReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /fqdn/cache/{id}] GetFqdnCacheID", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetFqdnCacheIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get fqdn cache Id o k response +func (o *GetFqdnCacheIDOK) Code() int { + return 200 +} + func (o *GetFqdnCacheIDOK) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %s", 200, payload) } func (o *GetFqdnCacheIDOK) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %s", 200, payload) } func (o *GetFqdnCacheIDOK) GetPayload() []*models.DNSLookup { @@ -149,12 +157,19 @@ func (o *GetFqdnCacheIDBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get fqdn cache Id bad request response +func (o *GetFqdnCacheIDBadRequest) Code() int { + return 400 +} + func (o *GetFqdnCacheIDBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %s", 400, payload) } func (o *GetFqdnCacheIDBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %s", 400, payload) } func (o *GetFqdnCacheIDBadRequest) GetPayload() models.Error { @@ -209,12 +224,17 @@ func (o 
*GetFqdnCacheIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get fqdn cache Id not found response +func (o *GetFqdnCacheIDNotFound) Code() int { + return 404 +} + func (o *GetFqdnCacheIDNotFound) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound", 404) } func (o *GetFqdnCacheIDNotFound) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404) + return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound", 404) } func (o *GetFqdnCacheIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go index 775a0fc201..c433b747b4 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /fqdn/cache] GetFqdnCache", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetFqdnCacheOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get fqdn cache o k response +func (o *GetFqdnCacheOK) Code() int { + return 200 +} + func (o *GetFqdnCacheOK) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %s", 200, payload) } func (o *GetFqdnCacheOK) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %s", 200, payload) } func (o *GetFqdnCacheOK) GetPayload() []*models.DNSLookup { @@ -149,12 +157,19 @@ func (o *GetFqdnCacheBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get fqdn cache bad request response +func (o *GetFqdnCacheBadRequest) Code() int { + return 400 +} + func (o *GetFqdnCacheBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %s", 400, payload) } func (o *GetFqdnCacheBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %s", 400, payload) } func (o *GetFqdnCacheBadRequest) GetPayload() models.Error { @@ -209,12 +224,17 @@ func (o *GetFqdnCacheNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get fqdn cache not found response +func (o *GetFqdnCacheNotFound) Code() int { + return 404 +} + func (o 
*GetFqdnCacheNotFound) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound", 404) } func (o *GetFqdnCacheNotFound) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404) + return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound", 404) } func (o *GetFqdnCacheNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go index b2b160db66..c678c5078e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetFqdnNamesReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /fqdn/names] GetFqdnNames", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetFqdnNamesOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get fqdn names o k response +func (o *GetFqdnNamesOK) Code() int { + return 200 +} + func (o *GetFqdnNamesOK) Error() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %s", 200, payload) } func (o *GetFqdnNamesOK) String() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %s", 200, payload) } func (o *GetFqdnNamesOK) GetPayload() *models.NameManager { @@ -145,12 +153,19 @@ func (o *GetFqdnNamesBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get fqdn names bad request response +func (o *GetFqdnNamesBadRequest) Code() int { + return 400 +} + func (o *GetFqdnNamesBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %s", 400, payload) } func (o *GetFqdnNamesBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %s", 400, payload) } func (o *GetFqdnNamesBadRequest) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go index 0608766c8d..0583dae40b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you 
re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetIdentityEndpointsReader) ReadResponse(response runtime.ClientRespons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /identity/endpoints] GetIdentityEndpoints", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetIdentityEndpointsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get identity endpoints o k response +func (o *GetIdentityEndpointsOK) Code() int { + return 200 +} + func (o *GetIdentityEndpointsOK) Error() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %s", 200, payload) } func (o *GetIdentityEndpointsOK) String() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %s", 200, payload) } func (o *GetIdentityEndpointsOK) GetPayload() []*models.IdentityEndpoints { @@ -142,12 +150,17 @@ func (o *GetIdentityEndpointsNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get identity endpoints not found response +func (o *GetIdentityEndpointsNotFound) Code() int { + return 404 +} + func (o *GetIdentityEndpointsNotFound) Error() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404) + return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound", 404) } func (o *GetIdentityEndpointsNotFound) String() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404) + return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound", 404) } func (o *GetIdentityEndpointsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go index c6d55d0cac..d8cb1758e8 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -57,7 +58,7 @@ func (o *GetIdentityIDReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /identity/{id}] GetIdentityID", response, response.Code()) } } @@ -100,12 +101,19 @@ func (o *GetIdentityIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get identity Id o k response +func (o *GetIdentityIDOK) Code() int { + return 200 +} + func (o *GetIdentityIDOK) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload) + payload, _ := 
json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %s", 200, payload) } func (o *GetIdentityIDOK) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %s", 200, payload) } func (o *GetIdentityIDOK) GetPayload() *models.Identity { @@ -162,12 +170,17 @@ func (o *GetIdentityIDBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get identity Id bad request response +func (o *GetIdentityIDBadRequest) Code() int { + return 400 +} + func (o *GetIdentityIDBadRequest) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest", 400) } func (o *GetIdentityIDBadRequest) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest", 400) } func (o *GetIdentityIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -213,12 +226,17 @@ func (o *GetIdentityIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get identity Id not found response +func (o *GetIdentityIDNotFound) Code() int { + return 404 +} + func (o *GetIdentityIDNotFound) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound", 404) } func (o *GetIdentityIDNotFound) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound", 404) } func (o *GetIdentityIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -265,12 +283,19 @@ func (o *GetIdentityIDUnreachable) IsCode(code int) bool { return code == 520 } +// Code gets the status code for the get identity Id unreachable response +func (o *GetIdentityIDUnreachable) Code() int { + return 520 +} + func (o *GetIdentityIDUnreachable) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %s", 520, payload) } func (o *GetIdentityIDUnreachable) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %s", 520, payload) } func (o *GetIdentityIDUnreachable) GetPayload() models.Error { @@ -326,12 +351,19 @@ func (o *GetIdentityIDInvalidStorageFormat) IsCode(code int) bool { return code == 521 } +// Code gets the status code for the get identity Id invalid storage format response +func (o *GetIdentityIDInvalidStorageFormat) Code() int { + return 521 +} + func (o *GetIdentityIDInvalidStorageFormat) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %s", 521, payload) } func (o *GetIdentityIDInvalidStorageFormat) String() string { - return fmt.Sprintf("[GET 
/identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %s", 521, payload) } func (o *GetIdentityIDInvalidStorageFormat) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go index 64ea45296d..fd8c9ff196 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *GetIdentityReader) ReadResponse(response runtime.ClientResponse, consum } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /identity] GetIdentity", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *GetIdentityOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get identity o k response +func (o *GetIdentityOK) Code() int { + return 200 +} + func (o *GetIdentityOK) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityOK %s", 200, payload) } func (o *GetIdentityOK) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityOK %s", 200, payload) } func (o *GetIdentityOK) GetPayload() []*models.Identity { @@ -154,12 +162,17 @@ func (o *GetIdentityNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get identity not found response +func (o *GetIdentityNotFound) Code() int { + return 404 +} + func (o *GetIdentityNotFound) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404) + return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound", 404) } func (o *GetIdentityNotFound) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404) + return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound", 404) } func (o *GetIdentityNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -206,12 +219,19 @@ func (o *GetIdentityUnreachable) IsCode(code int) bool { return code == 520 } +// Code gets the status code for the get identity unreachable response +func (o *GetIdentityUnreachable) Code() int { + return 520 +} + func (o *GetIdentityUnreachable) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %s", 520, payload) } func (o *GetIdentityUnreachable) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %s", 520, payload) } func (o *GetIdentityUnreachable) GetPayload() models.Error { @@ -267,12 +287,19 @@ func (o 
*GetIdentityInvalidStorageFormat) IsCode(code int) bool { return code == 521 } +// Code gets the status code for the get identity invalid storage format response +func (o *GetIdentityInvalidStorageFormat) Code() int { + return 521 +} + func (o *GetIdentityInvalidStorageFormat) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %s", 521, payload) } func (o *GetIdentityInvalidStorageFormat) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %s", 521, payload) } func (o *GetIdentityInvalidStorageFormat) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go index 2f7c23112a..c692c1972b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go @@ -17,6 +17,8 @@ import ( "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" + + "github.com/cilium/cilium/api/v1/models" ) // NewGetIPParams creates a new GetIPParams object, @@ -70,6 +72,13 @@ type GetIPParams struct { */ Cidr *string + /* Labels. + + List of labels + + */ + Labels models.Labels + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -134,6 +143,17 @@ func (o *GetIPParams) SetCidr(cidr *string) { o.Cidr = cidr } +// WithLabels adds the labels to the get IP params +func (o *GetIPParams) WithLabels(labels models.Labels) *GetIPParams { + o.SetLabels(labels) + return o +} + +// SetLabels adds the labels to the get IP params +func (o *GetIPParams) SetLabels(labels models.Labels) { + o.Labels = labels +} + // WriteToRequest writes these params to a swagger request func (o *GetIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -158,6 +178,11 @@ func (o *GetIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registr } } } + if o.Labels != nil { + if err := r.SetBodyParam(o.Labels); err != nil { + return err + } + } if len(res) > 0 { return errors.CompositeValidationError(res...) 
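Editor's note: the hunk above adds an optional Labels body parameter to GetIPParams, so GET /ip lookups can now be filtered by label in addition to CIDR. The minimal sketch below shows how a caller might use it; the transport address, CIDR, and label value are placeholder assumptions (the real agent API is normally reached over a unix socket), and models.Labels is assumed to be the generated []string alias from the cilium API models.

package main

import (
	"fmt"
	"log"

	runtimeclient "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/cilium/cilium/api/v1/client/policy"
	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	// Placeholder transport; host, base path and scheme are assumptions for the sketch.
	transport := runtimeclient.New("localhost:9234", "/v1", []string{"http"})
	pc := policy.New(transport, strfmt.Default)

	// Combine the existing cidr query parameter with the new labels body parameter.
	params := policy.NewGetIPParams().
		WithCidr(swag.String("10.0.0.0/8")).          // existing query parameter
		WithLabels(models.Labels{"k8s:app=frontend"}) // new body parameter from this hunk

	resp, err := pc.GetIP(params)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range resp.Payload {
		fmt.Println(swag.StringValue(entry.Cidr))
	}
}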
diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go index 37db9ccd25..ff37f37cda 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *GetIPReader) ReadResponse(response runtime.ClientResponse, consumer run } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /ip] GetIP", response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *GetIPOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get Ip o k response +func (o *GetIPOK) Code() int { + return 200 +} + func (o *GetIPOK) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /ip][%d] getIpOK %s", 200, payload) } func (o *GetIPOK) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /ip][%d] getIpOK %s", 200, payload) } func (o *GetIPOK) GetPayload() []*models.IPListEntry { @@ -149,12 +157,19 @@ func (o *GetIPBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get Ip bad request response +func (o *GetIPBadRequest) Code() int { + return 400 +} + func (o *GetIPBadRequest) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %s", 400, payload) } func (o *GetIPBadRequest) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %s", 400, payload) } func (o *GetIPBadRequest) GetPayload() models.Error { @@ -209,12 +224,17 @@ func (o *GetIPNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get Ip not found response +func (o *GetIPNotFound) Code() int { + return 404 +} + func (o *GetIPNotFound) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404) + return fmt.Sprintf("[GET /ip][%d] getIpNotFound", 404) } func (o *GetIPNotFound) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404) + return fmt.Sprintf("[GET /ip][%d] getIpNotFound", 404) } func (o *GetIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go index cba45439de..6717729837 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetPolicyReader) ReadResponse(response runtime.ClientResponse, 
consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /policy] GetPolicy", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetPolicyOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get policy o k response +func (o *GetPolicyOK) Code() int { + return 200 +} + func (o *GetPolicyOK) Error() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /policy][%d] getPolicyOK %s", 200, payload) } func (o *GetPolicyOK) String() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /policy][%d] getPolicyOK %s", 200, payload) } func (o *GetPolicyOK) GetPayload() *models.Policy { @@ -144,12 +152,17 @@ func (o *GetPolicyNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get policy not found response +func (o *GetPolicyNotFound) Code() int { + return 404 +} + func (o *GetPolicyNotFound) Error() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404) + return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound", 404) } func (o *GetPolicyNotFound) String() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404) + return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound", 404) } func (o *GetPolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go index a1cbec4a32..352f8431d1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetPolicySelectorsReader) ReadResponse(response runtime.ClientResponse, } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /policy/selectors] GetPolicySelectors", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetPolicySelectorsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get policy selectors o k response +func (o *GetPolicySelectorsOK) Code() int { + return 200 +} + func (o *GetPolicySelectorsOK) Error() string { - return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %s", 200, payload) } func (o *GetPolicySelectorsOK) String() string { - return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %s", 200, payload) } func (o *GetPolicySelectorsOK) GetPayload() models.SelectorCache { diff --git 
a/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go index c729270345..dcf03aaf60 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new policy API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new policy API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for policy API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go index 769f669d83..ef09ec7960 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go @@ -17,6 +17,7 @@ import ( "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" ) // NewPutPolicyParams creates a new PutPolicyParams object, @@ -70,6 +71,18 @@ type PutPolicyParams struct { */ Policy string + /* Replace. + + If true, indicates that existing rules with identical labels should be replaced. + */ + Replace *bool + + /* ReplaceWithLabels. + + If present, indicates that existing rules with the given labels should be deleted. 
+ */ + ReplaceWithLabels []string + timeout time.Duration Context context.Context HTTPClient *http.Client @@ -134,6 +147,28 @@ func (o *PutPolicyParams) SetPolicy(policy string) { o.Policy = policy } +// WithReplace adds the replace to the put policy params +func (o *PutPolicyParams) WithReplace(replace *bool) *PutPolicyParams { + o.SetReplace(replace) + return o +} + +// SetReplace adds the replace to the put policy params +func (o *PutPolicyParams) SetReplace(replace *bool) { + o.Replace = replace +} + +// WithReplaceWithLabels adds the replaceWithLabels to the put policy params +func (o *PutPolicyParams) WithReplaceWithLabels(replaceWithLabels []string) *PutPolicyParams { + o.SetReplaceWithLabels(replaceWithLabels) + return o +} + +// SetReplaceWithLabels adds the replaceWithLabels to the put policy params +func (o *PutPolicyParams) SetReplaceWithLabels(replaceWithLabels []string) { + o.ReplaceWithLabels = replaceWithLabels +} + // WriteToRequest writes these params to a swagger request func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { @@ -145,8 +180,53 @@ func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Reg return err } + if o.Replace != nil { + + // query param replace + var qrReplace bool + + if o.Replace != nil { + qrReplace = *o.Replace + } + qReplace := swag.FormatBool(qrReplace) + if qReplace != "" { + + if err := r.SetQueryParam("replace", qReplace); err != nil { + return err + } + } + } + + if o.ReplaceWithLabels != nil { + + // binding items for replace-with-labels + joinedReplaceWithLabels := o.bindParamReplaceWithLabels(reg) + + // query array param replace-with-labels + if err := r.SetQueryParam("replace-with-labels", joinedReplaceWithLabels...); err != nil { + return err + } + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} return nil } + +// bindParamPutPolicy binds the parameter replace-with-labels +func (o *PutPolicyParams) bindParamReplaceWithLabels(formats strfmt.Registry) []string { + replaceWithLabelsIR := o.ReplaceWithLabels + + var replaceWithLabelsIC []string + for _, replaceWithLabelsIIR := range replaceWithLabelsIR { // explode []string + + replaceWithLabelsIIV := replaceWithLabelsIIR // string as string + replaceWithLabelsIC = append(replaceWithLabelsIC, replaceWithLabelsIIV) + } + + // items.CollectionFormat: "" + replaceWithLabelsIS := swag.JoinByFormat(replaceWithLabelsIC, "") + + return replaceWithLabelsIS +} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go index f1104c418a..9a57d65f78 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go @@ -9,6 +9,7 @@ package policy // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -57,7 +58,7 @@ func (o *PutPolicyReader) ReadResponse(response runtime.ClientResponse, consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PUT /policy] PutPolicy", response, response.Code()) } } @@ -100,12 +101,19 @@ func (o *PutPolicyOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the put policy o k response +func (o *PutPolicyOK) Code() int { + return 200 +} + func (o *PutPolicyOK) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %s", 200, payload) } func (o *PutPolicyOK) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %s", 200, payload) } func (o *PutPolicyOK) GetPayload() *models.Policy { @@ -163,12 +171,19 @@ func (o *PutPolicyInvalidPolicy) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the put policy invalid policy response +func (o *PutPolicyInvalidPolicy) Code() int { + return 400 +} + func (o *PutPolicyInvalidPolicy) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %s", 400, payload) } func (o *PutPolicyInvalidPolicy) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %s", 400, payload) } func (o *PutPolicyInvalidPolicy) GetPayload() models.Error { @@ -223,12 +238,17 @@ func (o *PutPolicyForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the put policy forbidden response +func (o *PutPolicyForbidden) Code() int { + return 403 +} + func (o *PutPolicyForbidden) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden ", 403) + return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden", 403) } func (o *PutPolicyForbidden) String() string { - return fmt.Sprintf("[PUT /policy][%d] 
putPolicyForbidden ", 403) + return fmt.Sprintf("[PUT /policy][%d] putPolicyForbidden", 403) } func (o *PutPolicyForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -275,12 +295,19 @@ func (o *PutPolicyInvalidPath) IsCode(code int) bool { return code == 460 } +// Code gets the status code for the put policy invalid path response +func (o *PutPolicyInvalidPath) Code() int { + return 460 +} + func (o *PutPolicyInvalidPath) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %s", 460, payload) } func (o *PutPolicyInvalidPath) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %s", 460, payload) } func (o *PutPolicyInvalidPath) GetPayload() models.Error { @@ -336,12 +363,19 @@ func (o *PutPolicyFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the put policy failure response +func (o *PutPolicyFailure) Code() int { + return 500 +} + func (o *PutPolicyFailure) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %s", 500, payload) } func (o *PutPolicyFailure) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %s", 500, payload) } func (o *PutPolicyFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go index 0d5aea637d..24ce4c27f3 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go @@ -9,6 +9,7 @@ package prefilter // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *DeletePrefilterReader) ReadResponse(response runtime.ClientResponse, co } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /prefilter] DeletePrefilter", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *DeletePrefilterOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete prefilter o k response +func (o *DeletePrefilterOK) Code() int { + return 200 +} + func (o *DeletePrefilterOK) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %s", 200, payload) } func (o *DeletePrefilterOK) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %s", 200, payload) } func (o *DeletePrefilterOK) GetPayload() *models.Prefilter { @@ 
-156,12 +164,17 @@ func (o *DeletePrefilterForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete prefilter forbidden response +func (o *DeletePrefilterForbidden) Code() int { + return 403 +} + func (o *DeletePrefilterForbidden) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden ", 403) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden", 403) } func (o *DeletePrefilterForbidden) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden ", 403) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterForbidden", 403) } func (o *DeletePrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -208,12 +221,19 @@ func (o *DeletePrefilterInvalidCIDR) IsCode(code int) bool { return code == 461 } +// Code gets the status code for the delete prefilter invalid c Id r response +func (o *DeletePrefilterInvalidCIDR) Code() int { + return 461 +} + func (o *DeletePrefilterInvalidCIDR) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %s", 461, payload) } func (o *DeletePrefilterInvalidCIDR) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %s", 461, payload) } func (o *DeletePrefilterInvalidCIDR) GetPayload() models.Error { @@ -269,12 +289,19 @@ func (o *DeletePrefilterFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete prefilter failure response +func (o *DeletePrefilterFailure) Code() int { + return 500 +} + func (o *DeletePrefilterFailure) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %s", 500, payload) } func (o *DeletePrefilterFailure) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %s", 500, payload) } func (o *DeletePrefilterFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go index 63e051e3ac..dea7395df9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go @@ -9,6 +9,7 @@ package prefilter // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetPrefilterReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /prefilter] GetPrefilter", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetPrefilterOK) IsCode(code int) bool { return 
code == 200 } +// Code gets the status code for the get prefilter o k response +func (o *GetPrefilterOK) Code() int { + return 200 +} + func (o *GetPrefilterOK) Error() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %s", 200, payload) } func (o *GetPrefilterOK) String() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %s", 200, payload) } func (o *GetPrefilterOK) GetPayload() *models.Prefilter { @@ -145,12 +153,19 @@ func (o *GetPrefilterFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get prefilter failure response +func (o *GetPrefilterFailure) Code() int { + return 500 +} + func (o *GetPrefilterFailure) Error() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %s", 500, payload) } func (o *GetPrefilterFailure) String() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %s", 500, payload) } func (o *GetPrefilterFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go index 56db490a65..33f0730028 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go @@ -9,6 +9,7 @@ package prefilter // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *PatchPrefilterReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /prefilter] PatchPrefilter", response, response.Code()) } } @@ -94,12 +95,19 @@ func (o *PatchPrefilterOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the patch prefilter o k response +func (o *PatchPrefilterOK) Code() int { + return 200 +} + func (o *PatchPrefilterOK) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %s", 200, payload) } func (o *PatchPrefilterOK) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %s", 200, payload) } func (o *PatchPrefilterOK) GetPayload() *models.Prefilter { @@ -156,12 +164,17 @@ func (o *PatchPrefilterForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the patch prefilter forbidden response +func (o *PatchPrefilterForbidden) Code() int { + return 403 +} + func (o *PatchPrefilterForbidden) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] 
patchPrefilterForbidden ", 403) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden", 403) } func (o *PatchPrefilterForbidden) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden ", 403) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterForbidden", 403) } func (o *PatchPrefilterForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -208,12 +221,19 @@ func (o *PatchPrefilterInvalidCIDR) IsCode(code int) bool { return code == 461 } +// Code gets the status code for the patch prefilter invalid c Id r response +func (o *PatchPrefilterInvalidCIDR) Code() int { + return 461 +} + func (o *PatchPrefilterInvalidCIDR) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %s", 461, payload) } func (o *PatchPrefilterInvalidCIDR) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %s", 461, payload) } func (o *PatchPrefilterInvalidCIDR) GetPayload() models.Error { @@ -269,12 +289,19 @@ func (o *PatchPrefilterFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the patch prefilter failure response +func (o *PatchPrefilterFailure) Code() int { + return 500 +} + func (o *PatchPrefilterFailure) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %s", 500, payload) } func (o *PatchPrefilterFailure) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %s", 500, payload) } func (o *PatchPrefilterFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go index c577a739a3..60ad391f50 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new prefilter API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. 
+func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new prefilter API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for prefilter API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go index cf7ab77a23..311eb24e9e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go @@ -9,6 +9,7 @@ package recorder // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *DeleteRecorderIDReader) ReadResponse(response runtime.ClientResponse, c } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /recorder/{id}] DeleteRecorderID", response, response.Code()) } } @@ -93,12 +94,17 @@ func (o *DeleteRecorderIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete recorder Id o k response +func (o *DeleteRecorderIDOK) Code() int { + return 200 +} + func (o *DeleteRecorderIDOK) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK", 200) } func (o *DeleteRecorderIDOK) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK", 200) } func (o *DeleteRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -144,12 +150,17 @@ func (o *DeleteRecorderIDForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete recorder Id forbidden response +func (o *DeleteRecorderIDForbidden) Code() int { + return 403 +} + func (o *DeleteRecorderIDForbidden) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdForbidden ", 403) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdForbidden", 403) } func (o *DeleteRecorderIDForbidden) String() string { - return fmt.Sprintf("[DELETE 
/recorder/{id}][%d] deleteRecorderIdForbidden ", 403) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdForbidden", 403) } func (o *DeleteRecorderIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -195,12 +206,17 @@ func (o *DeleteRecorderIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete recorder Id not found response +func (o *DeleteRecorderIDNotFound) Code() int { + return 404 +} + func (o *DeleteRecorderIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound", 404) } func (o *DeleteRecorderIDNotFound) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound", 404) } func (o *DeleteRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -247,12 +263,19 @@ func (o *DeleteRecorderIDFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete recorder Id failure response +func (o *DeleteRecorderIDFailure) Code() int { + return 500 +} + func (o *DeleteRecorderIDFailure) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %s", 500, payload) } func (o *DeleteRecorderIDFailure) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %s", 500, payload) } func (o *DeleteRecorderIDFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go index 07f4e60cdc..89996e14ad 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go @@ -9,6 +9,7 @@ package recorder // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetRecorderIDReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /recorder/{id}] GetRecorderID", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetRecorderIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get recorder Id o k response +func (o *GetRecorderIDOK) Code() int { + return 200 +} + func (o *GetRecorderIDOK) Error() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %s", 200, payload) } func (o *GetRecorderIDOK) String() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload) + payload, _ := 
json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %s", 200, payload) } func (o *GetRecorderIDOK) GetPayload() *models.Recorder { @@ -144,12 +152,17 @@ func (o *GetRecorderIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get recorder Id not found response +func (o *GetRecorderIDNotFound) Code() int { + return 404 +} + func (o *GetRecorderIDNotFound) Error() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404) + return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound", 404) } func (o *GetRecorderIDNotFound) String() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404) + return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound", 404) } func (o *GetRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go index 06b8555968..6e2d671c37 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go @@ -9,6 +9,7 @@ package recorder // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetRecorderMasksReader) ReadResponse(response runtime.ClientResponse, c } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /recorder/masks] GetRecorderMasks", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetRecorderMasksOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get recorder masks o k response +func (o *GetRecorderMasksOK) Code() int { + return 200 +} + func (o *GetRecorderMasksOK) Error() string { - return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %s", 200, payload) } func (o *GetRecorderMasksOK) String() string { - return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %s", 200, payload) } func (o *GetRecorderMasksOK) GetPayload() []*models.RecorderMask { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go index f45762c308..304fe1052f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go @@ -9,6 +9,7 @@ package recorder // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetRecorderReader) ReadResponse(response runtime.ClientResponse, consum } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", 
response, response.Code()) + return nil, runtime.NewAPIError("[GET /recorder] GetRecorder", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetRecorderOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get recorder o k response +func (o *GetRecorderOK) Code() int { + return 200 +} + func (o *GetRecorderOK) Error() string { - return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %s", 200, payload) } func (o *GetRecorderOK) String() string { - return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %s", 200, payload) } func (o *GetRecorderOK) GetPayload() []*models.Recorder { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go index 996c87026d..32e6e13027 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go @@ -9,6 +9,7 @@ package recorder // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *PutRecorderIDReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PUT /recorder/{id}] PutRecorderID", response, response.Code()) } } @@ -93,12 +94,17 @@ func (o *PutRecorderIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the put recorder Id o k response +func (o *PutRecorderIDOK) Code() int { + return 200 +} + func (o *PutRecorderIDOK) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK", 200) } func (o *PutRecorderIDOK) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK", 200) } func (o *PutRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -144,12 +150,17 @@ func (o *PutRecorderIDCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the put recorder Id created response +func (o *PutRecorderIDCreated) Code() int { + return 201 +} + func (o *PutRecorderIDCreated) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated", 201) } func (o *PutRecorderIDCreated) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated", 201) } func (o *PutRecorderIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -195,12 +206,17 @@ func (o *PutRecorderIDForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the put recorder Id forbidden response +func (o *PutRecorderIDForbidden) Code() int { + return 403 +} + func 
(o *PutRecorderIDForbidden) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden ", 403) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden", 403) } func (o *PutRecorderIDForbidden) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden ", 403) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdForbidden", 403) } func (o *PutRecorderIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -247,12 +263,19 @@ func (o *PutRecorderIDFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the put recorder Id failure response +func (o *PutRecorderIDFailure) Code() int { + return 500 +} + func (o *PutRecorderIDFailure) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %s", 500, payload) } func (o *PutRecorderIDFailure) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %s", 500, payload) } func (o *PutRecorderIDFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go index 883e304be3..2f8af493ae 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new recorder API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new recorder API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. 
+func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for recorder API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go index 21926d9c47..b750a9ed90 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go @@ -9,6 +9,7 @@ package service // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -51,7 +52,7 @@ func (o *DeleteServiceIDReader) ReadResponse(response runtime.ClientResponse, co } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /service/{id}] DeleteServiceID", response, response.Code()) } } @@ -93,12 +94,17 @@ func (o *DeleteServiceIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete service Id o k response +func (o *DeleteServiceIDOK) Code() int { + return 200 +} + func (o *DeleteServiceIDOK) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK", 200) } func (o *DeleteServiceIDOK) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK", 200) } func (o *DeleteServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -144,12 +150,17 @@ func (o *DeleteServiceIDForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the delete service Id forbidden response +func (o *DeleteServiceIDForbidden) Code() int { + return 403 +} + func (o *DeleteServiceIDForbidden) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden ", 403) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden", 403) } func (o *DeleteServiceIDForbidden) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden ", 403) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdForbidden", 403) } func (o *DeleteServiceIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -195,12 +206,17 @@ func (o *DeleteServiceIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete service Id not found response +func (o *DeleteServiceIDNotFound) Code() int { + return 404 +} + func (o *DeleteServiceIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404) + return 
fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound", 404) } func (o *DeleteServiceIDNotFound) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound", 404) } func (o *DeleteServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -247,12 +263,19 @@ func (o *DeleteServiceIDFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete service Id failure response +func (o *DeleteServiceIDFailure) Code() int { + return 500 +} + func (o *DeleteServiceIDFailure) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %s", 500, payload) } func (o *DeleteServiceIDFailure) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %s", 500, payload) } func (o *DeleteServiceIDFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go index ac5974ce51..91d375d081 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go @@ -9,6 +9,7 @@ package service // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetLrpReader) ReadResponse(response runtime.ClientResponse, consumer ru } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /lrp] GetLrp", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetLrpOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get lrp o k response +func (o *GetLrpOK) Code() int { + return 200 +} + func (o *GetLrpOK) Error() string { - return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /lrp][%d] getLrpOK %s", 200, payload) } func (o *GetLrpOK) String() string { - return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /lrp][%d] getLrpOK %s", 200, payload) } func (o *GetLrpOK) GetPayload() []*models.LRPSpec { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go index 4b198ced9d..784edf0e96 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go @@ -9,6 +9,7 @@ package service // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetServiceIDReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, 
runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /service/{id}] GetServiceID", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetServiceIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get service Id o k response +func (o *GetServiceIDOK) Code() int { + return 200 +} + func (o *GetServiceIDOK) Error() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %s", 200, payload) } func (o *GetServiceIDOK) String() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %s", 200, payload) } func (o *GetServiceIDOK) GetPayload() *models.Service { @@ -144,12 +152,17 @@ func (o *GetServiceIDNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get service Id not found response +func (o *GetServiceIDNotFound) Code() int { + return 404 +} + func (o *GetServiceIDNotFound) Error() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404) + return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound", 404) } func (o *GetServiceIDNotFound) String() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404) + return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound", 404) } func (o *GetServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go index dbf75ba847..18d8d0b46c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go @@ -9,6 +9,7 @@ package service // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetServiceReader) ReadResponse(response runtime.ClientResponse, consume } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /service] GetService", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetServiceOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get service o k response +func (o *GetServiceOK) Code() int { + return 200 +} + func (o *GetServiceOK) Error() string { - return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /service][%d] getServiceOK %s", 200, payload) } func (o *GetServiceOK) String() string { - return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /service][%d] getServiceOK %s", 200, payload) } func (o *GetServiceOK) GetPayload() []*models.Service { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go 
b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go index af85fa32a6..48e42095f8 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go @@ -9,6 +9,7 @@ package service // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -69,7 +70,7 @@ func (o *PutServiceIDReader) ReadResponse(response runtime.ClientResponse, consu } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PUT /service/{id}] PutServiceID", response, response.Code()) } } @@ -111,12 +112,17 @@ func (o *PutServiceIDOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the put service Id o k response +func (o *PutServiceIDOK) Code() int { + return 200 +} + func (o *PutServiceIDOK) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK", 200) } func (o *PutServiceIDOK) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK", 200) } func (o *PutServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -162,12 +168,17 @@ func (o *PutServiceIDCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the put service Id created response +func (o *PutServiceIDCreated) Code() int { + return 201 +} + func (o *PutServiceIDCreated) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated", 201) } func (o *PutServiceIDCreated) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated", 201) } func (o *PutServiceIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -213,12 +224,17 @@ func (o *PutServiceIDForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the put service Id forbidden response +func (o *PutServiceIDForbidden) Code() int { + return 403 +} + func (o *PutServiceIDForbidden) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden ", 403) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden", 403) } func (o *PutServiceIDForbidden) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden ", 403) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdForbidden", 403) } func (o *PutServiceIDForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -265,12 +281,19 @@ func (o *PutServiceIDInvalidFrontend) IsCode(code int) bool { return code == 460 } +// Code gets the status code for the put service Id invalid frontend response +func (o *PutServiceIDInvalidFrontend) Code() int { + return 460 +} + func (o *PutServiceIDInvalidFrontend) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload) + payload, _ := json.Marshal(o.Payload) + return 
fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %s", 460, payload) } func (o *PutServiceIDInvalidFrontend) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %s", 460, payload) } func (o *PutServiceIDInvalidFrontend) GetPayload() models.Error { @@ -326,12 +349,19 @@ func (o *PutServiceIDInvalidBackend) IsCode(code int) bool { return code == 461 } +// Code gets the status code for the put service Id invalid backend response +func (o *PutServiceIDInvalidBackend) Code() int { + return 461 +} + func (o *PutServiceIDInvalidBackend) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %s", 461, payload) } func (o *PutServiceIDInvalidBackend) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %s", 461, payload) } func (o *PutServiceIDInvalidBackend) GetPayload() models.Error { @@ -387,12 +417,19 @@ func (o *PutServiceIDFailure) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the put service Id failure response +func (o *PutServiceIDFailure) Code() int { + return 500 +} + func (o *PutServiceIDFailure) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %s", 500, payload) } func (o *PutServiceIDFailure) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %s", 500, payload) } func (o *PutServiceIDFailure) GetPayload() models.Error { @@ -448,12 +485,19 @@ func (o *PutServiceIDUpdateBackendFailure) IsCode(code int) bool { return code == 501 } +// Code gets the status code for the put service Id update backend failure response +func (o *PutServiceIDUpdateBackendFailure) Code() int { + return 501 +} + func (o *PutServiceIDUpdateBackendFailure) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %s", 501, payload) } func (o *PutServiceIDUpdateBackendFailure) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %s", 501, payload) } func (o *PutServiceIDUpdateBackendFailure) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go b/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go index 332799a98b..2ace6b6773 100644 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" 
"github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new service API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new service API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for service API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go deleted file mode 100644 index fe1c3ff1a3..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package statedb - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetStatedbDumpParams creates a new GetStatedbDumpParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetStatedbDumpParams() *GetStatedbDumpParams { - return &GetStatedbDumpParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetStatedbDumpParamsWithTimeout creates a new GetStatedbDumpParams object -// with the ability to set a timeout on a request. -func NewGetStatedbDumpParamsWithTimeout(timeout time.Duration) *GetStatedbDumpParams { - return &GetStatedbDumpParams{ - timeout: timeout, - } -} - -// NewGetStatedbDumpParamsWithContext creates a new GetStatedbDumpParams object -// with the ability to set a context for a request. 
-func NewGetStatedbDumpParamsWithContext(ctx context.Context) *GetStatedbDumpParams { - return &GetStatedbDumpParams{ - Context: ctx, - } -} - -// NewGetStatedbDumpParamsWithHTTPClient creates a new GetStatedbDumpParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetStatedbDumpParamsWithHTTPClient(client *http.Client) *GetStatedbDumpParams { - return &GetStatedbDumpParams{ - HTTPClient: client, - } -} - -/* -GetStatedbDumpParams contains all the parameters to send to the API endpoint - - for the get statedb dump operation. - - Typically these are written to a http.Request. -*/ -type GetStatedbDumpParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get statedb dump params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetStatedbDumpParams) WithDefaults() *GetStatedbDumpParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get statedb dump params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetStatedbDumpParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get statedb dump params -func (o *GetStatedbDumpParams) WithTimeout(timeout time.Duration) *GetStatedbDumpParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get statedb dump params -func (o *GetStatedbDumpParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get statedb dump params -func (o *GetStatedbDumpParams) WithContext(ctx context.Context) *GetStatedbDumpParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get statedb dump params -func (o *GetStatedbDumpParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get statedb dump params -func (o *GetStatedbDumpParams) WithHTTPClient(client *http.Client) *GetStatedbDumpParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get statedb dump params -func (o *GetStatedbDumpParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetStatedbDumpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go deleted file mode 100644 index 3577f9c3b7..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/statedb/get_statedb_dump_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package statedb - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// GetStatedbDumpReader is a Reader for the GetStatedbDump structure. 
-type GetStatedbDumpReader struct { - formats strfmt.Registry - writer io.Writer -} - -// ReadResponse reads a server response into the received o. -func (o *GetStatedbDumpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetStatedbDumpOK(o.writer) - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetStatedbDumpOK creates a GetStatedbDumpOK with default headers values -func NewGetStatedbDumpOK(writer io.Writer) *GetStatedbDumpOK { - return &GetStatedbDumpOK{ - - Payload: writer, - } -} - -/* -GetStatedbDumpOK describes a response with status code 200, with default header values. - -Success -*/ -type GetStatedbDumpOK struct { - Payload io.Writer -} - -// IsSuccess returns true when this get statedb dump o k response has a 2xx status code -func (o *GetStatedbDumpOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get statedb dump o k response has a 3xx status code -func (o *GetStatedbDumpOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get statedb dump o k response has a 4xx status code -func (o *GetStatedbDumpOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get statedb dump o k response has a 5xx status code -func (o *GetStatedbDumpOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get statedb dump o k response a status code equal to that given -func (o *GetStatedbDumpOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetStatedbDumpOK) Error() string { - return fmt.Sprintf("[GET /statedb/dump][%d] getStatedbDumpOK %+v", 200, o.Payload) -} - -func (o *GetStatedbDumpOK) String() string { - return fmt.Sprintf("[GET /statedb/dump][%d] getStatedbDumpOK %+v", 200, o.Payload) -} - -func (o *GetStatedbDumpOK) GetPayload() io.Writer { - return o.Payload -} - -func (o *GetStatedbDumpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go b/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go deleted file mode 100644 index 69365c913f..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/statedb/statedb_client.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package statedb - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new statedb API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for statedb API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -GetStatedbDump dumps state d b contents -*/ -func (a *Client) GetStatedbDump(params *GetStatedbDumpParams, writer io.Writer, opts ...ClientOption) (*GetStatedbDumpOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetStatedbDumpParams() - } - op := &runtime.ClientOperation{ - ID: "GetStatedbDump", - Method: "GET", - PathPattern: "/statedb/dump", - ProducesMediaTypes: []string{"application/octet-stream"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetStatedbDumpReader{formats: a.formats, writer: writer}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetStatedbDumpOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetStatedbDump: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go index b8918f4b2b..b42c093496 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new connectivity API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new connectivity API client with a bearer token for authentication. 
+// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for connectivity API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go index cf977a0df5..b02512d764 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go @@ -9,6 +9,7 @@ package connectivity // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -33,7 +34,7 @@ func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /status] GetStatus", response, response.Code()) } } @@ -76,12 +77,19 @@ func (o *GetStatusOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get status o k response +func (o *GetStatusOK) Code() int { + return 200 +} + func (o *GetStatusOK) Error() string { - return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /status][%d] getStatusOK %s", 200, payload) } func (o *GetStatusOK) String() string { - return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /status][%d] getStatusOK %s", 200, payload) } func (o *GetStatusOK) GetPayload() *models.HealthStatusResponse { diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go index 68d6dadbd3..01ad86df93 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go @@ -9,6 +9,7 @@ package connectivity // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -45,7 +46,7 @@ func (o *PutStatusProbeReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PUT /status/probe] PutStatusProbe", 
response, response.Code()) } } @@ -88,12 +89,19 @@ func (o *PutStatusProbeOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the put status probe o k response +func (o *PutStatusProbeOK) Code() int { + return 200 +} + func (o *PutStatusProbeOK) Error() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %s", 200, payload) } func (o *PutStatusProbeOK) String() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %s", 200, payload) } func (o *PutStatusProbeOK) GetPayload() *models.HealthStatusResponse { @@ -150,12 +158,17 @@ func (o *PutStatusProbeForbidden) IsCode(code int) bool { return code == 403 } +// Code gets the status code for the put status probe forbidden response +func (o *PutStatusProbeForbidden) Code() int { + return 403 +} + func (o *PutStatusProbeForbidden) Error() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden ", 403) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden", 403) } func (o *PutStatusProbeForbidden) String() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden ", 403) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeForbidden", 403) } func (o *PutStatusProbeForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { @@ -202,12 +215,19 @@ func (o *PutStatusProbeFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the put status probe failed response +func (o *PutStatusProbeFailed) Code() int { + return 500 +} + func (o *PutStatusProbeFailed) Error() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %s", 500, payload) } func (o *PutStatusProbeFailed) String() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %s", 500, payload) } func (o *PutStatusProbeFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go index c30a211861..4d7fcb0aec 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go @@ -9,6 +9,7 @@ package restapi // Editing this file might prove futile when you re-run the swagger generate command import ( + "encoding/json" "fmt" "io" @@ -39,7 +40,7 @@ func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consume } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /healthz] GetHealthz", response, response.Code()) } } @@ -82,12 +83,19 @@ func (o *GetHealthzOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get healthz o k response 
+func (o *GetHealthzOK) Code() int { + return 200 +} + func (o *GetHealthzOK) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload) } func (o *GetHealthzOK) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %s", 200, payload) } func (o *GetHealthzOK) GetPayload() *models.HealthResponse { @@ -145,12 +153,19 @@ func (o *GetHealthzFailed) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get healthz failed response +func (o *GetHealthzFailed) Code() int { + return 500 +} + func (o *GetHealthzFailed) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %s", 500, payload) } func (o *GetHealthzFailed) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload) + payload, _ := json.Marshal(o.Payload) + return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %s", 500, payload) } func (o *GetHealthzFailed) GetPayload() models.Error { diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go index 6e5b1614a6..4638a5e9be 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go @@ -12,6 +12,7 @@ import ( "fmt" "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) @@ -20,6 +21,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi return &Client{transport: transport, formats: formats} } +// New creates a new restapi API client with basic auth credentials. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - user: user for basic authentication header. +// - password: password for basic authentication header. +func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BasicAuth(user, password) + return &Client{transport: transport, formats: strfmt.Default} +} + +// New creates a new restapi API client with a bearer token for authentication. +// It takes the following parameters: +// - host: http host (github.com). +// - basePath: any base path for the API client ("/v1", "/v3"). +// - scheme: http scheme ("http", "https"). +// - bearerToken: bearer token for Bearer authentication header. +func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService { + transport := httptransport.New(host, basePath, []string{scheme}) + transport.DefaultAuthentication = httptransport.BearerToken(bearerToken) + return &Client{transport: transport, formats: strfmt.Default} +} + /* Client for restapi API */ @@ -28,7 +54,7 @@ type Client struct { formats strfmt.Registry } -// ClientOption is the option for Client methods +// ClientOption may be used to customize the behavior of Client methods. 
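
The hunks above give every generated client package (service, connectivity, restapi) two new constructors, NewClientWithBasicAuth and NewClientWithBearerToken, and add a Code() accessor to each response type, while Error() and String() now render payloads as JSON. A minimal sketch of how a consumer might use the new health restapi constructor, assuming the generated GetHealthz operation keeps its usual go-swagger signature and that the host, base path, and token shown here are placeholders:

package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/health/client/restapi"
)

func main() {
	// Hypothetical endpoint and token; the constructor builds the
	// go-openapi HTTP transport and attaches Bearer authentication for us.
	c := restapi.NewClientWithBearerToken("localhost:9876", "/v1", "http", "example-token")

	// Passing nil params falls back to the generated defaults.
	resp, err := c.GetHealthz(nil)
	if err != nil {
		fmt.Println("healthz request failed:", err)
		return
	}

	// Code() is new in this version of the generated client.
	fmt.Println("status code:", resp.Code())
	fmt.Printf("payload: %+v\n", resp.GetPayload())
}
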
type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go index 4b8c9daa2a..b46fe8cdce 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go @@ -113,6 +113,11 @@ func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *EndpointStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error { if m.PrimaryAddress != nil { + + if swag.IsZero(m.PrimaryAddress) { // not required + return nil + } + if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("primary-address") @@ -131,6 +136,11 @@ func (m *EndpointStatus) contextValidateSecondaryAddresses(ctx context.Context, for i := 0; i < len(m.SecondaryAddresses); i++ { if m.SecondaryAddresses[i] != nil { + + if swag.IsZero(m.SecondaryAddresses[i]) { // not required + return nil + } + if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go index d74c7044e8..b6108f41bd 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go @@ -106,6 +106,10 @@ func (m *HealthResponse) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *HealthResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Cilium) { // not required + return nil + } + if err := m.Cilium.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cilium") @@ -121,6 +125,11 @@ func (m *HealthResponse) contextValidateCilium(ctx context.Context, formats strf func (m *HealthResponse) contextValidateSystemLoad(ctx context.Context, formats strfmt.Registry) error { if m.SystemLoad != nil { + + if swag.IsZero(m.SystemLoad) { // not required + return nil + } + if err := m.SystemLoad.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("system-load") diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go index 9f4a29d783..4b189c09cb 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go @@ -116,6 +116,11 @@ func (m *HealthStatusResponse) ContextValidate(ctx context.Context, formats strf func (m *HealthStatusResponse) contextValidateLocal(ctx context.Context, formats strfmt.Registry) error { if m.Local != nil { + + if swag.IsZero(m.Local) { // not required + return nil + } + if err := m.Local.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("local") @@ -134,6 +139,11 @@ func (m *HealthStatusResponse) contextValidateNodes(ctx context.Context, formats for i := 0; i < len(m.Nodes); i++ { if m.Nodes[i] 
!= nil { + + if swag.IsZero(m.Nodes[i]) { // not required + return nil + } + if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go index da114cf19a..c40da6d4ab 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go @@ -114,6 +114,11 @@ func (m *HostStatus) ContextValidate(ctx context.Context, formats strfmt.Registr func (m *HostStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error { if m.PrimaryAddress != nil { + + if swag.IsZero(m.PrimaryAddress) { // not required + return nil + } + if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("primary-address") @@ -132,6 +137,11 @@ func (m *HostStatus) contextValidateSecondaryAddresses(ctx context.Context, form for i := 0; i < len(m.SecondaryAddresses); i++ { if m.SecondaryAddresses[i] != nil { + + if swag.IsZero(m.SecondaryAddresses[i]) { // not required + return nil + } + if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go index 1bbd995338..82f2b33fee 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go @@ -140,6 +140,11 @@ func (m *NodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registr func (m *NodeStatus) contextValidateEndpoint(ctx context.Context, formats strfmt.Registry) error { if m.Endpoint != nil { + + if swag.IsZero(m.Endpoint) { // not required + return nil + } + if err := m.Endpoint.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("endpoint") @@ -156,6 +161,11 @@ func (m *NodeStatus) contextValidateEndpoint(ctx context.Context, formats strfmt func (m *NodeStatus) contextValidateHealthEndpoint(ctx context.Context, formats strfmt.Registry) error { if m.HealthEndpoint != nil { + + if swag.IsZero(m.HealthEndpoint) { // not required + return nil + } + if err := m.HealthEndpoint.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("health-endpoint") @@ -172,6 +182,11 @@ func (m *NodeStatus) contextValidateHealthEndpoint(ctx context.Context, formats func (m *NodeStatus) contextValidateHost(ctx context.Context, formats strfmt.Registry) error { if m.Host != nil { + + if swag.IsZero(m.Host) { // not required + return nil + } + if err := m.Host.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("host") diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go index 3a8669f3c6..b9b6fc7d0f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go @@ -109,6 +109,11 @@ func (m *PathStatus) ContextValidate(ctx 
context.Context, formats strfmt.Registr func (m *PathStatus) contextValidateHTTP(ctx context.Context, formats strfmt.Registry) error { if m.HTTP != nil { + + if swag.IsZero(m.HTTP) { // not required + return nil + } + if err := m.HTTP.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("http") @@ -125,6 +130,11 @@ func (m *PathStatus) contextValidateHTTP(ctx context.Context, formats strfmt.Reg func (m *PathStatus) contextValidateIcmp(ctx context.Context, formats strfmt.Registry) error { if m.Icmp != nil { + + if swag.IsZero(m.Icmp) { // not required + return nil + } + if err := m.Icmp.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("icmp") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/attach_mode.go b/vendor/github.com/cilium/cilium/api/v1/models/attach_mode.go new file mode 100644 index 0000000000..d5ba71588c --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/attach_mode.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// AttachMode Core datapath attachment mode +// +// swagger:model AttachMode +type AttachMode string + +func NewAttachMode(value AttachMode) *AttachMode { + return &value +} + +// Pointer returns a pointer to a freshly-allocated AttachMode. +func (m AttachMode) Pointer() *AttachMode { + return &m +} + +const ( + + // AttachModeTc captures enum value "tc" + AttachModeTc AttachMode = "tc" + + // AttachModeTcx captures enum value "tcx" + AttachModeTcx AttachMode = "tcx" +) + +// for schema +var attachModeEnum []interface{} + +func init() { + var res []AttachMode + if err := json.Unmarshal([]byte(`["tc","tcx"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + attachModeEnum = append(attachModeEnum, v) + } +} + +func (m AttachMode) validateAttachModeEnum(path, location string, value AttachMode) error { + if err := validate.EnumCase(path, location, value, attachModeEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this attach mode +func (m AttachMode) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateAttachModeEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
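
The new AttachMode model added above is a string-backed enum restricted to the values baked into the swagger spec ("tc", "tcx"). A small sketch of how its generated validation behaves for in-spec and out-of-spec values, assuming only the vendored models package and go-openapi strfmt:

package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
	"github.com/go-openapi/strfmt"
)

func main() {
	// A value from the generated enum constants passes validation.
	mode := models.AttachModeTcx
	fmt.Println("tcx valid:", mode.Validate(strfmt.Default) == nil) // true

	// A value outside the spec fails the enum check ("xdp" is illustrative only).
	bogus := models.AttachMode("xdp")
	fmt.Println("xdp rejected:", bogus.Validate(strfmt.Default) != nil) // true
}
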
+ } + return nil +} + +// ContextValidate validates this attach mode based on context it is used +func (m AttachMode) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map.go b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map.go index 2d3366ebac..3516aa913b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map.go @@ -88,6 +88,11 @@ func (m *BPFMap) contextValidateCache(ctx context.Context, formats strfmt.Regist for i := 0; i < len(m.Cache); i++ { if m.Cache[i] != nil { + + if swag.IsZero(m.Cache[i]) { // not required + return nil + } + if err := m.Cache[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cache" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_entry.go b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_entry.go index 3b23cbde46..777235402c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_entry.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_entry.go @@ -24,7 +24,7 @@ import ( type BPFMapEntry struct { // Desired action to be performed - // Enum: [ok insert delete] + // Enum: ["ok","insert","delete"] DesiredAction string `json:"desired-action,omitempty"` // Key of map entry diff --git a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_list.go b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_list.go index a42c67b80f..f916187f6e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_list.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_list.go @@ -85,6 +85,11 @@ func (m *BPFMapList) contextValidateMaps(ctx context.Context, formats strfmt.Reg for i := 0; i < len(m.Maps); i++ { if m.Maps[i] != nil { + + if swag.IsZero(m.Maps[i]) { // not required + return nil + } + if err := m.Maps[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("maps" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_status.go b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_status.go index f48508e90c..a5c0813701 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/b_p_f_map_status.go @@ -90,6 +90,11 @@ func (m *BPFMapStatus) contextValidateMaps(ctx context.Context, formats strfmt.R for i := 0; i < len(m.Maps); i++ { if m.Maps[i] != nil { + + if swag.IsZero(m.Maps[i]) { // not required + return nil + } + if err := m.Maps[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("maps" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/backend_address.go b/vendor/github.com/cilium/cilium/api/v1/models/backend_address.go index 3f17b81f49..64b2bbf464 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/backend_address.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/backend_address.go @@ -37,12 +37,18 @@ type BackendAddress struct { // on related annotation of global service. Applicable for active state only. 
Preferred bool `json:"preferred,omitempty"` + // Layer 4 protocol (TCP, UDP, etc) + Protocol string `json:"protocol,omitempty"` + // State of the backend for load-balancing service traffic - // Enum: [active terminating quarantined maintenance] + // Enum: ["active","terminating","quarantined","maintenance"] State string `json:"state,omitempty"` // Backend weight Weight *uint16 `json:"weight,omitempty"` + + // Optional name of the zone in which this backend runs + Zone string `json:"zone,omitempty"` } // Validate validates this backend address diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bandwidth_manager.go b/vendor/github.com/cilium/cilium/api/v1/models/bandwidth_manager.go index c4f2890e4b..b496e452d2 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/bandwidth_manager.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/bandwidth_manager.go @@ -26,7 +26,7 @@ import ( type BandwidthManager struct { // congestion control - // Enum: [cubic bbr] + // Enum: ["cubic","bbr"] CongestionControl string `json:"congestionControl,omitempty"` // devices diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go new file mode 100644 index 0000000000..5093e85ef7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_family.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpFamily Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path +// +// swagger:model BgpFamily +type BgpFamily struct { + + // Address Family Indicator (AFI) of the path + Afi string `json:"afi,omitempty"` + + // Subsequent Address Family Indicator (SAFI) of the path + Safi string `json:"safi,omitempty"` +} + +// Validate validates this bgp family +func (m *BgpFamily) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp family based on context it is used +func (m *BgpFamily) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpFamily) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpFamily) UnmarshalBinary(b []byte) error { + var res BgpFamily + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go new file mode 100644 index 0000000000..a6455398f3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_nlri.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpNlri Network Layer Reachability Information (NLRI) of the path +// +// swagger:model BgpNlri +type BgpNlri struct { + + // Base64-encoded NLRI in the BGP UPDATE message format + Base64 string `json:"base64,omitempty"` +} + +// Validate validates this bgp nlri +func (m *BgpNlri) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp nlri based on context it is used +func (m *BgpNlri) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpNlri) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpNlri) UnmarshalBinary(b []byte) error { + var res BgpNlri + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go new file mode 100644 index 0000000000..2ac2c19401 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path.go @@ -0,0 +1,235 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpPath Single BGP routing Path containing BGP Network Layer Reachability Information (NLRI) and path attributes +// +// swagger:model BgpPath +type BgpPath struct { + + // Age of the path (time since its creation) in nanoseconds + AgeNanoseconds int64 `json:"age-nanoseconds,omitempty"` + + // True value flags the best path towards the destination prefix + Best bool `json:"best,omitempty"` + + // Address Family Indicator (AFI) and Subsequent Address Family Indicator (SAFI) of the path + Family *BgpFamily `json:"family,omitempty"` + + // Network Layer Reachability Information of the path + Nlri *BgpNlri `json:"nlri,omitempty"` + + // List of BGP path attributes specific for the path + PathAttributes []*BgpPathAttribute `json:"path-attributes"` + + // True value marks the path as stale + Stale bool `json:"stale,omitempty"` +} + +// Validate validates this bgp path +func (m *BgpPath) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFamily(formats); err != nil { + res = append(res, err) + } + + if err := m.validateNlri(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePathAttributes(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
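
BgpFamily, BgpNlri, and BgpPath introduced above are the new models backing the BGP routes API; a path nests a family and an NLRI, with the raw NLRI carried as base64. A hedged sketch of building and validating one path, with placeholder values for the encoded NLRI and the age:

package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
	"github.com/go-openapi/strfmt"
)

func main() {
	// Hypothetical path as the BGP status API might return it.
	path := &models.BgpPath{
		AgeNanoseconds: 5_000_000_000,
		Best:           true,
		Family:         &models.BgpFamily{Afi: "ipv4", Safi: "unicast"},
		Nlri:           &models.BgpNlri{Base64: "IAoKCgo="}, // placeholder payload
	}

	// Validate walks the nested family, NLRI, and path attributes.
	if err := path.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
		return
	}
	fmt.Println("best path?", path.Best)
}
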
+ } + return nil +} + +func (m *BgpPath) validateFamily(formats strfmt.Registry) error { + if swag.IsZero(m.Family) { // not required + return nil + } + + if m.Family != nil { + if err := m.Family.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("family") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("family") + } + return err + } + } + + return nil +} + +func (m *BgpPath) validateNlri(formats strfmt.Registry) error { + if swag.IsZero(m.Nlri) { // not required + return nil + } + + if m.Nlri != nil { + if err := m.Nlri.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nlri") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nlri") + } + return err + } + } + + return nil +} + +func (m *BgpPath) validatePathAttributes(formats strfmt.Registry) error { + if swag.IsZero(m.PathAttributes) { // not required + return nil + } + + for i := 0; i < len(m.PathAttributes); i++ { + if swag.IsZero(m.PathAttributes[i]) { // not required + continue + } + + if m.PathAttributes[i] != nil { + if err := m.PathAttributes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("path-attributes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this bgp path based on the context it is used +func (m *BgpPath) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateFamily(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateNlri(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidatePathAttributes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpPath) contextValidateFamily(ctx context.Context, formats strfmt.Registry) error { + + if m.Family != nil { + + if swag.IsZero(m.Family) { // not required + return nil + } + + if err := m.Family.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("family") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("family") + } + return err + } + } + + return nil +} + +func (m *BgpPath) contextValidateNlri(ctx context.Context, formats strfmt.Registry) error { + + if m.Nlri != nil { + + if swag.IsZero(m.Nlri) { // not required + return nil + } + + if err := m.Nlri.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("nlri") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("nlri") + } + return err + } + } + + return nil +} + +func (m *BgpPath) contextValidatePathAttributes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.PathAttributes); i++ { + + if m.PathAttributes[i] != nil { + + if swag.IsZero(m.PathAttributes[i]) { // not required + return nil + } + + if err := m.PathAttributes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("path-attributes" + "." 
+ strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("path-attributes" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPath) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPath) UnmarshalBinary(b []byte) error { + var res BgpPath + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go new file mode 100644 index 0000000000..cd92929473 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_path_attribute.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpPathAttribute Single BGP path attribute specific for the path +// +// swagger:model BgpPathAttribute +type BgpPathAttribute struct { + + // Base64-encoded BGP path attribute in the BGP UPDATE message format + Base64 string `json:"base64,omitempty"` +} + +// Validate validates this bgp path attribute +func (m *BgpPathAttribute) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp path attribute based on context it is used +func (m *BgpPathAttribute) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPathAttribute) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPathAttribute) UnmarshalBinary(b []byte) error { + var res BgpPathAttribute + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go index 7b22ce0502..0604c75c72 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go @@ -78,6 +78,9 @@ type BgpPeer struct { // SessionState string `json:"session-state,omitempty"` + // Set when a TCP password is configured for communications with this peer + TCPPasswordEnabled bool `json:"tcp-password-enabled,omitempty"` + // BGP peer connection uptime in nano seconds. UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"` } @@ -188,6 +191,11 @@ func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Families); i++ { if m.Families[i] != nil { + + if swag.IsZero(m.Families[i]) { // not required + return nil + } + if err := m.Families[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("families" + "." 
+ strconv.Itoa(i)) @@ -206,6 +214,11 @@ func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Re func (m *BgpPeer) contextValidateGracefulRestart(ctx context.Context, formats strfmt.Registry) error { if m.GracefulRestart != nil { + + if swag.IsZero(m.GracefulRestart) { // not required + return nil + } + if err := m.GracefulRestart.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("graceful-restart") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go new file mode 100644 index 0000000000..f054267549 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpRoute Single BGP route retrieved from the RIB of underlying router +// +// swagger:model BgpRoute +type BgpRoute struct { + + // IP address specifying a BGP neighbor if the source table type is adj-rib-in or adj-rib-out + Neighbor string `json:"neighbor,omitempty"` + + // List of routing paths leading towards the prefix + Paths []*BgpPath `json:"paths"` + + // IP prefix of the route + Prefix string `json:"prefix,omitempty"` + + // Autonomous System Number (ASN) identifying a BGP virtual router instance + RouterAsn int64 `json:"router-asn,omitempty"` +} + +// Validate validates this bgp route +func (m *BgpRoute) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validatePaths(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoute) validatePaths(formats strfmt.Registry) error { + if swag.IsZero(m.Paths) { // not required + return nil + } + + for i := 0; i < len(m.Paths); i++ { + if swag.IsZero(m.Paths[i]) { // not required + continue + } + + if m.Paths[i] != nil { + if err := m.Paths[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("paths" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("paths" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// ContextValidate validate this bgp route based on the context it is used +func (m *BgpRoute) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidatePaths(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoute) contextValidatePaths(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Paths); i++ { + + if m.Paths[i] != nil { + + if swag.IsZero(m.Paths[i]) { // not required + return nil + } + + if err := m.Paths[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("paths" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("paths" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoute) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoute) UnmarshalBinary(b []byte) error { + var res BgpRoute + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go new file mode 100644 index 0000000000..2e3854dcc2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy.go @@ -0,0 +1,182 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicy Single BGP route policy retrieved from the underlying router +// +// swagger:model BgpRoutePolicy +type BgpRoutePolicy struct { + + // Name of the route policy + Name string `json:"name,omitempty"` + + // Autonomous System Number (ASN) identifying a BGP virtual router instance + RouterAsn int64 `json:"router-asn,omitempty"` + + // List of the route policy statements + Statements []*BgpRoutePolicyStatement `json:"statements"` + + // Type of the route policy + // Enum: ["export","import"] + Type string `json:"type,omitempty"` +} + +// Validate validates this bgp route policy +func (m *BgpRoutePolicy) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateStatements(formats); err != nil { + res = append(res, err) + } + + if err := m.validateType(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) validateStatements(formats strfmt.Registry) error { + if swag.IsZero(m.Statements) { // not required + return nil + } + + for i := 0; i < len(m.Statements); i++ { + if swag.IsZero(m.Statements[i]) { // not required + continue + } + + if m.Statements[i] != nil { + if err := m.Statements[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyTypeTypePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["export","import"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyTypeTypePropEnum = append(bgpRoutePolicyTypeTypePropEnum, v) + } +} + +const ( + + // BgpRoutePolicyTypeExport captures enum value "export" + BgpRoutePolicyTypeExport string = "export" + + // BgpRoutePolicyTypeImport captures enum value "import" + BgpRoutePolicyTypeImport string = "import" +) + +// prop value enum +func (m *BgpRoutePolicy) validateTypeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyTypeTypePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicy) validateType(formats strfmt.Registry) error { + if swag.IsZero(m.Type) { // not required + return nil + } + + // value enum + if err := m.validateTypeEnum("type", "body", m.Type); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy based on the context it is used +func (m *BgpRoutePolicy) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateStatements(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicy) contextValidateStatements(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Statements); i++ { + + if m.Statements[i] != nil { + + if swag.IsZero(m.Statements[i]) { // not required + return nil + } + + if err := m.Statements[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("statements" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("statements" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicy) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicy) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicy + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go new file mode 100644 index 0000000000..993f91dd6a --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_prefix_match.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
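// Editor's note, not part of the vendored diff: a minimal sketch of how the new BgpRoute,
// BgpPath and BgpPathAttribute models introduced by this bump nest together and how their
// generated Validate/ContextValidate helpers are driven. Field names and method signatures
// are taken from the generated code in this patch; the concrete values (ASN, prefix,
// base64 attribute) and the main() wrapper are illustrative assumptions only.
package main

import (
	"context"
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
	"github.com/go-openapi/strfmt"
)

func main() {
	route := &models.BgpRoute{
		RouterAsn: 64512,         // hypothetical virtual-router ASN
		Prefix:    "10.0.0.0/24", // hypothetical advertised prefix
		Neighbor:  "192.0.2.1",   // only meaningful for adj-rib-in / adj-rib-out tables
		Paths: []*models.BgpPath{{
			PathAttributes: []*models.BgpPathAttribute{{
				Base64: "QAEBAA==", // hypothetical base64-encoded BGP UPDATE attribute
			}},
		}},
	}

	// The generated validators walk the nested Paths and PathAttributes slices and prefix
	// any error with the offending field path, as in the code above.
	if err := route.Validate(strfmt.Default); err != nil {
		fmt.Println("validation failed:", err)
	}
	if err := route.ContextValidate(context.Background(), strfmt.Default); err != nil {
		fmt.Println("context validation failed:", err)
	}
}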
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpRoutePolicyPrefixMatch Matches a CIDR prefix in a BGP route policy +// +// swagger:model BgpRoutePolicyPrefixMatch +type BgpRoutePolicyPrefixMatch struct { + + // CIDR prefix to match with + Cidr string `json:"cidr,omitempty"` + + // Maximal prefix length that will match if it falls under CIDR + PrefixLenMax int64 `json:"prefix-len-max,omitempty"` + + // Minimal prefix length that will match if it falls under CIDR + PrefixLenMin int64 `json:"prefix-len-min,omitempty"` +} + +// Validate validates this bgp route policy prefix match +func (m *BgpRoutePolicyPrefixMatch) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp route policy prefix match based on context it is used +func (m *BgpRoutePolicyPrefixMatch) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyPrefixMatch) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyPrefixMatch + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go new file mode 100644 index 0000000000..d36e3fca88 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_route_policy_statement.go @@ -0,0 +1,253 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpRoutePolicyStatement Single BGP route policy statement +// +// swagger:model BgpRoutePolicyStatement +type BgpRoutePolicyStatement struct { + + // List of BGP standard community values to be added to the matched route + AddCommunities []string `json:"add-communities"` + + // List of BGP large community values to be added to the matched route + AddLargeCommunities []string `json:"add-large-communities"` + + // Matches any of the provided address families. If empty matches all address families. + MatchFamilies []*BgpFamily `json:"match-families"` + + // Matches any of the provided BGP neighbor IP addresses. If empty matches all neighbors. + MatchNeighbors []string `json:"match-neighbors"` + + // Matches any of the provided prefixes. If empty matches all prefixes. 
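// Editor's note, not part of the vendored diff: a minimal sketch combining the new
// BgpRoutePolicy, BgpRoutePolicyStatement and BgpRoutePolicyPrefixMatch models with the
// enum constants generated for them in this patch. All referenced fields, constants and
// methods appear in the generated code; the concrete policy values are made-up examples.
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
	"github.com/go-openapi/strfmt"
)

func main() {
	policy := &models.BgpRoutePolicy{
		Name:      "export-pod-cidr",               // hypothetical policy name
		RouterAsn: 64512,                           // hypothetical virtual-router ASN
		Type:      models.BgpRoutePolicyTypeExport, // must be "export" or "import"
		Statements: []*models.BgpRoutePolicyStatement{{
			MatchPrefixes: []*models.BgpRoutePolicyPrefixMatch{{
				Cidr:         "10.0.0.0/8", // hypothetical CIDR
				PrefixLenMin: 8,
				PrefixLenMax: 24,
			}},
			AddCommunities:     []string{"65000:100"},
			SetLocalPreference: 200,
			RouteAction:        models.BgpRoutePolicyStatementRouteActionAccept,
		}},
	}

	// Validate enforces the ["export","import"] and ["none","accept","reject"] enums
	// declared in the generated models; an out-of-range value is reported here.
	if err := policy.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid policy:", err)
	}

	// MarshalBinary wraps swag.WriteJSON, so the output is plain JSON using the json tags.
	raw, err := policy.MarshalBinary()
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Println(string(raw))
}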
+ MatchPrefixes []*BgpRoutePolicyPrefixMatch `json:"match-prefixes"` + + // RIB processing action taken on the matched route + // Enum: ["none","accept","reject"] + RouteAction string `json:"route-action,omitempty"` + + // BGP local preference value to be set on the matched route + SetLocalPreference int64 `json:"set-local-preference,omitempty"` +} + +// Validate validates this bgp route policy statement +func (m *BgpRoutePolicyStatement) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateMatchFamilies(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMatchPrefixes(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRouteAction(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateMatchFamilies(formats strfmt.Registry) error { + if swag.IsZero(m.MatchFamilies) { // not required + return nil + } + + for i := 0; i < len(m.MatchFamilies); i++ { + if swag.IsZero(m.MatchFamilies[i]) { // not required + continue + } + + if m.MatchFamilies[i] != nil { + if err := m.MatchFamilies[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-families" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-families" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BgpRoutePolicyStatement) validateMatchPrefixes(formats strfmt.Registry) error { + if swag.IsZero(m.MatchPrefixes) { // not required + return nil + } + + for i := 0; i < len(m.MatchPrefixes); i++ { + if swag.IsZero(m.MatchPrefixes[i]) { // not required + continue + } + + if m.MatchPrefixes[i] != nil { + if err := m.MatchPrefixes[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +var bgpRoutePolicyStatementTypeRouteActionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["none","accept","reject"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + bgpRoutePolicyStatementTypeRouteActionPropEnum = append(bgpRoutePolicyStatementTypeRouteActionPropEnum, v) + } +} + +const ( + + // BgpRoutePolicyStatementRouteActionNone captures enum value "none" + BgpRoutePolicyStatementRouteActionNone string = "none" + + // BgpRoutePolicyStatementRouteActionAccept captures enum value "accept" + BgpRoutePolicyStatementRouteActionAccept string = "accept" + + // BgpRoutePolicyStatementRouteActionReject captures enum value "reject" + BgpRoutePolicyStatementRouteActionReject string = "reject" +) + +// prop value enum +func (m *BgpRoutePolicyStatement) validateRouteActionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, bgpRoutePolicyStatementTypeRouteActionPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *BgpRoutePolicyStatement) validateRouteAction(formats strfmt.Registry) error { + if swag.IsZero(m.RouteAction) { // not required + return nil + } + + // value enum + if err := m.validateRouteActionEnum("route-action", "body", m.RouteAction); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp route policy statement based on the context it is used +func (m *BgpRoutePolicyStatement) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateMatchFamilies(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateMatchPrefixes(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpRoutePolicyStatement) contextValidateMatchFamilies(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.MatchFamilies); i++ { + + if m.MatchFamilies[i] != nil { + + if swag.IsZero(m.MatchFamilies[i]) { // not required + return nil + } + + if err := m.MatchFamilies[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-families" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-families" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BgpRoutePolicyStatement) contextValidateMatchPrefixes(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.MatchPrefixes); i++ { + + if m.MatchPrefixes[i] != nil { + + if swag.IsZero(m.MatchPrefixes[i]) { // not required + return nil + } + + if err := m.MatchPrefixes[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("match-prefixes" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("match-prefixes" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpRoutePolicyStatement) UnmarshalBinary(b []byte) error { + var res BgpRoutePolicyStatement + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/c_id_r_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/c_id_r_policy.go index 20befd1a05..b98e611a6a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/c_id_r_policy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/c_id_r_policy.go @@ -124,6 +124,11 @@ func (m *CIDRPolicy) contextValidateEgress(ctx context.Context, formats strfmt.R for i := 0; i < len(m.Egress); i++ { if m.Egress[i] != nil { + + if swag.IsZero(m.Egress[i]) { // not required + return nil + } + if err := m.Egress[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("egress" + "." + strconv.Itoa(i)) @@ -144,6 +149,11 @@ func (m *CIDRPolicy) contextValidateIngress(ctx context.Context, formats strfmt. for i := 0; i < len(m.Ingress); i++ { if m.Ingress[i] != nil { + + if swag.IsZero(m.Ingress[i]) { // not required + return nil + } + if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ingress" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/c_n_i_chaining_status.go b/vendor/github.com/cilium/cilium/api/v1/models/c_n_i_chaining_status.go index 9d707c80f9..26e8a7236c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/c_n_i_chaining_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/c_n_i_chaining_status.go @@ -26,7 +26,7 @@ import ( type CNIChainingStatus struct { // mode - // Enum: [none aws-cni flannel generic-veth portmap] + // Enum: ["none","aws-cni","flannel","generic-veth","portmap"] Mode string `json:"mode,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cgroup_dump_metadata.go b/vendor/github.com/cilium/cilium/api/v1/models/cgroup_dump_metadata.go index 535eb54231..c4e34a976d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cgroup_dump_metadata.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cgroup_dump_metadata.go @@ -85,6 +85,11 @@ func (m *CgroupDumpMetadata) contextValidatePodMetadatas(ctx context.Context, fo for i := 0; i < len(m.PodMetadatas); i++ { if m.PodMetadatas[i] != nil { + + if swag.IsZero(m.PodMetadatas[i]) { // not required + return nil + } + if err := m.PodMetadatas[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("pod-metadatas" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cgroup_pod_metadata.go b/vendor/github.com/cilium/cilium/api/v1/models/cgroup_pod_metadata.go index 74fb793657..13011e88a6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cgroup_pod_metadata.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cgroup_pod_metadata.go @@ -94,6 +94,11 @@ func (m *CgroupPodMetadata) contextValidateContainers(ctx context.Context, forma for i := 0; i < len(m.Containers); i++ { if m.Containers[i] != nil { + + if swag.IsZero(m.Containers[i]) { // not required + return nil + } + if err := m.Containers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("containers" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/clock_source.go b/vendor/github.com/cilium/cilium/api/v1/models/clock_source.go index 674ec19fda..c5106d8661 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/clock_source.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/clock_source.go @@ -29,7 +29,7 @@ type ClockSource struct { Hertz int64 `json:"hertz,omitempty"` // Datapath clock source - // Enum: [ktime jiffies] + // Enum: ["ktime","jiffies"] Mode string `json:"mode,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cluster_mesh_status.go b/vendor/github.com/cilium/cilium/api/v1/models/cluster_mesh_status.go index c95c006350..3835bbbf56 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cluster_mesh_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cluster_mesh_status.go @@ -90,6 +90,11 @@ func (m *ClusterMeshStatus) contextValidateClusters(ctx context.Context, formats for i := 0; i < len(m.Clusters); i++ { if m.Clusters[i] != nil { + + if swag.IsZero(m.Clusters[i]) { // not required + return nil + } + if err := m.Clusters[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("clusters" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cluster_node_status.go b/vendor/github.com/cilium/cilium/api/v1/models/cluster_node_status.go index 810c8a340d..6824ddb196 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cluster_node_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cluster_node_status.go @@ -128,6 +128,11 @@ func (m *ClusterNodeStatus) contextValidateNodesAdded(ctx context.Context, forma for i := 0; i < len(m.NodesAdded); i++ { if m.NodesAdded[i] != nil { + + if swag.IsZero(m.NodesAdded[i]) { // not required + return nil + } + if err := m.NodesAdded[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodes-added" + "." + strconv.Itoa(i)) @@ -148,6 +153,11 @@ func (m *ClusterNodeStatus) contextValidateNodesRemoved(ctx context.Context, for for i := 0; i < len(m.NodesRemoved); i++ { if m.NodesRemoved[i] != nil { + + if swag.IsZero(m.NodesRemoved[i]) { // not required + return nil + } + if err := m.NodesRemoved[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodes-removed" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cluster_nodes_response.go b/vendor/github.com/cilium/cilium/api/v1/models/cluster_nodes_response.go index 7acf26fab0..232abceb39 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cluster_nodes_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cluster_nodes_response.go @@ -88,6 +88,11 @@ func (m *ClusterNodesResponse) contextValidateNodes(ctx context.Context, formats for i := 0; i < len(m.Nodes); i++ { if m.Nodes[i] != nil { + + if swag.IsZero(m.Nodes[i]) { // not required + return nil + } + if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/cluster_status.go b/vendor/github.com/cilium/cilium/api/v1/models/cluster_status.go index edac3d8110..6f1ca97cff 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/cluster_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/cluster_status.go @@ -118,6 +118,11 @@ func (m *ClusterStatus) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *ClusterStatus) contextValidateCiliumHealth(ctx context.Context, formats strfmt.Registry) error { if m.CiliumHealth != nil { + + if swag.IsZero(m.CiliumHealth) { // not required + return nil + } + if err := m.CiliumHealth.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ciliumHealth") @@ -136,6 +141,11 @@ func (m *ClusterStatus) contextValidateNodes(ctx context.Context, formats strfmt for i := 0; i < len(m.Nodes); i++ { if m.Nodes[i] != nil { + + if swag.IsZero(m.Nodes[i]) { // not required + return nil + } + if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodes" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/controller_status.go b/vendor/github.com/cilium/cilium/api/v1/models/controller_status.go index 565da6f4a8..2b591bd3e4 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/controller_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/controller_status.go @@ -131,6 +131,11 @@ func (m *ControllerStatus) ContextValidate(ctx context.Context, formats strfmt.R func (m *ControllerStatus) contextValidateConfiguration(ctx context.Context, formats strfmt.Registry) error { if m.Configuration != nil { + + if swag.IsZero(m.Configuration) { // not required + return nil + } + if err := m.Configuration.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("configuration") @@ -147,6 +152,11 @@ func (m *ControllerStatus) contextValidateConfiguration(ctx context.Context, for func (m *ControllerStatus) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/controller_statuses.go b/vendor/github.com/cilium/cilium/api/v1/models/controller_statuses.go index cd482b70e4..3b02a21622 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/controller_statuses.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/controller_statuses.go @@ -57,6 +57,11 @@ func (m ControllerStatuses) ContextValidate(ctx context.Context, formats strfmt. for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration.go b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration.go index 5c004dcf58..b7e5cf1959 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration.go @@ -108,6 +108,11 @@ func (m *DaemonConfiguration) ContextValidate(ctx context.Context, formats strfm func (m *DaemonConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -124,6 +129,11 @@ func (m *DaemonConfiguration) contextValidateSpec(ctx context.Context, formats s func (m *DaemonConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_spec.go index d44a5481b5..a14211ac4c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_spec.go @@ -27,7 +27,7 @@ type DaemonConfigurationSpec 
struct { Options ConfigurationMap `json:"options,omitempty"` // The policy-enforcement mode - // Enum: [default always never] + // Enum: ["default","always","never"] PolicyEnforcement string `json:"policy-enforcement,omitempty"` } @@ -129,6 +129,10 @@ func (m *DaemonConfigurationSpec) ContextValidate(ctx context.Context, formats s func (m *DaemonConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Options) { // not required + return nil + } + if err := m.Options.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("options") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go index f55612181d..40f9345ceb 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/daemon_configuration_status.go @@ -50,9 +50,20 @@ type DaemonConfigurationStatus struct { // Configured compatibility mode for --egress-multi-home-ip-rule-compat EgressMultiHomeIPRuleCompat bool `json:"egress-multi-home-ip-rule-compat,omitempty"` + // Enable route MTU for pod netns when CNI chaining is used + EnableRouteMTUForCNIChaining bool `json:"enableRouteMTUForCNIChaining,omitempty"` + // Immutable configuration (read-only) Immutable ConfigurationMap `json:"immutable,omitempty"` + // Install ingress/egress routes through uplink on host for Pods when working with + // delegated IPAM plugin. + // + InstallUplinkRoutesForDelegatedIPAM bool `json:"installUplinkRoutesForDelegatedIPAM,omitempty"` + + // Comma-separated list of IP ports should be reserved in the workload network namespace + IPLocalReservedPorts string `json:"ipLocalReservedPorts,omitempty"` + // Configured IPAM mode IpamMode string `json:"ipam-mode,omitempty"` @@ -291,6 +302,11 @@ func (m *DaemonConfigurationStatus) ContextValidate(ctx context.Context, formats func (m *DaemonConfigurationStatus) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error { if m.Addressing != nil { + + if swag.IsZero(m.Addressing) { // not required + return nil + } + if err := m.Addressing.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("addressing") @@ -306,6 +322,10 @@ func (m *DaemonConfigurationStatus) contextValidateAddressing(ctx context.Contex func (m *DaemonConfigurationStatus) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.DatapathMode) { // not required + return nil + } + if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("datapathMode") @@ -320,6 +340,10 @@ func (m *DaemonConfigurationStatus) contextValidateDatapathMode(ctx context.Cont func (m *DaemonConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Immutable) { // not required + return nil + } + if err := m.Immutable.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("immutable") @@ -335,6 +359,11 @@ func (m *DaemonConfigurationStatus) contextValidateImmutable(ctx context.Context func (m *DaemonConfigurationStatus) contextValidateKvstoreConfiguration(ctx context.Context, formats strfmt.Registry) error { if m.KvstoreConfiguration != nil { + + if 
swag.IsZero(m.KvstoreConfiguration) { // not required + return nil + } + if err := m.KvstoreConfiguration.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("kvstoreConfiguration") @@ -351,6 +380,11 @@ func (m *DaemonConfigurationStatus) contextValidateKvstoreConfiguration(ctx cont func (m *DaemonConfigurationStatus) contextValidateMasqueradeProtocols(ctx context.Context, formats strfmt.Registry) error { if m.MasqueradeProtocols != nil { + + if swag.IsZero(m.MasqueradeProtocols) { // not required + return nil + } + if err := m.MasqueradeProtocols.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("masqueradeProtocols") @@ -367,6 +401,11 @@ func (m *DaemonConfigurationStatus) contextValidateMasqueradeProtocols(ctx conte func (m *DaemonConfigurationStatus) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error { if m.NodeMonitor != nil { + + if swag.IsZero(m.NodeMonitor) { // not required + return nil + } + if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodeMonitor") @@ -383,6 +422,11 @@ func (m *DaemonConfigurationStatus) contextValidateNodeMonitor(ctx context.Conte func (m *DaemonConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/datapath_mode.go b/vendor/github.com/cilium/cilium/api/v1/models/datapath_mode.go index 7bb094dac7..a5ac8e87c4 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/datapath_mode.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/datapath_mode.go @@ -35,6 +35,12 @@ const ( // DatapathModeVeth captures enum value "veth" DatapathModeVeth DatapathMode = "veth" + + // DatapathModeNetkit captures enum value "netkit" + DatapathModeNetkit DatapathMode = "netkit" + + // DatapathModeNetkitDashL2 captures enum value "netkit-l2" + DatapathModeNetkitDashL2 DatapathMode = "netkit-l2" ) // for schema @@ -42,7 +48,7 @@ var datapathModeEnum []interface{} func init() { var res []DatapathMode - if err := json.Unmarshal([]byte(`["veth"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["veth","netkit","netkit-l2"]`), &res); err != nil { panic(err) } for _, v := range res { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go index 128dd8dd63..0504e09c6e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/debug_info.go @@ -228,6 +228,11 @@ func (m *DebugInfo) ContextValidate(ctx context.Context, formats strfmt.Registry func (m *DebugInfo) contextValidateCiliumStatus(ctx context.Context, formats strfmt.Registry) error { if m.CiliumStatus != nil { + + if swag.IsZero(m.CiliumStatus) { // not required + return nil + } + if err := m.CiliumStatus.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cilium-status") @@ -244,6 +249,11 @@ func (m *DebugInfo) contextValidateCiliumStatus(ctx context.Context, formats str func (m *DebugInfo) contextValidateEncryption(ctx context.Context, formats 
strfmt.Registry) error { if m.Encryption != nil { + + if swag.IsZero(m.Encryption) { // not required + return nil + } + if err := m.Encryption.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("encryption") @@ -262,6 +272,11 @@ func (m *DebugInfo) contextValidateEndpointList(ctx context.Context, formats str for i := 0; i < len(m.EndpointList); i++ { if m.EndpointList[i] != nil { + + if swag.IsZero(m.EndpointList[i]) { // not required + return nil + } + if err := m.EndpointList[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("endpoint-list" + "." + strconv.Itoa(i)) @@ -280,6 +295,11 @@ func (m *DebugInfo) contextValidateEndpointList(ctx context.Context, formats str func (m *DebugInfo) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error { if m.Policy != nil { + + if swag.IsZero(m.Policy) { // not required + return nil + } + if err := m.Policy.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("policy") @@ -298,6 +318,11 @@ func (m *DebugInfo) contextValidateServiceList(ctx context.Context, formats strf for i := 0; i < len(m.ServiceList); i++ { if m.ServiceList[i] != nil { + + if swag.IsZero(m.ServiceList[i]) { // not required + return nil + } + if err := m.ServiceList[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("service-list" + "." + strconv.Itoa(i)) @@ -336,7 +361,7 @@ func (m *DebugInfo) UnmarshalBinary(b []byte) error { // swagger:model DebugInfoEncryption type DebugInfoEncryption struct { - // Status of the Wireguard agent + // Status of the WireGuard agent Wireguard *WireguardStatus `json:"wireguard,omitempty"` } @@ -390,6 +415,11 @@ func (m *DebugInfoEncryption) ContextValidate(ctx context.Context, formats strfm func (m *DebugInfoEncryption) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error { if m.Wireguard != nil { + + if swag.IsZero(m.Wireguard) { // not required + return nil + } + if err := m.Wireguard.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("encryption" + "." 
+ "wireguard") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go index 4392fb0974..8e7ca1e970 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/encryption_status.go @@ -25,14 +25,17 @@ import ( // swagger:model EncryptionStatus type EncryptionStatus struct { + // Status of the IPsec agent + Ipsec *IPsecStatus `json:"ipsec,omitempty"` + // mode - // Enum: [Disabled IPsec Wireguard] + // Enum: ["Disabled","IPsec","Wireguard"] Mode string `json:"mode,omitempty"` - // Human readable status/error/warning message + // Human readable error/warning message Msg string `json:"msg,omitempty"` - // Status of the Wireguard agent + // Status of the WireGuard agent Wireguard *WireguardStatus `json:"wireguard,omitempty"` } @@ -40,6 +43,10 @@ type EncryptionStatus struct { func (m *EncryptionStatus) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateIpsec(formats); err != nil { + res = append(res, err) + } + if err := m.validateMode(formats); err != nil { res = append(res, err) } @@ -54,6 +61,25 @@ func (m *EncryptionStatus) Validate(formats strfmt.Registry) error { return nil } +func (m *EncryptionStatus) validateIpsec(formats strfmt.Registry) error { + if swag.IsZero(m.Ipsec) { // not required + return nil + } + + if m.Ipsec != nil { + if err := m.Ipsec.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipsec") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("ipsec") + } + return err + } + } + + return nil +} + var encryptionStatusTypeModePropEnum []interface{} func init() { @@ -122,6 +148,10 @@ func (m *EncryptionStatus) validateWireguard(formats strfmt.Registry) error { func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateIpsec(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateWireguard(ctx, formats); err != nil { res = append(res, err) } @@ -132,9 +162,35 @@ func (m *EncryptionStatus) ContextValidate(ctx context.Context, formats strfmt.R return nil } +func (m *EncryptionStatus) contextValidateIpsec(ctx context.Context, formats strfmt.Registry) error { + + if m.Ipsec != nil { + + if swag.IsZero(m.Ipsec) { // not required + return nil + } + + if err := m.Ipsec.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("ipsec") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("ipsec") + } + return err + } + } + + return nil +} + func (m *EncryptionStatus) contextValidateWireguard(ctx context.Context, formats strfmt.Registry) error { if m.Wireguard != nil { + + if swag.IsZero(m.Wireguard) { // not required + return nil + } + if err := m.Wireguard.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("wireguard") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint.go index 4773733214..cb9a7e943e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint.go @@ -108,6 +108,11 @@ func (m *Endpoint) ContextValidate(ctx context.Context, formats strfmt.Registry) func (m *Endpoint) contextValidateSpec(ctx 
context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -124,6 +129,11 @@ func (m *Endpoint) contextValidateSpec(ctx context.Context, formats strfmt.Regis func (m *Endpoint) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go new file mode 100644 index 0000000000..795e79d6bb --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_batch_delete_request.go @@ -0,0 +1,53 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EndpointBatchDeleteRequest Properties selecting a batch of endpoints to delete. +// +// swagger:model EndpointBatchDeleteRequest +type EndpointBatchDeleteRequest struct { + + // ID assigned by container runtime + ContainerID string `json:"container-id,omitempty"` +} + +// Validate validates this endpoint batch delete request +func (m *EndpointBatchDeleteRequest) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this endpoint batch delete request based on context it is used +func (m *EndpointBatchDeleteRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *EndpointBatchDeleteRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EndpointBatchDeleteRequest) UnmarshalBinary(b []byte) error { + var res EndpointBatchDeleteRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go index f70e9e43f5..015ca7ac43 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_change_request.go @@ -28,6 +28,9 @@ type EndpointChangeRequest struct { // ID assigned by container runtime ContainerID string `json:"container-id,omitempty"` + // Name of network device in container netns + ContainerInterfaceName string `json:"container-interface-name,omitempty"` + // Name assigned to container ContainerName string `json:"container-name,omitempty"` @@ -37,6 +40,9 @@ type EndpointChangeRequest struct { // ID of datapath tail call map DatapathMapID int64 `json:"datapath-map-id,omitempty"` + // Disables lookup using legacy endpoint identifiers (container name, container id, pod name) for this endpoint + DisableLegacyIdentifiers bool `json:"disable-legacy-identifiers,omitempty"` + // Docker endpoint ID 
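// Editor's note, not part of the vendored diff: the EndpointBatchDeleteRequest model added
// above carries only a container ID (per its doc comment) and has no validation rules, so
// the interesting surface is the JSON round trip through the swag-backed MarshalBinary and
// UnmarshalBinary helpers shown in the patch. The container ID below is a made-up value.
package main

import (
	"fmt"

	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	req := &models.EndpointBatchDeleteRequest{
		ContainerID: "example-container-id", // hypothetical container-runtime ID
	}

	// MarshalBinary produces plain JSON, e.g. {"container-id":"example-container-id"}.
	raw, err := req.MarshalBinary()
	if err != nil {
		panic(err)
	}

	var decoded models.EndpointBatchDeleteRequest
	if err := decoded.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ContainerID == req.ContainerID) // true
}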
DockerEndpointID string `json:"docker-endpoint-id,omitempty"` @@ -49,10 +55,10 @@ type EndpointChangeRequest struct { // Local endpoint ID ID int64 `json:"id,omitempty"` - // Index of network device + // Index of network device in host netns InterfaceIndex int64 `json:"interface-index,omitempty"` - // Name of network device + // Name of network device in host netns InterfaceName string `json:"interface-name,omitempty"` // Kubernetes namespace name @@ -61,18 +67,30 @@ type EndpointChangeRequest struct { // Kubernetes pod name K8sPodName string `json:"k8s-pod-name,omitempty"` + // Kubernetes pod UID + K8sUID string `json:"k8s-uid,omitempty"` + // Labels describing the identity Labels Labels `json:"labels,omitempty"` // MAC address Mac string `json:"mac,omitempty"` + // Network namespace cookie + NetnsCookie string `json:"netns-cookie,omitempty"` + + // Index of network device from which an IP was used as endpoint IP. Only relevant for ENI environments. + ParentInterfaceIndex int64 `json:"parent-interface-index,omitempty"` + // Process ID of the workload belonging to this endpoint Pid int64 `json:"pid,omitempty"` // Whether policy enforcement is enabled or not PolicyEnabled bool `json:"policy-enabled,omitempty"` + // Properties is used to store information about the endpoint at creation. Useful for tests. + Properties map[string]interface{} `json:"properties,omitempty"` + // Current state of endpoint // Required: true State *EndpointState `json:"state"` @@ -216,6 +234,11 @@ func (m *EndpointChangeRequest) ContextValidate(ctx context.Context, formats str func (m *EndpointChangeRequest) contextValidateAddressing(ctx context.Context, formats strfmt.Registry) error { if m.Addressing != nil { + + if swag.IsZero(m.Addressing) { // not required + return nil + } + if err := m.Addressing.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("addressing") @@ -232,6 +255,11 @@ func (m *EndpointChangeRequest) contextValidateAddressing(ctx context.Context, f func (m *EndpointChangeRequest) contextValidateDatapathConfiguration(ctx context.Context, formats strfmt.Registry) error { if m.DatapathConfiguration != nil { + + if swag.IsZero(m.DatapathConfiguration) { // not required + return nil + } + if err := m.DatapathConfiguration.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("datapath-configuration") @@ -262,6 +290,7 @@ func (m *EndpointChangeRequest) contextValidateLabels(ctx context.Context, forma func (m *EndpointChangeRequest) contextValidateState(ctx context.Context, formats strfmt.Registry) error { if m.State != nil { + if err := m.State.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("state") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_spec.go index 6c6a75c9c9..2032d7f26f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_spec.go @@ -105,6 +105,11 @@ func (m *EndpointConfigurationSpec) ContextValidate(ctx context.Context, formats func (m *EndpointConfigurationSpec) contextValidateLabelConfiguration(ctx context.Context, formats strfmt.Registry) error { if m.LabelConfiguration != nil { + + if swag.IsZero(m.LabelConfiguration) { // not required + return nil + } + if err := 
m.LabelConfiguration.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("label-configuration") @@ -120,6 +125,10 @@ func (m *EndpointConfigurationSpec) contextValidateLabelConfiguration(ctx contex func (m *EndpointConfigurationSpec) contextValidateOptions(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Options) { // not required + return nil + } + if err := m.Options.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("options") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_status.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_status.go index de4a9e8301..36b077465a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_configuration_status.go @@ -132,6 +132,10 @@ func (m *EndpointConfigurationStatus) ContextValidate(ctx context.Context, forma func (m *EndpointConfigurationStatus) contextValidateError(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Error) { // not required + return nil + } + if err := m.Error.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("error") @@ -146,6 +150,10 @@ func (m *EndpointConfigurationStatus) contextValidateError(ctx context.Context, func (m *EndpointConfigurationStatus) contextValidateImmutable(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Immutable) { // not required + return nil + } + if err := m.Immutable.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("immutable") @@ -161,6 +169,11 @@ func (m *EndpointConfigurationStatus) contextValidateImmutable(ctx context.Conte func (m *EndpointConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_health.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_health.go index 1aa10adbd6..8eda1e5dfd 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_health.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_health.go @@ -133,6 +133,10 @@ func (m *EndpointHealth) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *EndpointHealth) contextValidateBpf(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Bpf) { // not required + return nil + } + if err := m.Bpf.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bpf") @@ -147,6 +151,10 @@ func (m *EndpointHealth) contextValidateBpf(ctx context.Context, formats strfmt. 
func (m *EndpointHealth) contextValidateOverallHealth(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.OverallHealth) { // not required + return nil + } + if err := m.OverallHealth.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("overallHealth") @@ -161,6 +169,10 @@ func (m *EndpointHealth) contextValidateOverallHealth(ctx context.Context, forma func (m *EndpointHealth) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Policy) { // not required + return nil + } + if err := m.Policy.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("policy") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go index 09a26aa38d..380d26784a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_identifiers.go @@ -22,10 +22,13 @@ import ( // swagger:model EndpointIdentifiers type EndpointIdentifiers struct { - // ID assigned by container runtime + // ID assigned to this attachment by container runtime + CniAttachmentID string `json:"cni-attachment-id,omitempty"` + + // ID assigned by container runtime (deprecated, may not be unique) ContainerID string `json:"container-id,omitempty"` - // Name assigned to container + // Name assigned to container (deprecated, may not be unique) ContainerName string `json:"container-name,omitempty"` // Docker endpoint ID @@ -34,13 +37,13 @@ type EndpointIdentifiers struct { // Docker network ID DockerNetworkID string `json:"docker-network-id,omitempty"` - // K8s namespace for this endpoint + // K8s namespace for this endpoint (deprecated, may not be unique) K8sNamespace string `json:"k8s-namespace,omitempty"` - // K8s pod name for this endpoint + // K8s pod name for this endpoint (deprecated, may not be unique) K8sPodName string `json:"k8s-pod-name,omitempty"` - // K8s pod for this endpoint(Deprecated, use K8sPodName and K8sNamespace instead) + // K8s pod for this endpoint (deprecated, may not be unique) PodName string `json:"pod-name,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go index d322ca6390..1335ab47f0 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_networking.go @@ -25,16 +25,19 @@ type EndpointNetworking struct { // IP4/6 addresses assigned to this Endpoint Addressing []*AddressPair `json:"addressing"` + // Name of network device in container netns + ContainerInterfaceName string `json:"container-interface-name,omitempty"` + // host addressing HostAddressing *NodeAddressing `json:"host-addressing,omitempty"` // MAC address HostMac string `json:"host-mac,omitempty"` - // Index of network device + // Index of network device in host netns InterfaceIndex int64 `json:"interface-index,omitempty"` - // Name of network device + // Name of network device in host netns InterfaceName string `json:"interface-name,omitempty"` // MAC address @@ -127,6 +130,11 @@ func (m *EndpointNetworking) contextValidateAddressing(ctx context.Context, form for i := 0; i < len(m.Addressing); i++ { if m.Addressing[i] != nil { + + if swag.IsZero(m.Addressing[i]) { // not required + return nil + } + if err := 
m.Addressing[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("addressing" + "." + strconv.Itoa(i)) @@ -145,6 +153,11 @@ func (m *EndpointNetworking) contextValidateAddressing(ctx context.Context, form func (m *EndpointNetworking) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error { if m.HostAddressing != nil { + + if swag.IsZero(m.HostAddressing) { // not required + return nil + } + if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("host-addressing") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy.go index 53e36f9729..5f6150600a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy.go @@ -160,6 +160,11 @@ func (m *EndpointPolicy) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *EndpointPolicy) contextValidateCidrPolicy(ctx context.Context, formats strfmt.Registry) error { if m.CidrPolicy != nil { + + if swag.IsZero(m.CidrPolicy) { // not required + return nil + } + if err := m.CidrPolicy.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cidr-policy") @@ -176,6 +181,11 @@ func (m *EndpointPolicy) contextValidateCidrPolicy(ctx context.Context, formats func (m *EndpointPolicy) contextValidateL4(ctx context.Context, formats strfmt.Registry) error { if m.L4 != nil { + + if swag.IsZero(m.L4) { // not required + return nil + } + if err := m.L4.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("l4") @@ -191,6 +201,10 @@ func (m *EndpointPolicy) contextValidateL4(ctx context.Context, formats strfmt.R func (m *EndpointPolicy) contextValidatePolicyEnabled(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.PolicyEnabled) { // not required + return nil + } + if err := m.PolicyEnabled.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("policy-enabled") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy_status.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy_status.go index 70341a30ed..d4a8b179af 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_policy_status.go @@ -148,6 +148,11 @@ func (m *EndpointPolicyStatus) contextValidateProxyStatistics(ctx context.Contex for i := 0; i < len(m.ProxyStatistics); i++ { if m.ProxyStatistics[i] != nil { + + if swag.IsZero(m.ProxyStatistics[i]) { // not required + return nil + } + if err := m.ProxyStatistics[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("proxy-statistics" + "." 
+ strconv.Itoa(i)) @@ -166,6 +171,11 @@ func (m *EndpointPolicyStatus) contextValidateProxyStatistics(ctx context.Contex func (m *EndpointPolicyStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") @@ -182,6 +192,11 @@ func (m *EndpointPolicyStatus) contextValidateRealized(ctx context.Context, form func (m *EndpointPolicyStatus) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status.go index 546a7f2445..8ff62e19ae 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status.go @@ -390,6 +390,11 @@ func (m *EndpointStatus) contextValidateControllers(ctx context.Context, formats func (m *EndpointStatus) contextValidateExternalIdentifiers(ctx context.Context, formats strfmt.Registry) error { if m.ExternalIdentifiers != nil { + + if swag.IsZero(m.ExternalIdentifiers) { // not required + return nil + } + if err := m.ExternalIdentifiers.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("external-identifiers") @@ -406,6 +411,11 @@ func (m *EndpointStatus) contextValidateExternalIdentifiers(ctx context.Context, func (m *EndpointStatus) contextValidateHealth(ctx context.Context, formats strfmt.Registry) error { if m.Health != nil { + + if swag.IsZero(m.Health) { // not required + return nil + } + if err := m.Health.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("health") @@ -422,6 +432,11 @@ func (m *EndpointStatus) contextValidateHealth(ctx context.Context, formats strf func (m *EndpointStatus) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error { if m.Identity != nil { + + if swag.IsZero(m.Identity) { // not required + return nil + } + if err := m.Identity.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("identity") @@ -438,6 +453,11 @@ func (m *EndpointStatus) contextValidateIdentity(ctx context.Context, formats st func (m *EndpointStatus) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { if m.Labels != nil { + + if swag.IsZero(m.Labels) { // not required + return nil + } + if err := m.Labels.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("labels") @@ -482,6 +502,11 @@ func (m *EndpointStatus) contextValidateNamedPorts(ctx context.Context, formats func (m *EndpointStatus) contextValidateNetworking(ctx context.Context, formats strfmt.Registry) error { if m.Networking != nil { + + if swag.IsZero(m.Networking) { // not required + return nil + } + if err := m.Networking.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("networking") @@ -498,6 +523,11 @@ func (m *EndpointStatus) contextValidateNetworking(ctx context.Context, 
formats func (m *EndpointStatus) contextValidatePolicy(ctx context.Context, formats strfmt.Registry) error { if m.Policy != nil { + + if swag.IsZero(m.Policy) { // not required + return nil + } + if err := m.Policy.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("policy") @@ -514,6 +544,11 @@ func (m *EndpointStatus) contextValidatePolicy(ctx context.Context, formats strf func (m *EndpointStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") @@ -530,6 +565,7 @@ func (m *EndpointStatus) contextValidateRealized(ctx context.Context, formats st func (m *EndpointStatus) contextValidateState(ctx context.Context, formats strfmt.Registry) error { if m.State != nil { + if err := m.State.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("state") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_change.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_change.go index e347f28896..0da40bded9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_change.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_change.go @@ -26,7 +26,7 @@ import ( type EndpointStatusChange struct { // Code indicate type of status change - // Enum: [ok failed] + // Enum: ["ok","failed"] Code string `json:"code,omitempty"` // Status message @@ -132,6 +132,10 @@ func (m *EndpointStatusChange) ContextValidate(ctx context.Context, formats strf func (m *EndpointStatusChange) contextValidateState(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.State) { // not required + return nil + } + if err := m.State.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("state") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_log.go b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_log.go index de32548bd6..a6ba5d38c5 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_log.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/endpoint_status_log.go @@ -57,6 +57,11 @@ func (m EndpointStatusLog) ContextValidate(ctx context.Context, formats strfmt.R for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/frontend_address.go b/vendor/github.com/cilium/cilium/api/v1/models/frontend_address.go index cb4f48510b..cf2620fa2b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/frontend_address.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/frontend_address.go @@ -32,11 +32,11 @@ type FrontendAddress struct { Port uint16 `json:"port,omitempty"` // Layer 4 protocol - // Enum: [tcp udp any] + // Enum: ["tcp","udp","any"] Protocol string `json:"protocol,omitempty"` // Load balancing scope for frontend address - // Enum: [external internal] + // Enum: ["external","internal"] Scope string `json:"scope,omitempty"` } diff --git 
a/vendor/github.com/cilium/cilium/api/v1/models/frontend_mapping.go b/vendor/github.com/cilium/cilium/api/v1/models/frontend_mapping.go index ac26d0c444..7bab4735a4 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/frontend_mapping.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/frontend_mapping.go @@ -115,6 +115,11 @@ func (m *FrontendMapping) contextValidateBackends(ctx context.Context, formats s for i := 0; i < len(m.Backends); i++ { if m.Backends[i] != nil { + + if swag.IsZero(m.Backends[i]) { // not required + return nil + } + if err := m.Backends[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("backends" + "." + strconv.Itoa(i)) @@ -133,6 +138,11 @@ func (m *FrontendMapping) contextValidateBackends(ctx context.Context, formats s func (m *FrontendMapping) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error { if m.FrontendAddress != nil { + + if swag.IsZero(m.FrontendAddress) { // not required + return nil + } + if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("frontend-address") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/host_firewall.go b/vendor/github.com/cilium/cilium/api/v1/models/host_firewall.go index b6a5058808..bf1e595bc9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/host_firewall.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/host_firewall.go @@ -29,7 +29,7 @@ type HostFirewall struct { Devices []string `json:"devices"` // mode - // Enum: [Disabled Enabled] + // Enum: ["Disabled","Enabled"] Mode string `json:"mode,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/host_routing.go b/vendor/github.com/cilium/cilium/api/v1/models/host_routing.go deleted file mode 100644 index d80958e1ef..0000000000 --- a/vendor/github.com/cilium/cilium/api/v1/models/host_routing.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "encoding/json" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" -) - -// HostRouting Status of host routing -// -// +k8s:deepcopy-gen=true -// -// swagger:model HostRouting -type HostRouting struct { - - // Datapath routing mode - // Enum: [BPF Legacy] - Mode string `json:"mode,omitempty"` -} - -// Validate validates this host routing -func (m *HostRouting) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateMode(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
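The regenerated models in this bump (EndpointStatusChange, FrontendMapping, and the others above) add swag.IsZero guards so context validation short-circuits on zero-valued nested fields. A minimal sketch of a consumer exercising one of these ContextValidate methods, assuming the vendored import path resolves in this repository; the field values are illustrative only:

    package main

    import (
        "context"
        "fmt"

        "github.com/cilium/cilium/api/v1/models"
        "github.com/go-openapi/strfmt"
    )

    func main() {
        // State is left empty, so the regenerated contextValidateState guard
        // returns early instead of descending into the zero-valued enum.
        change := &models.EndpointStatusChange{Code: "ok"}

        if err := change.ContextValidate(context.Background(), strfmt.Default); err != nil {
            fmt.Println("context validation failed:", err)
            return
        }
        fmt.Println("context validation passed")
    }
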
- } - return nil -} - -var hostRoutingTypeModePropEnum []interface{} - -func init() { - var res []string - if err := json.Unmarshal([]byte(`["BPF","Legacy"]`), &res); err != nil { - panic(err) - } - for _, v := range res { - hostRoutingTypeModePropEnum = append(hostRoutingTypeModePropEnum, v) - } -} - -const ( - - // HostRoutingModeBPF captures enum value "BPF" - HostRoutingModeBPF string = "BPF" - - // HostRoutingModeLegacy captures enum value "Legacy" - HostRoutingModeLegacy string = "Legacy" -) - -// prop value enum -func (m *HostRouting) validateModeEnum(path, location string, value string) error { - if err := validate.EnumCase(path, location, value, hostRoutingTypeModePropEnum, true); err != nil { - return err - } - return nil -} - -func (m *HostRouting) validateMode(formats strfmt.Registry) error { - if swag.IsZero(m.Mode) { // not required - return nil - } - - // value enum - if err := m.validateModeEnum("mode", "body", m.Mode); err != nil { - return err - } - - return nil -} - -// ContextValidate validates this host routing based on context it is used -func (m *HostRouting) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *HostRouting) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HostRouting) UnmarshalBinary(b []byte) error { - var res HostRouting - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/hubble_status.go b/vendor/github.com/cilium/cilium/api/v1/models/hubble_status.go index 49754f9995..85c3dca29a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/hubble_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/hubble_status.go @@ -35,7 +35,7 @@ type HubbleStatus struct { Observer *HubbleStatusObserver `json:"observer,omitempty"` // State the component is in - // Enum: [Ok Warning Failure Disabled] + // Enum: ["Ok","Warning","Failure","Disabled"] State string `json:"state,omitempty"` } @@ -168,6 +168,11 @@ func (m *HubbleStatus) ContextValidate(ctx context.Context, formats strfmt.Regis func (m *HubbleStatus) contextValidateMetrics(ctx context.Context, formats strfmt.Registry) error { if m.Metrics != nil { + + if swag.IsZero(m.Metrics) { // not required + return nil + } + if err := m.Metrics.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("metrics") @@ -184,6 +189,11 @@ func (m *HubbleStatus) contextValidateMetrics(ctx context.Context, formats strfm func (m *HubbleStatus) contextValidateObserver(ctx context.Context, formats strfmt.Registry) error { if m.Observer != nil { + + if swag.IsZero(m.Observer) { // not required + return nil + } + if err := m.Observer.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("observer") @@ -221,7 +231,7 @@ func (m *HubbleStatus) UnmarshalBinary(b []byte) error { type HubbleStatusMetrics struct { // State of the Hubble metrics - // Enum: [Ok Warning Failure Disabled] + // Enum: ["Ok","Warning","Failure","Disabled"] State string `json:"state,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go b/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go new file mode 100644 index 0000000000..5eafc226f9 --- /dev/null +++ 
b/vendor/github.com/cilium/cilium/api/v1/models/i_psec_status.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// IPsecStatus Status of the IPsec agent +// +// +k8s:deepcopy-gen=true +// +// swagger:model IPsecStatus +type IPsecStatus struct { + + // IPsec decryption interfaces + DecryptInterfaces []string `json:"decrypt-interfaces"` + + // IPsec error count + ErrorCount int64 `json:"error-count,omitempty"` + + // IPsec keys in use + KeysInUse int64 `json:"keys-in-use,omitempty"` + + // IPsec max sequence number + MaxSeqNumber string `json:"max-seq-number,omitempty"` + + // IPsec XFRM errors + XfrmErrors map[string]int64 `json:"xfrm-errors,omitempty"` +} + +// Validate validates this i psec status +func (m *IPsecStatus) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this i psec status based on context it is used +func (m *IPsecStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *IPsecStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *IPsecStatus) UnmarshalBinary(b []byte) error { + var res IPsecStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/identity_endpoints.go b/vendor/github.com/cilium/cilium/api/v1/models/identity_endpoints.go index 438f443ff8..1a2a94b33b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/identity_endpoints.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/identity_endpoints.go @@ -78,6 +78,11 @@ func (m *IdentityEndpoints) ContextValidate(ctx context.Context, formats strfmt. 
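The new IPsecStatus model above (i_psec_status.go) surfaces IPsec key and XFRM error counters from the agent. A minimal sketch of decoding a status fragment into it, assuming the vendored import path resolves; the JSON payload is illustrative and only follows the swagger tags shown above:

    package main

    import (
        "fmt"

        "github.com/cilium/cilium/api/v1/models"
    )

    func main() {
        payload := []byte(`{
            "decrypt-interfaces": ["cilium_vxlan"],
            "keys-in-use": 2,
            "max-seq-number": "0x44d/0xffffffff",
            "error-count": 0
        }`)

        var status models.IPsecStatus
        // UnmarshalBinary is the generated JSON decoder (swag.ReadJSON).
        if err := status.UnmarshalBinary(payload); err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Printf("IPsec keys in use: %d, errors: %d\n", status.KeysInUse, status.ErrorCount)
    }
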
func (m *IdentityEndpoints) contextValidateIdentity(ctx context.Context, formats strfmt.Registry) error { if m.Identity != nil { + + if swag.IsZero(m.Identity) { // not required + return nil + } + if err := m.Identity.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("identity") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_response.go b/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_response.go index bf08751d56..25c729cac7 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_response.go @@ -170,6 +170,7 @@ func (m *IPAMResponse) ContextValidate(ctx context.Context, formats strfmt.Regis func (m *IPAMResponse) contextValidateAddress(ctx context.Context, formats strfmt.Registry) error { if m.Address != nil { + if err := m.Address.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("address") @@ -186,6 +187,7 @@ func (m *IPAMResponse) contextValidateAddress(ctx context.Context, formats strfm func (m *IPAMResponse) contextValidateHostAddressing(ctx context.Context, formats strfmt.Registry) error { if m.HostAddressing != nil { + if err := m.HostAddressing.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("host-addressing") @@ -202,6 +204,11 @@ func (m *IPAMResponse) contextValidateHostAddressing(ctx context.Context, format func (m *IPAMResponse) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error { if m.IPV4 != nil { + + if swag.IsZero(m.IPV4) { // not required + return nil + } + if err := m.IPV4.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv4") @@ -218,6 +225,11 @@ func (m *IPAMResponse) contextValidateIPV4(ctx context.Context, formats strfmt.R func (m *IPAMResponse) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error { if m.IPV6 != nil { + + if swag.IsZero(m.IPV6) { // not required + return nil + } + if err := m.IPV6.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv6") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_status.go b/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_status.go index 0ae76cf6a0..a50f03c718 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/ip_a_m_status.go @@ -85,6 +85,10 @@ func (m *IPAMStatus) ContextValidate(ctx context.Context, formats strfmt.Registr func (m *IPAMStatus) contextValidateAllocations(ctx context.Context, formats strfmt.Registry) error { + if swag.IsZero(m.Allocations) { // not required + return nil + } + if err := m.Allocations.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("allocations") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/ip_list_entry.go b/vendor/github.com/cilium/cilium/api/v1/models/ip_list_entry.go index 55def940ff..df8a2009cf 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/ip_list_entry.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/ip_list_entry.go @@ -116,6 +116,11 @@ func (m *IPListEntry) ContextValidate(ctx context.Context, formats strfmt.Regist func (m *IPListEntry) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { if m.Metadata != nil { + + if 
swag.IsZero(m.Metadata) { // not required + return nil + } + if err := m.Metadata.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("metadata") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/k8s_status.go b/vendor/github.com/cilium/cilium/api/v1/models/k8s_status.go index f59a1c345e..9493820da6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/k8s_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/k8s_status.go @@ -32,7 +32,7 @@ type K8sStatus struct { Msg string `json:"msg,omitempty"` // State the component is in - // Enum: [Ok Warning Failure Disabled] + // Enum: ["Ok","Warning","Failure","Disabled"] State string `json:"state,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go index d47308d560..6477cb154b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go @@ -41,7 +41,7 @@ type KubeProxyReplacement struct { Features *KubeProxyReplacementFeatures `json:"features,omitempty"` // mode - // Enum: [Disabled Strict Probe Partial True False] + // Enum: ["True","False"] Mode string `json:"mode,omitempty"` } @@ -116,7 +116,7 @@ var kubeProxyReplacementTypeModePropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["Disabled","Strict","Probe","Partial","True","False"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["True","False"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -126,18 +126,6 @@ func init() { const ( - // KubeProxyReplacementModeDisabled captures enum value "Disabled" - KubeProxyReplacementModeDisabled string = "Disabled" - - // KubeProxyReplacementModeStrict captures enum value "Strict" - KubeProxyReplacementModeStrict string = "Strict" - - // KubeProxyReplacementModeProbe captures enum value "Probe" - KubeProxyReplacementModeProbe string = "Probe" - - // KubeProxyReplacementModePartial captures enum value "Partial" - KubeProxyReplacementModePartial string = "Partial" - // KubeProxyReplacementModeTrue captures enum value "True" KubeProxyReplacementModeTrue string = "True" @@ -189,6 +177,11 @@ func (m *KubeProxyReplacement) contextValidateDeviceList(ctx context.Context, fo for i := 0; i < len(m.DeviceList); i++ { if m.DeviceList[i] != nil { + + if swag.IsZero(m.DeviceList[i]) { // not required + return nil + } + if err := m.DeviceList[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("deviceList" + "." 
+ strconv.Itoa(i)) @@ -207,6 +200,11 @@ func (m *KubeProxyReplacement) contextValidateDeviceList(ctx context.Context, fo func (m *KubeProxyReplacement) contextValidateFeatures(ctx context.Context, formats strfmt.Registry) error { if m.Features != nil { + + if swag.IsZero(m.Features) { // not required + return nil + } + if err := m.Features.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features") @@ -289,6 +287,9 @@ func (m *KubeProxyReplacementDeviceListItems0) UnmarshalBinary(b []byte) error { // swagger:model KubeProxyReplacementFeatures type KubeProxyReplacementFeatures struct { + // annotations + Annotations []string `json:"annotations"` + // flag bpf-lb-sock-hostns-only BpfSocketLBHostnsOnly bool `json:"bpfSocketLBHostnsOnly,omitempty"` @@ -586,6 +587,11 @@ func (m *KubeProxyReplacementFeatures) ContextValidate(ctx context.Context, form func (m *KubeProxyReplacementFeatures) contextValidateExternalIPs(ctx context.Context, formats strfmt.Registry) error { if m.ExternalIPs != nil { + + if swag.IsZero(m.ExternalIPs) { // not required + return nil + } + if err := m.ExternalIPs.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "externalIPs") @@ -602,6 +608,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateExternalIPs(ctx context.Co func (m *KubeProxyReplacementFeatures) contextValidateGracefulTermination(ctx context.Context, formats strfmt.Registry) error { if m.GracefulTermination != nil { + + if swag.IsZero(m.GracefulTermination) { // not required + return nil + } + if err := m.GracefulTermination.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "gracefulTermination") @@ -618,6 +629,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateGracefulTermination(ctx co func (m *KubeProxyReplacementFeatures) contextValidateHostPort(ctx context.Context, formats strfmt.Registry) error { if m.HostPort != nil { + + if swag.IsZero(m.HostPort) { // not required + return nil + } + if err := m.HostPort.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "hostPort") @@ -634,6 +650,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateHostPort(ctx context.Conte func (m *KubeProxyReplacementFeatures) contextValidateHostReachableServices(ctx context.Context, formats strfmt.Registry) error { if m.HostReachableServices != nil { + + if swag.IsZero(m.HostReachableServices) { // not required + return nil + } + if err := m.HostReachableServices.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "hostReachableServices") @@ -650,6 +671,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateHostReachableServices(ctx func (m *KubeProxyReplacementFeatures) contextValidateNat46X64(ctx context.Context, formats strfmt.Registry) error { if m.Nat46X64 != nil { + + if swag.IsZero(m.Nat46X64) { // not required + return nil + } + if err := m.Nat46X64.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." 
+ "nat46X64") @@ -666,6 +692,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateNat46X64(ctx context.Conte func (m *KubeProxyReplacementFeatures) contextValidateNodePort(ctx context.Context, formats strfmt.Registry) error { if m.NodePort != nil { + + if swag.IsZero(m.NodePort) { // not required + return nil + } + if err := m.NodePort.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "nodePort") @@ -682,6 +713,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateNodePort(ctx context.Conte func (m *KubeProxyReplacementFeatures) contextValidateSessionAffinity(ctx context.Context, formats strfmt.Registry) error { if m.SessionAffinity != nil { + + if swag.IsZero(m.SessionAffinity) { // not required + return nil + } + if err := m.SessionAffinity.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "sessionAffinity") @@ -698,6 +734,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateSessionAffinity(ctx contex func (m *KubeProxyReplacementFeatures) contextValidateSocketLB(ctx context.Context, formats strfmt.Registry) error { if m.SocketLB != nil { + + if swag.IsZero(m.SocketLB) { // not required + return nil + } + if err := m.SocketLB.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "socketLB") @@ -714,6 +755,11 @@ func (m *KubeProxyReplacementFeatures) contextValidateSocketLB(ctx context.Conte func (m *KubeProxyReplacementFeatures) contextValidateSocketLBTracing(ctx context.Context, formats strfmt.Registry) error { if m.SocketLBTracing != nil { + + if swag.IsZero(m.SocketLBTracing) { // not required + return nil + } + if err := m.SocketLBTracing.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "socketLBTracing") @@ -998,6 +1044,11 @@ func (m *KubeProxyReplacementFeaturesNat46X64) ContextValidate(ctx context.Conte func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateGateway(ctx context.Context, formats strfmt.Registry) error { if m.Gateway != nil { + + if swag.IsZero(m.Gateway) { // not required + return nil + } + if err := m.Gateway.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "nat46X64" + "." + "gateway") @@ -1014,6 +1065,11 @@ func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateGateway(ctx contex func (m *KubeProxyReplacementFeaturesNat46X64) contextValidateService(ctx context.Context, formats strfmt.Registry) error { if m.Service != nil { + + if swag.IsZero(m.Service) { // not required + return nil + } + if err := m.Service.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("features" + "." + "nat46X64" + "." 
+ "service") @@ -1134,13 +1190,17 @@ func (m *KubeProxyReplacementFeaturesNat46X64Service) UnmarshalBinary(b []byte) type KubeProxyReplacementFeaturesNodePort struct { // acceleration - // Enum: [None Native Generic] + // Enum: ["None","Native","Generic","Best-Effort"] Acceleration string `json:"acceleration,omitempty"` // algorithm - // Enum: [Random Maglev] + // Enum: ["Random","Maglev"] Algorithm string `json:"algorithm,omitempty"` + // dsr mode + // Enum: ["IP Option/Extension","IPIP","Geneve"] + DsrMode string `json:"dsrMode,omitempty"` + // enabled Enabled bool `json:"enabled,omitempty"` @@ -1148,7 +1208,7 @@ type KubeProxyReplacementFeaturesNodePort struct { LutSize int64 `json:"lutSize,omitempty"` // mode - // Enum: [SNAT DSR Hybrid] + // Enum: ["SNAT","DSR","Hybrid"] Mode string `json:"mode,omitempty"` // port max @@ -1170,6 +1230,10 @@ func (m *KubeProxyReplacementFeaturesNodePort) Validate(formats strfmt.Registry) res = append(res, err) } + if err := m.validateDsrMode(formats); err != nil { + res = append(res, err) + } + if err := m.validateMode(formats); err != nil { res = append(res, err) } @@ -1184,7 +1248,7 @@ var kubeProxyReplacementFeaturesNodePortTypeAccelerationPropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["None","Native","Generic"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["None","Native","Generic","Best-Effort"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -1202,6 +1266,9 @@ const ( // KubeProxyReplacementFeaturesNodePortAccelerationGeneric captures enum value "Generic" KubeProxyReplacementFeaturesNodePortAccelerationGeneric string = "Generic" + + // KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort captures enum value "Best-Effort" + KubeProxyReplacementFeaturesNodePortAccelerationBestDashEffort string = "Best-Effort" ) // prop value enum @@ -1267,6 +1334,51 @@ func (m *KubeProxyReplacementFeaturesNodePort) validateAlgorithm(formats strfmt. 
return nil } +var kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["IP Option/Extension","IPIP","Geneve"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum = append(kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, v) + } +} + +const ( + + // KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension captures enum value "IP Option/Extension" + KubeProxyReplacementFeaturesNodePortDsrModeIPOptionExtension string = "IP Option/Extension" + + // KubeProxyReplacementFeaturesNodePortDsrModeIPIP captures enum value "IPIP" + KubeProxyReplacementFeaturesNodePortDsrModeIPIP string = "IPIP" + + // KubeProxyReplacementFeaturesNodePortDsrModeGeneve captures enum value "Geneve" + KubeProxyReplacementFeaturesNodePortDsrModeGeneve string = "Geneve" +) + +// prop value enum +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, kubeProxyReplacementFeaturesNodePortTypeDsrModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *KubeProxyReplacementFeaturesNodePort) validateDsrMode(formats strfmt.Registry) error { + if swag.IsZero(m.DsrMode) { // not required + return nil + } + + // value enum + if err := m.validateDsrModeEnum("features"+"."+"nodePort"+"."+"dsrMode", "body", m.DsrMode); err != nil { + return err + } + + return nil +} + var kubeProxyReplacementFeaturesNodePortTypeModePropEnum []interface{} func init() { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/l4_policy.go b/vendor/github.com/cilium/cilium/api/v1/models/l4_policy.go index 12b837d84a..b6c4c198b1 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/l4_policy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/l4_policy.go @@ -124,6 +124,11 @@ func (m *L4Policy) contextValidateEgress(ctx context.Context, formats strfmt.Reg for i := 0; i < len(m.Egress); i++ { if m.Egress[i] != nil { + + if swag.IsZero(m.Egress[i]) { // not required + return nil + } + if err := m.Egress[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("egress" + "." + strconv.Itoa(i)) @@ -144,6 +149,11 @@ func (m *L4Policy) contextValidateIngress(ctx context.Context, formats strfmt.Re for i := 0; i < len(m.Ingress); i++ { if m.Ingress[i] != nil { + + if swag.IsZero(m.Ingress[i]) { // not required + return nil + } + if err := m.Ingress[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ingress" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_backend.go b/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_backend.go index 3e844fefa2..5603f5191d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_backend.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_backend.go @@ -78,6 +78,11 @@ func (m *LRPBackend) ContextValidate(ctx context.Context, formats strfmt.Registr func (m *LRPBackend) contextValidateBackendAddress(ctx context.Context, formats strfmt.Registry) error { if m.BackendAddress != nil { + + if swag.IsZero(m.BackendAddress) { // not required + return nil + } + if err := m.BackendAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("backend-address") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_spec.go index 8d65810342..99a5387c46 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/l_r_p_spec.go @@ -103,6 +103,11 @@ func (m *LRPSpec) contextValidateFrontendMappings(ctx context.Context, formats s for i := 0; i < len(m.FrontendMappings); i++ { if m.FrontendMappings[i] != nil { + + if swag.IsZero(m.FrontendMappings[i]) { // not required + return nil + } + if err := m.FrontendMappings[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("frontend-mappings" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label.go b/vendor/github.com/cilium/cilium/api/v1/models/label.go new file mode 100644 index 0000000000..7e4225fee1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label.go @@ -0,0 +1,59 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Label Label is the Cilium's representation of a container label +// +// swagger:model Label +type Label struct { + + // key + Key string `json:"key,omitempty"` + + // Source can be one of the above values (e.g. LabelSourceContainer) + Source string `json:"source,omitempty"` + + // value + Value string `json:"value,omitempty"` +} + +// Validate validates this label +func (m *Label) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this label based on context it is used +func (m *Label) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Label) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Label) UnmarshalBinary(b []byte) error { + var res Label + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label_array.go b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go new file mode 100644 index 0000000000..120753fdcb --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/label_array.go @@ -0,0 +1,81 @@ +// Code generated by go-swagger; DO NOT EDIT. 
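The bump also introduces standalone Label and LabelArray models (label.go above, label_array.go below). A minimal sketch building one label and serializing it with the generated helper, assuming the vendored import path resolves; the key/source/value strings are illustrative:

    package main

    import (
        "fmt"

        "github.com/cilium/cilium/api/v1/models"
    )

    func main() {
        lbl := &models.Label{
            Key:    "app",
            Source: "k8s",
            Value:  "frontend",
        }

        out, err := lbl.MarshalBinary() // swag.WriteJSON under the hood
        if err != nil {
            fmt.Println("marshal failed:", err)
            return
        }
        fmt.Println(string(out)) // {"key":"app","source":"k8s","value":"frontend"}
    }
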
+ +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// LabelArray LabelArray is an array of labels forming a set +// +// swagger:model LabelArray +type LabelArray []*Label + +// Validate validates this label array +func (m LabelArray) Validate(formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + if swag.IsZero(m[i]) { // not required + continue + } + + if m[i] != nil { + if err := m[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +// ContextValidate validate this label array based on the context it is used +func (m LabelArray) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + for i := 0; i < len(m); i++ { + + if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + + if err := m[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName(strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName(strconv.Itoa(i)) + } + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label_configuration.go b/vendor/github.com/cilium/cilium/api/v1/models/label_configuration.go index 7ca55a2233..f90386596a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/label_configuration.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/label_configuration.go @@ -105,6 +105,11 @@ func (m *LabelConfiguration) ContextValidate(ctx context.Context, formats strfmt func (m *LabelConfiguration) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -121,6 +126,11 @@ func (m *LabelConfiguration) contextValidateSpec(ctx context.Context, formats st func (m *LabelConfiguration) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/label_configuration_status.go b/vendor/github.com/cilium/cilium/api/v1/models/label_configuration_status.go index b89fb4df09..07a199787f 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/label_configuration_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/label_configuration_status.go @@ -187,6 +187,11 @@ func (m *LabelConfigurationStatus) contextValidateDisabled(ctx context.Context, func (m *LabelConfigurationStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != 
nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/map_event.go b/vendor/github.com/cilium/cilium/api/v1/models/map_event.go index 8b0b8b76ac..0771b9e316 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/map_event.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/map_event.go @@ -24,11 +24,11 @@ import ( type MapEvent struct { // Action type for event - // Enum: [update delete] + // Enum: ["update","delete"] Action string `json:"action,omitempty"` // Desired action to be performed after this event - // Enum: [ok insert delete] + // Enum: ["ok","insert","delete"] DesiredAction string `json:"desired-action,omitempty"` // Map key on which the event occured diff --git a/vendor/github.com/cilium/cilium/api/v1/models/masquerading.go b/vendor/github.com/cilium/cilium/api/v1/models/masquerading.go index 80395c5b2b..eaca669075 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/masquerading.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/masquerading.go @@ -35,7 +35,7 @@ type Masquerading struct { IPMasqAgent bool `json:"ip-masq-agent,omitempty"` // mode - // Enum: [BPF iptables] + // Enum: ["BPF","iptables"] Mode string `json:"mode,omitempty"` // This field is obsolete, please use snat-exclusion-cidr-v4 or snat-exclusion-cidr-v6. @@ -147,6 +147,11 @@ func (m *Masquerading) ContextValidate(ctx context.Context, formats strfmt.Regis func (m *Masquerading) contextValidateEnabledProtocols(ctx context.Context, formats strfmt.Registry) error { if m.EnabledProtocols != nil { + + if swag.IsZero(m.EnabledProtocols) { // not required + return nil + } + if err := m.EnabledProtocols.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("enabledProtocols") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/name_manager.go b/vendor/github.com/cilium/cilium/api/v1/models/name_manager.go index 3140d01d13..0a1fbfb34d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/name_manager.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/name_manager.go @@ -88,6 +88,11 @@ func (m *NameManager) contextValidateFQDNPolicySelectors(ctx context.Context, fo for i := 0; i < len(m.FQDNPolicySelectors); i++ { if m.FQDNPolicySelectors[i] != nil { + + if swag.IsZero(m.FQDNPolicySelectors[i]) { // not required + return nil + } + if err := m.FQDNPolicySelectors[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("FQDNPolicySelectors" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/named_ports.go b/vendor/github.com/cilium/cilium/api/v1/models/named_ports.go index 89df87409b..7a22f0f08e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/named_ports.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/named_ports.go @@ -61,6 +61,11 @@ func (m NamedPorts) ContextValidate(ctx context.Context, formats strfmt.Registry for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/node_addressing.go b/vendor/github.com/cilium/cilium/api/v1/models/node_addressing.go index 1dc57a4ede..5c10e4e92a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/node_addressing.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/node_addressing.go @@ -107,6 +107,11 @@ func (m *NodeAddressing) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *NodeAddressing) contextValidateIPV4(ctx context.Context, formats strfmt.Registry) error { if m.IPV4 != nil { + + if swag.IsZero(m.IPV4) { // not required + return nil + } + if err := m.IPV4.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv4") @@ -123,6 +128,11 @@ func (m *NodeAddressing) contextValidateIPV4(ctx context.Context, formats strfmt func (m *NodeAddressing) contextValidateIPV6(ctx context.Context, formats strfmt.Registry) error { if m.IPV6 != nil { + + if swag.IsZero(m.IPV6) { // not required + return nil + } + if err := m.IPV6.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv6") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go index a3d06e6605..c4ccfb7e7a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/node_element.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/node_element.go @@ -40,6 +40,9 @@ type NodeElement struct { // Alternative addresses assigned to the node SecondaryAddresses []*NodeAddressingElement `json:"secondary-addresses"` + + // Source of the node configuration + Source string `json:"source,omitempty"` } // Validate validates this node element @@ -180,6 +183,11 @@ func (m *NodeElement) ContextValidate(ctx context.Context, formats strfmt.Regist func (m *NodeElement) contextValidateHealthEndpointAddress(ctx context.Context, formats strfmt.Registry) error { if m.HealthEndpointAddress != nil { + + if swag.IsZero(m.HealthEndpointAddress) { // not required + return nil + } + if err := m.HealthEndpointAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("health-endpoint-address") @@ -196,6 +204,11 @@ func (m *NodeElement) contextValidateHealthEndpointAddress(ctx context.Context, func (m *NodeElement) contextValidateIngressAddress(ctx context.Context, formats strfmt.Registry) error { if m.IngressAddress != nil { + + if swag.IsZero(m.IngressAddress) { // not required + return nil + } + if err := m.IngressAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ingress-address") @@ -212,6 +225,11 @@ func (m *NodeElement) contextValidateIngressAddress(ctx context.Context, formats func (m *NodeElement) 
contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error { if m.PrimaryAddress != nil { + + if swag.IsZero(m.PrimaryAddress) { // not required + return nil + } + if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("primary-address") @@ -230,6 +248,11 @@ func (m *NodeElement) contextValidateSecondaryAddresses(ctx context.Context, for for i := 0; i < len(m.SecondaryAddresses); i++ { if m.SecondaryAddresses[i] != nil { + + if swag.IsZero(m.SecondaryAddresses[i]) { // not required + return nil + } + if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/port.go b/vendor/github.com/cilium/cilium/api/v1/models/port.go index f89ef43efe..5a6fafca0e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/port.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/port.go @@ -32,7 +32,7 @@ type Port struct { Port uint16 `json:"port,omitempty"` // Layer 4 protocol - // Enum: [TCP UDP SCTP ICMP ICMPV6 ANY] + // Enum: ["TCP","UDP","SCTP","ICMP","ICMPV6","ANY"] Protocol string `json:"protocol,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/prefilter.go b/vendor/github.com/cilium/cilium/api/v1/models/prefilter.go index dac7fbcbac..6488a18c55 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/prefilter.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/prefilter.go @@ -105,6 +105,11 @@ func (m *Prefilter) ContextValidate(ctx context.Context, formats strfmt.Registry func (m *Prefilter) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -121,6 +126,11 @@ func (m *Prefilter) contextValidateSpec(ctx context.Context, formats strfmt.Regi func (m *Prefilter) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/prefilter_status.go b/vendor/github.com/cilium/cilium/api/v1/models/prefilter_status.go index 1f285a343d..b64c33e63b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/prefilter_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/prefilter_status.go @@ -75,6 +75,11 @@ func (m *PrefilterStatus) ContextValidate(ctx context.Context, formats strfmt.Re func (m *PrefilterStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/proxy_statistics.go b/vendor/github.com/cilium/cilium/api/v1/models/proxy_statistics.go index 98dc9d17ef..34a5090d1d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/proxy_statistics.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/proxy_statistics.go @@ 
-29,7 +29,7 @@ type ProxyStatistics struct { AllocatedProxyPort int64 `json:"allocated-proxy-port,omitempty"` // Location of where the redirect is installed - // Enum: [ingress egress] + // Enum: ["ingress","egress"] Location string `json:"location,omitempty"` // The port subject to the redirect @@ -138,6 +138,11 @@ func (m *ProxyStatistics) ContextValidate(ctx context.Context, formats strfmt.Re func (m *ProxyStatistics) contextValidateStatistics(ctx context.Context, formats strfmt.Registry) error { if m.Statistics != nil { + + if swag.IsZero(m.Statistics) { // not required + return nil + } + if err := m.Statistics.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("statistics") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go b/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go index d3283687d1..c0bcfe2e57 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go @@ -27,7 +27,7 @@ import ( type ProxyStatus struct { // Deployment mode of Envoy L7 proxy - // Enum: [embedded external] + // Enum: ["embedded","external"] EnvoyDeploymentMode string `json:"envoy-deployment-mode,omitempty"` // IP address that the proxy listens on @@ -151,6 +151,11 @@ func (m *ProxyStatus) contextValidateRedirects(ctx context.Context, formats strf for i := 0; i < len(m.Redirects); i++ { if m.Redirects[i] != nil { + + if swag.IsZero(m.Redirects[i]) { // not required + return nil + } + if err := m.Redirects[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("redirects" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder.go b/vendor/github.com/cilium/cilium/api/v1/models/recorder.go index 36d41dc0ce..4656e9856c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder.go @@ -105,6 +105,11 @@ func (m *Recorder) ContextValidate(ctx context.Context, formats strfmt.Registry) func (m *Recorder) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -121,6 +126,11 @@ func (m *Recorder) contextValidateSpec(ctx context.Context, formats strfmt.Regis func (m *Recorder) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder_filter.go b/vendor/github.com/cilium/cilium/api/v1/models/recorder_filter.go index e438fbac02..88d460693a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder_filter.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder_filter.go @@ -30,7 +30,7 @@ type RecorderFilter struct { DstPrefix string `json:"dst-prefix,omitempty"` // Layer 4 protocol - // Enum: [TCP UDP SCTP ANY] + // Enum: ["TCP","UDP","SCTP","ANY"] Protocol string `json:"protocol,omitempty"` // Layer 4 source port, zero (or in future range) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask.go 
b/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask.go index dec0cd20ba..b98d74a95e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask.go @@ -75,6 +75,11 @@ func (m *RecorderMask) ContextValidate(ctx context.Context, formats strfmt.Regis func (m *RecorderMask) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask_status.go b/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask_status.go index e79be59f13..1ad4f7422a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder_mask_status.go @@ -75,6 +75,11 @@ func (m *RecorderMaskStatus) ContextValidate(ctx context.Context, formats strfmt func (m *RecorderMaskStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/recorder_spec.go index 362ee50e2a..95eb7aead9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder_spec.go @@ -108,6 +108,11 @@ func (m *RecorderSpec) contextValidateFilters(ctx context.Context, formats strfm for i := 0; i < len(m.Filters); i++ { if m.Filters[i] != nil { + + if swag.IsZero(m.Filters[i]) { // not required + return nil + } + if err := m.Filters[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("filters" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/recorder_status.go b/vendor/github.com/cilium/cilium/api/v1/models/recorder_status.go index 5476b606fd..38749325c7 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/recorder_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/recorder_status.go @@ -75,6 +75,11 @@ func (m *RecorderStatus) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *RecorderStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go index a8d8c2be95..8cb66d83a0 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster.go @@ -49,6 +49,9 @@ type RemoteCluster struct { // Number of nodes in the cluster NumNodes int64 `json:"num-nodes,omitempty"` + // Number of MCS-API service exports in the cluster + NumServiceExports int64 `json:"num-service-exports,omitempty"` + // Number of services in the cluster NumSharedServices int64 `json:"num-shared-services,omitempty"` @@ -155,6 +158,11 @@ func (m *RemoteCluster) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfmt.Registry) error { if m.Config != nil { + + if swag.IsZero(m.Config) { // not required + return nil + } + if err := m.Config.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("config") @@ -171,6 +179,11 @@ func (m *RemoteCluster) contextValidateConfig(ctx context.Context, formats strfm func (m *RemoteCluster) contextValidateSynced(ctx context.Context, formats strfmt.Registry) error { if m.Synced != nil { + + if swag.IsZero(m.Synced) { // not required + return nil + } + if err := m.Synced.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("synced") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go index 0b2ae22dd8..a34246bd37 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_config.go @@ -34,6 +34,9 @@ type RemoteClusterConfig struct { // Whether the configuration has been correctly retrieved Retrieved bool `json:"retrieved,omitempty"` + // Whether or not MCS-API ServiceExports is enabled by the cluster (null means unsupported). 
+ ServiceExportsEnabled *bool `json:"service-exports-enabled,omitempty"` + // Whether the remote cluster supports per-prefix "synced" canaries SyncCanaries bool `json:"sync-canaries,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go index 8c1151ba38..acbc16592c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/remote_cluster_synced.go @@ -34,6 +34,9 @@ type RemoteClusterSynced struct { // Nodes synchronization status Nodes bool `json:"nodes,omitempty"` + // MCS-API service exports synchronization status (null means that the component is not watching service exports) + ServiceExports *bool `json:"service-exports,omitempty"` + // Services synchronization status Services bool `json:"services,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/request_response_statistics.go b/vendor/github.com/cilium/cilium/api/v1/models/request_response_statistics.go index 8b3a5574eb..f6d5b96bde 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/request_response_statistics.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/request_response_statistics.go @@ -107,6 +107,11 @@ func (m *RequestResponseStatistics) ContextValidate(ctx context.Context, formats func (m *RequestResponseStatistics) contextValidateRequests(ctx context.Context, formats strfmt.Registry) error { if m.Requests != nil { + + if swag.IsZero(m.Requests) { // not required + return nil + } + if err := m.Requests.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("requests") @@ -123,6 +128,11 @@ func (m *RequestResponseStatistics) contextValidateRequests(ctx context.Context, func (m *RequestResponseStatistics) contextValidateResponses(ctx context.Context, formats strfmt.Registry) error { if m.Responses != nil { + + if swag.IsZero(m.Responses) { // not required + return nil + } + if err := m.Responses.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("responses") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/routing.go b/vendor/github.com/cilium/cilium/api/v1/models/routing.go new file mode 100644 index 0000000000..fe028cf6d7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/routing.go @@ -0,0 +1,163 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Routing Status of routing +// +// +k8s:deepcopy-gen=true +// +// swagger:model Routing +type Routing struct { + + // Datapath routing mode for cross-cluster connectivity + // Enum: ["Native","Tunnel"] + InterHostRoutingMode string `json:"inter-host-routing-mode,omitempty"` + + // Datapath routing mode for connectivity within the host + // Enum: ["BPF","Legacy"] + IntraHostRoutingMode string `json:"intra-host-routing-mode,omitempty"` + + // Tunnel protocol in use for cross-cluster connectivity + TunnelProtocol string `json:"tunnel-protocol,omitempty"` +} + +// Validate validates this routing +func (m *Routing) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateInterHostRoutingMode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateIntraHostRoutingMode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var routingTypeInterHostRoutingModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["Native","Tunnel"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + routingTypeInterHostRoutingModePropEnum = append(routingTypeInterHostRoutingModePropEnum, v) + } +} + +const ( + + // RoutingInterHostRoutingModeNative captures enum value "Native" + RoutingInterHostRoutingModeNative string = "Native" + + // RoutingInterHostRoutingModeTunnel captures enum value "Tunnel" + RoutingInterHostRoutingModeTunnel string = "Tunnel" +) + +// prop value enum +func (m *Routing) validateInterHostRoutingModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, routingTypeInterHostRoutingModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Routing) validateInterHostRoutingMode(formats strfmt.Registry) error { + if swag.IsZero(m.InterHostRoutingMode) { // not required + return nil + } + + // value enum + if err := m.validateInterHostRoutingModeEnum("inter-host-routing-mode", "body", m.InterHostRoutingMode); err != nil { + return err + } + + return nil +} + +var routingTypeIntraHostRoutingModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["BPF","Legacy"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + routingTypeIntraHostRoutingModePropEnum = append(routingTypeIntraHostRoutingModePropEnum, v) + } +} + +const ( + + // RoutingIntraHostRoutingModeBPF captures enum value "BPF" + RoutingIntraHostRoutingModeBPF string = "BPF" + + // RoutingIntraHostRoutingModeLegacy captures enum value "Legacy" + RoutingIntraHostRoutingModeLegacy string = "Legacy" +) + +// prop value enum +func (m *Routing) validateIntraHostRoutingModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, routingTypeIntraHostRoutingModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Routing) validateIntraHostRoutingMode(formats strfmt.Registry) error { + if swag.IsZero(m.IntraHostRoutingMode) { // not required + return nil + } + + // value enum + if err := m.validateIntraHostRoutingModeEnum("intra-host-routing-mode", "body", m.IntraHostRoutingMode); err != nil { + return 
err + } + + return nil +} + +// ContextValidate validates this routing based on context it is used +func (m *Routing) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Routing) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Routing) UnmarshalBinary(b []byte) error { + var res Routing + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/selector_cache.go b/vendor/github.com/cilium/cilium/api/v1/models/selector_cache.go index 3f941871e5..0fd85f207c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/selector_cache.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/selector_cache.go @@ -57,6 +57,11 @@ func (m SelectorCache) ContextValidate(ctx context.Context, formats strfmt.Regis for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go index 400dfc3346..80aa38532d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/selector_identity_mapping.go @@ -11,6 +11,7 @@ package models import ( "context" + "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" ) @@ -23,6 +24,9 @@ type SelectorIdentityMapping struct { // identities mapping to this selector Identities []int64 `json:"identities"` + // Labels are the metadata labels associated with the selector + Labels LabelArray `json:"labels,omitempty"` + // string form of selector Selector string `json:"selector,omitempty"` @@ -32,11 +36,60 @@ type SelectorIdentityMapping struct { // Validate validates this selector identity mapping func (m *SelectorIdentityMapping) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateLabels(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *SelectorIdentityMapping) validateLabels(formats strfmt.Registry) error { + if swag.IsZero(m.Labels) { // not required + return nil + } + + if err := m.Labels.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } -// ContextValidate validates this selector identity mapping based on context it is used +// ContextValidate validate this selector identity mapping based on the context it is used func (m *SelectorIdentityMapping) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateLabels(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *SelectorIdentityMapping) contextValidateLabels(ctx context.Context, formats strfmt.Registry) error { + + if err := m.Labels.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("labels") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("labels") + } + return err + } + return nil } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/service.go b/vendor/github.com/cilium/cilium/api/v1/models/service.go index cef8a924a4..8c38db3fbc 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/service.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/service.go @@ -105,6 +105,11 @@ func (m *Service) ContextValidate(ctx context.Context, formats strfmt.Registry) func (m *Service) contextValidateSpec(ctx context.Context, formats strfmt.Registry) error { if m.Spec != nil { + + if swag.IsZero(m.Spec) { // not required + return nil + } + if err := m.Spec.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("spec") @@ -121,6 +126,11 @@ func (m *Service) contextValidateSpec(ctx context.Context, formats strfmt.Regist func (m *Service) contextValidateStatus(ctx context.Context, formats strfmt.Registry) error { if m.Status != nil { + + if swag.IsZero(m.Status) { // not required + return nil + } + if err := m.Status.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("status") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go index eae7726a6a..7d83d13aff 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go @@ -157,6 +157,11 @@ func (m *ServiceSpec) contextValidateBackendAddresses(ctx context.Context, forma for i := 0; i < len(m.BackendAddresses); i++ { if m.BackendAddresses[i] != nil { + + if swag.IsZero(m.BackendAddresses[i]) { // not required + return nil + } + if err := m.BackendAddresses[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("backend-addresses" + "." 
+ strconv.Itoa(i)) @@ -175,6 +180,11 @@ func (m *ServiceSpec) contextValidateBackendAddresses(ctx context.Context, forma func (m *ServiceSpec) contextValidateFlags(ctx context.Context, formats strfmt.Registry) error { if m.Flags != nil { + + if swag.IsZero(m.Flags) { // not required + return nil + } + if err := m.Flags.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("flags") @@ -191,6 +201,7 @@ func (m *ServiceSpec) contextValidateFlags(ctx context.Context, formats strfmt.R func (m *ServiceSpec) contextValidateFrontendAddress(ctx context.Context, formats strfmt.Registry) error { if m.FrontendAddress != nil { + if err := m.FrontendAddress.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("frontend-address") @@ -231,14 +242,14 @@ type ServiceSpecFlags struct { Cluster string `json:"cluster,omitempty"` // Service external traffic policy - // Enum: [Cluster Local] + // Enum: ["Cluster","Local"] ExtTrafficPolicy string `json:"extTrafficPolicy,omitempty"` // Service health check node port HealthCheckNodePort uint16 `json:"healthCheckNodePort,omitempty"` // Service internal traffic policy - // Enum: [Cluster Local] + // Enum: ["Cluster","Local"] IntTrafficPolicy string `json:"intTrafficPolicy,omitempty"` // Service name (e.g. Kubernetes service name) @@ -248,15 +259,15 @@ type ServiceSpecFlags struct { Namespace string `json:"namespace,omitempty"` // Service protocol NAT policy - // Enum: [None Nat46 Nat64] + // Enum: ["None","Nat46","Nat64"] NatPolicy string `json:"natPolicy,omitempty"` // Service external traffic policy (deprecated in favor of extTrafficPolicy) - // Enum: [Cluster Local] + // Enum: ["Cluster","Local"] TrafficPolicy string `json:"trafficPolicy,omitempty"` // Service type - // Enum: [ClusterIP NodePort ExternalIPs HostPort LoadBalancer LocalRedirect] + // Enum: ["ClusterIP","NodePort","ExternalIPs","HostPort","LoadBalancer","LocalRedirect"] Type string `json:"type,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/service_status.go b/vendor/github.com/cilium/cilium/api/v1/models/service_status.go index c9bc87b196..32c65f32d5 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/service_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/service_status.go @@ -75,6 +75,11 @@ func (m *ServiceStatus) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *ServiceStatus) contextValidateRealized(ctx context.Context, formats strfmt.Registry) error { if m.Realized != nil { + + if swag.IsZero(m.Realized) { // not required + return nil + } + if err := m.Realized.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("realized") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/srv6.go b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go new file mode 100644 index 0000000000..1b3182a487 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/srv6.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Srv6 Status of the SRv6 +// +// +k8s:deepcopy-gen=true +// +// swagger:model Srv6 +type Srv6 struct { + + // enabled + Enabled bool `json:"enabled,omitempty"` + + // srv6 encap mode + // Enum: ["SRH","Reduced"] + Srv6EncapMode string `json:"srv6EncapMode,omitempty"` +} + +// Validate validates this srv6 +func (m *Srv6) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSrv6EncapMode(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +var srv6TypeSrv6EncapModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["SRH","Reduced"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + srv6TypeSrv6EncapModePropEnum = append(srv6TypeSrv6EncapModePropEnum, v) + } +} + +const ( + + // Srv6Srv6EncapModeSRH captures enum value "SRH" + Srv6Srv6EncapModeSRH string = "SRH" + + // Srv6Srv6EncapModeReduced captures enum value "Reduced" + Srv6Srv6EncapModeReduced string = "Reduced" +) + +// prop value enum +func (m *Srv6) validateSrv6EncapModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, srv6TypeSrv6EncapModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *Srv6) validateSrv6EncapMode(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6EncapMode) { // not required + return nil + } + + // value enum + if err := m.validateSrv6EncapModeEnum("srv6EncapMode", "body", m.Srv6EncapMode); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this srv6 based on context it is used +func (m *Srv6) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Srv6) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Srv6) UnmarshalBinary(b []byte) error { + var res Srv6 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go new file mode 100644 index 0000000000..df5ca5a801 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/state_d_b_query.go @@ -0,0 +1,62 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StateDBQuery StateDB query +// +// swagger:model StateDBQuery +type StateDBQuery struct { + + // Index to query against + Index string `json:"index,omitempty"` + + // Key to query with. Base64 encoded. 
+ Key string `json:"key,omitempty"` + + // LowerBound prefix search or full-matching Get + Lowerbound bool `json:"lowerbound,omitempty"` + + // Name of the table to query + Table string `json:"table,omitempty"` +} + +// Validate validates this state d b query +func (m *StateDBQuery) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this state d b query based on context it is used +func (m *StateDBQuery) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StateDBQuery) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StateDBQuery) UnmarshalBinary(b []byte) error { + var res StateDBQuery + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/status.go b/vendor/github.com/cilium/cilium/api/v1/models/status.go index 236b14567a..ad125a1ac5 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/status.go @@ -27,7 +27,7 @@ type Status struct { Msg string `json:"msg,omitempty"` // State the component is in - // Enum: [Ok Warning Failure Disabled] + // Enum: ["Ok","Warning","Failure","Disabled"] State string `json:"state,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go index 1073dd5276..beb6bd9694 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/status_response.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/status_response.go @@ -24,6 +24,12 @@ import ( // swagger:model StatusResponse type StatusResponse struct { + // Status of core datapath attachment mode + AttachMode AttachMode `json:"attach-mode,omitempty"` + + // Status of Mutual Authentication certificate provider + AuthCertificateProvider *Status `json:"auth-certificate-provider,omitempty"` + // Status of bandwidth manager BandwidthManager *BandwidthManager `json:"bandwidth-manager,omitempty"` @@ -60,15 +66,15 @@ type StatusResponse struct { // Status of all endpoint controllers Controllers ControllerStatuses `json:"controllers,omitempty"` + // Status of datapath mode + DatapathMode DatapathMode `json:"datapath-mode,omitempty"` + // Status of transparent encryption Encryption *EncryptionStatus `json:"encryption,omitempty"` // Status of the host firewall HostFirewall *HostFirewall `json:"host-firewall,omitempty"` - // Status of host routing - HostRouting *HostRouting `json:"host-routing,omitempty"` - // Status of Hubble server Hubble *HubbleStatus `json:"hubble,omitempty"` @@ -102,6 +108,12 @@ type StatusResponse struct { // Status of proxy Proxy *ProxyStatus `json:"proxy,omitempty"` + // Status of routing + Routing *Routing `json:"routing,omitempty"` + + // Status of SRv6 + Srv6 *Srv6 `json:"srv6,omitempty"` + // List of stale information in the status Stale map[string]strfmt.DateTime `json:"stale,omitempty"` } @@ -110,6 +122,14 @@ type StatusResponse struct { func (m *StatusResponse) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateAttachMode(formats); err != nil { + res = append(res, err) + } + + if err := m.validateAuthCertificateProvider(formats); err != nil { + res = append(res, err) + } + if err := m.validateBandwidthManager(formats); err != nil { res = append(res, err) } @@ -150,15 
+170,15 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateEncryption(formats); err != nil { + if err := m.validateDatapathMode(formats); err != nil { res = append(res, err) } - if err := m.validateHostFirewall(formats); err != nil { + if err := m.validateEncryption(formats); err != nil { res = append(res, err) } - if err := m.validateHostRouting(formats); err != nil { + if err := m.validateHostFirewall(formats); err != nil { res = append(res, err) } @@ -206,6 +226,14 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateRouting(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSrv6(formats); err != nil { + res = append(res, err) + } + if err := m.validateStale(formats); err != nil { res = append(res, err) } @@ -216,6 +244,42 @@ func (m *StatusResponse) Validate(formats strfmt.Registry) error { return nil } +func (m *StatusResponse) validateAttachMode(formats strfmt.Registry) error { + if swag.IsZero(m.AttachMode) { // not required + return nil + } + + if err := m.AttachMode.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("attach-mode") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("attach-mode") + } + return err + } + + return nil +} + +func (m *StatusResponse) validateAuthCertificateProvider(formats strfmt.Registry) error { + if swag.IsZero(m.AuthCertificateProvider) { // not required + return nil + } + + if m.AuthCertificateProvider != nil { + if err := m.AuthCertificateProvider.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("auth-certificate-provider") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("auth-certificate-provider") + } + return err + } + } + + return nil +} + func (m *StatusResponse) validateBandwidthManager(formats strfmt.Registry) error { if swag.IsZero(m.BandwidthManager) { // not required return nil @@ -404,6 +468,23 @@ func (m *StatusResponse) validateControllers(formats strfmt.Registry) error { return nil } +func (m *StatusResponse) validateDatapathMode(formats strfmt.Registry) error { + if swag.IsZero(m.DatapathMode) { // not required + return nil + } + + if err := m.DatapathMode.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("datapath-mode") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("datapath-mode") + } + return err + } + + return nil +} + func (m *StatusResponse) validateEncryption(formats strfmt.Registry) error { if swag.IsZero(m.Encryption) { // not required return nil @@ -442,25 +523,6 @@ func (m *StatusResponse) validateHostFirewall(formats strfmt.Registry) error { return nil } -func (m *StatusResponse) validateHostRouting(formats strfmt.Registry) error { - if swag.IsZero(m.HostRouting) { // not required - return nil - } - - if m.HostRouting != nil { - if err := m.HostRouting.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("host-routing") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("host-routing") - } - return err - } - } - - return nil -} - func (m *StatusResponse) validateHubble(formats strfmt.Registry) error { if swag.IsZero(m.Hubble) { // not required return nil @@ -670,6 +732,44 @@ func (m *StatusResponse) validateProxy(formats 
strfmt.Registry) error { return nil } +func (m *StatusResponse) validateRouting(formats strfmt.Registry) error { + if swag.IsZero(m.Routing) { // not required + return nil + } + + if m.Routing != nil { + if err := m.Routing.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("routing") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("routing") + } + return err + } + } + + return nil +} + +func (m *StatusResponse) validateSrv6(formats strfmt.Registry) error { + if swag.IsZero(m.Srv6) { // not required + return nil + } + + if m.Srv6 != nil { + if err := m.Srv6.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + func (m *StatusResponse) validateStale(formats strfmt.Registry) error { if swag.IsZero(m.Stale) { // not required return nil @@ -690,6 +790,14 @@ func (m *StatusResponse) validateStale(formats strfmt.Registry) error { func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { var res []error + if err := m.contextValidateAttachMode(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateAuthCertificateProvider(ctx, formats); err != nil { + res = append(res, err) + } + if err := m.contextValidateBandwidthManager(ctx, formats); err != nil { res = append(res, err) } @@ -730,15 +838,15 @@ func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Reg res = append(res, err) } - if err := m.contextValidateEncryption(ctx, formats); err != nil { + if err := m.contextValidateDatapathMode(ctx, formats); err != nil { res = append(res, err) } - if err := m.contextValidateHostFirewall(ctx, formats); err != nil { + if err := m.contextValidateEncryption(ctx, formats); err != nil { res = append(res, err) } - if err := m.contextValidateHostRouting(ctx, formats); err != nil { + if err := m.contextValidateHostFirewall(ctx, formats); err != nil { res = append(res, err) } @@ -786,15 +894,67 @@ func (m *StatusResponse) ContextValidate(ctx context.Context, formats strfmt.Reg res = append(res, err) } + if err := m.contextValidateRouting(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateSrv6(ctx, formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
} return nil } +func (m *StatusResponse) contextValidateAttachMode(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.AttachMode) { // not required + return nil + } + + if err := m.AttachMode.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("attach-mode") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("attach-mode") + } + return err + } + + return nil +} + +func (m *StatusResponse) contextValidateAuthCertificateProvider(ctx context.Context, formats strfmt.Registry) error { + + if m.AuthCertificateProvider != nil { + + if swag.IsZero(m.AuthCertificateProvider) { // not required + return nil + } + + if err := m.AuthCertificateProvider.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("auth-certificate-provider") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("auth-certificate-provider") + } + return err + } + } + + return nil +} + func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, formats strfmt.Registry) error { if m.BandwidthManager != nil { + + if swag.IsZero(m.BandwidthManager) { // not required + return nil + } + if err := m.BandwidthManager.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bandwidth-manager") @@ -811,6 +971,11 @@ func (m *StatusResponse) contextValidateBandwidthManager(ctx context.Context, fo func (m *StatusResponse) contextValidateBpfMaps(ctx context.Context, formats strfmt.Registry) error { if m.BpfMaps != nil { + + if swag.IsZero(m.BpfMaps) { // not required + return nil + } + if err := m.BpfMaps.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("bpf-maps") @@ -827,6 +992,11 @@ func (m *StatusResponse) contextValidateBpfMaps(ctx context.Context, formats str func (m *StatusResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error { if m.Cilium != nil { + + if swag.IsZero(m.Cilium) { // not required + return nil + } + if err := m.Cilium.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cilium") @@ -843,6 +1013,11 @@ func (m *StatusResponse) contextValidateCilium(ctx context.Context, formats strf func (m *StatusResponse) contextValidateClockSource(ctx context.Context, formats strfmt.Registry) error { if m.ClockSource != nil { + + if swag.IsZero(m.ClockSource) { // not required + return nil + } + if err := m.ClockSource.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("clock-source") @@ -859,6 +1034,11 @@ func (m *StatusResponse) contextValidateClockSource(ctx context.Context, formats func (m *StatusResponse) contextValidateCluster(ctx context.Context, formats strfmt.Registry) error { if m.Cluster != nil { + + if swag.IsZero(m.Cluster) { // not required + return nil + } + if err := m.Cluster.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cluster") @@ -875,6 +1055,11 @@ func (m *StatusResponse) contextValidateCluster(ctx context.Context, formats str func (m *StatusResponse) contextValidateClusterMesh(ctx context.Context, formats strfmt.Registry) error { if m.ClusterMesh != nil { + + if swag.IsZero(m.ClusterMesh) { // not required + return nil + } + if err := m.ClusterMesh.ContextValidate(ctx, 
formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cluster-mesh") @@ -891,6 +1076,11 @@ func (m *StatusResponse) contextValidateClusterMesh(ctx context.Context, formats func (m *StatusResponse) contextValidateCniChaining(ctx context.Context, formats strfmt.Registry) error { if m.CniChaining != nil { + + if swag.IsZero(m.CniChaining) { // not required + return nil + } + if err := m.CniChaining.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cni-chaining") @@ -907,6 +1097,11 @@ func (m *StatusResponse) contextValidateCniChaining(ctx context.Context, formats func (m *StatusResponse) contextValidateCniFile(ctx context.Context, formats strfmt.Registry) error { if m.CniFile != nil { + + if swag.IsZero(m.CniFile) { // not required + return nil + } + if err := m.CniFile.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("cni-file") @@ -923,6 +1118,11 @@ func (m *StatusResponse) contextValidateCniFile(ctx context.Context, formats str func (m *StatusResponse) contextValidateContainerRuntime(ctx context.Context, formats strfmt.Registry) error { if m.ContainerRuntime != nil { + + if swag.IsZero(m.ContainerRuntime) { // not required + return nil + } + if err := m.ContainerRuntime.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("container-runtime") @@ -950,9 +1150,32 @@ func (m *StatusResponse) contextValidateControllers(ctx context.Context, formats return nil } +func (m *StatusResponse) contextValidateDatapathMode(ctx context.Context, formats strfmt.Registry) error { + + if swag.IsZero(m.DatapathMode) { // not required + return nil + } + + if err := m.DatapathMode.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("datapath-mode") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("datapath-mode") + } + return err + } + + return nil +} + func (m *StatusResponse) contextValidateEncryption(ctx context.Context, formats strfmt.Registry) error { if m.Encryption != nil { + + if swag.IsZero(m.Encryption) { // not required + return nil + } + if err := m.Encryption.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("encryption") @@ -969,6 +1192,11 @@ func (m *StatusResponse) contextValidateEncryption(ctx context.Context, formats func (m *StatusResponse) contextValidateHostFirewall(ctx context.Context, formats strfmt.Registry) error { if m.HostFirewall != nil { + + if swag.IsZero(m.HostFirewall) { // not required + return nil + } + if err := m.HostFirewall.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("host-firewall") @@ -982,25 +1210,14 @@ func (m *StatusResponse) contextValidateHostFirewall(ctx context.Context, format return nil } -func (m *StatusResponse) contextValidateHostRouting(ctx context.Context, formats strfmt.Registry) error { - - if m.HostRouting != nil { - if err := m.HostRouting.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("host-routing") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("host-routing") - } - return err - } - } - - return nil -} - func (m *StatusResponse) contextValidateHubble(ctx context.Context, formats strfmt.Registry) error { if m.Hubble 
!= nil { + + if swag.IsZero(m.Hubble) { // not required + return nil + } + if err := m.Hubble.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("hubble") @@ -1017,6 +1234,11 @@ func (m *StatusResponse) contextValidateHubble(ctx context.Context, formats strf func (m *StatusResponse) contextValidateIdentityRange(ctx context.Context, formats strfmt.Registry) error { if m.IdentityRange != nil { + + if swag.IsZero(m.IdentityRange) { // not required + return nil + } + if err := m.IdentityRange.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("identity-range") @@ -1033,6 +1255,11 @@ func (m *StatusResponse) contextValidateIdentityRange(ctx context.Context, forma func (m *StatusResponse) contextValidateIpam(ctx context.Context, formats strfmt.Registry) error { if m.Ipam != nil { + + if swag.IsZero(m.Ipam) { // not required + return nil + } + if err := m.Ipam.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipam") @@ -1049,6 +1276,11 @@ func (m *StatusResponse) contextValidateIpam(ctx context.Context, formats strfmt func (m *StatusResponse) contextValidateIPV4BigTCP(ctx context.Context, formats strfmt.Registry) error { if m.IPV4BigTCP != nil { + + if swag.IsZero(m.IPV4BigTCP) { // not required + return nil + } + if err := m.IPV4BigTCP.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv4-big-tcp") @@ -1065,6 +1297,11 @@ func (m *StatusResponse) contextValidateIPV4BigTCP(ctx context.Context, formats func (m *StatusResponse) contextValidateIPV6BigTCP(ctx context.Context, formats strfmt.Registry) error { if m.IPV6BigTCP != nil { + + if swag.IsZero(m.IPV6BigTCP) { // not required + return nil + } + if err := m.IPV6BigTCP.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("ipv6-big-tcp") @@ -1081,6 +1318,11 @@ func (m *StatusResponse) contextValidateIPV6BigTCP(ctx context.Context, formats func (m *StatusResponse) contextValidateKubeProxyReplacement(ctx context.Context, formats strfmt.Registry) error { if m.KubeProxyReplacement != nil { + + if swag.IsZero(m.KubeProxyReplacement) { // not required + return nil + } + if err := m.KubeProxyReplacement.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("kube-proxy-replacement") @@ -1097,6 +1339,11 @@ func (m *StatusResponse) contextValidateKubeProxyReplacement(ctx context.Context func (m *StatusResponse) contextValidateKubernetes(ctx context.Context, formats strfmt.Registry) error { if m.Kubernetes != nil { + + if swag.IsZero(m.Kubernetes) { // not required + return nil + } + if err := m.Kubernetes.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("kubernetes") @@ -1113,6 +1360,11 @@ func (m *StatusResponse) contextValidateKubernetes(ctx context.Context, formats func (m *StatusResponse) contextValidateKvstore(ctx context.Context, formats strfmt.Registry) error { if m.Kvstore != nil { + + if swag.IsZero(m.Kvstore) { // not required + return nil + } + if err := m.Kvstore.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("kvstore") @@ -1129,6 +1381,11 @@ func (m *StatusResponse) contextValidateKvstore(ctx context.Context, formats str func (m *StatusResponse) 
contextValidateMasquerading(ctx context.Context, formats strfmt.Registry) error { if m.Masquerading != nil { + + if swag.IsZero(m.Masquerading) { // not required + return nil + } + if err := m.Masquerading.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("masquerading") @@ -1145,6 +1402,11 @@ func (m *StatusResponse) contextValidateMasquerading(ctx context.Context, format func (m *StatusResponse) contextValidateNodeMonitor(ctx context.Context, formats strfmt.Registry) error { if m.NodeMonitor != nil { + + if swag.IsZero(m.NodeMonitor) { // not required + return nil + } + if err := m.NodeMonitor.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("nodeMonitor") @@ -1161,6 +1423,11 @@ func (m *StatusResponse) contextValidateNodeMonitor(ctx context.Context, formats func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfmt.Registry) error { if m.Proxy != nil { + + if swag.IsZero(m.Proxy) { // not required + return nil + } + if err := m.Proxy.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("proxy") @@ -1174,6 +1441,48 @@ func (m *StatusResponse) contextValidateProxy(ctx context.Context, formats strfm return nil } +func (m *StatusResponse) contextValidateRouting(ctx context.Context, formats strfmt.Registry) error { + + if m.Routing != nil { + + if swag.IsZero(m.Routing) { // not required + return nil + } + + if err := m.Routing.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("routing") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("routing") + } + return err + } + } + + return nil +} + +func (m *StatusResponse) contextValidateSrv6(ctx context.Context, formats strfmt.Registry) error { + + if m.Srv6 != nil { + + if swag.IsZero(m.Srv6) { // not required + return nil + } + + if err := m.Srv6.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("srv6") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("srv6") + } + return err + } + } + + return nil +} + // MarshalBinary interface implementation func (m *StatusResponse) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/vendor/github.com/cilium/cilium/api/v1/models/trace_selector.go b/vendor/github.com/cilium/cilium/api/v1/models/trace_selector.go index 6ecd360326..4c1b514f52 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/trace_selector.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/trace_selector.go @@ -109,6 +109,11 @@ func (m *TraceSelector) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *TraceSelector) contextValidateFrom(ctx context.Context, formats strfmt.Registry) error { if m.From != nil { + + if swag.IsZero(m.From) { // not required + return nil + } + if err := m.From.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("from") @@ -125,6 +130,11 @@ func (m *TraceSelector) contextValidateFrom(ctx context.Context, formats strfmt. 
func (m *TraceSelector) contextValidateTo(ctx context.Context, formats strfmt.Registry) error { if m.To != nil { + + if swag.IsZero(m.To) { // not required + return nil + } + if err := m.To.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("to") diff --git a/vendor/github.com/cilium/cilium/api/v1/models/trace_to.go b/vendor/github.com/cilium/cilium/api/v1/models/trace_to.go index 6455e21073..e1dcc453f6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/trace_to.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/trace_to.go @@ -115,6 +115,11 @@ func (m *TraceTo) contextValidateDports(ctx context.Context, formats strfmt.Regi for i := 0; i < len(m.Dports); i++ { if m.Dports[i] != nil { + + if swag.IsZero(m.Dports[i]) { // not required + return nil + } + if err := m.Dports[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("dports" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go index f73b640add..0325eba0a9 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_interface.go @@ -17,14 +17,14 @@ import ( "github.com/go-openapi/swag" ) -// WireguardInterface Status of a Wireguard interface +// WireguardInterface Status of a WireGuard interface // // +k8s:deepcopy-gen=true // // swagger:model WireguardInterface type WireguardInterface struct { - // Port on which the Wireguard endpoint is exposed + // Port on which the WireGuard endpoint is exposed ListenPort int64 `json:"listen-port,omitempty"` // Name of the interface @@ -33,7 +33,7 @@ type WireguardInterface struct { // Number of peers configured on this interface PeerCount int64 `json:"peer-count,omitempty"` - // Optional list of wireguard peers + // Optional list of WireGuard peers Peers []*WireguardPeer `json:"peers"` // Public key of this interface @@ -99,6 +99,11 @@ func (m *WireguardInterface) contextValidatePeers(ctx context.Context, formats s for i := 0; i < len(m.Peers); i++ { if m.Peers[i] != nil { + + if swag.IsZero(m.Peers[i]) { // not required + return nil + } + if err := m.Peers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("peers" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go index f1c7c12354..7d5664e2ef 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_peer.go @@ -17,7 +17,7 @@ import ( "github.com/go-openapi/validate" ) -// WireguardPeer Status of a Wireguard peer +// WireguardPeer Status of a WireGuard peer // // +k8s:deepcopy-gen=true // diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go index 98285fb5b1..753ad1754e 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go @@ -17,14 +17,14 @@ import ( "github.com/go-openapi/swag" ) -// WireguardStatus Status of the Wireguard agent +// WireguardStatus Status of the WireGuard agent // // +k8s:deepcopy-gen=true // // swagger:model WireguardStatus type WireguardStatus struct { - // Wireguard interfaces managed by this Cilium instance + // WireGuard interfaces managed by this Cilium instance Interfaces []*WireguardInterface `json:"interfaces"` // Node Encryption status @@ -90,6 +90,11 @@ func (m *WireguardStatus) contextValidateInterfaces(ctx context.Context, formats for i := 0; i < len(m.Interfaces); i++ { if m.Interfaces[i] != nil { + + if swag.IsZero(m.Interfaces[i]) { // not required + return nil + } + if err := m.Interfaces[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("interfaces" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go index 0c59fe6adf..d93fea4d1b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go @@ -319,6 +319,11 @@ func (in *ControllerStatusStatus) DeepCopy() *ControllerStatusStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EncryptionStatus) DeepCopyInto(out *EncryptionStatus) { *out = *in + if in.Ipsec != nil { + in, out := &in.Ipsec, &out.Ipsec + *out = new(IPsecStatus) + (*in).DeepCopyInto(*out) + } if in.Wireguard != nil { in, out := &in.Wireguard, &out.Wireguard *out = new(WireguardStatus) @@ -404,22 +409,6 @@ func (in *HostFirewall) DeepCopy() *HostFirewall { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostRouting) DeepCopyInto(out *HostRouting) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostRouting. -func (in *HostRouting) DeepCopy() *HostRouting { - if in == nil { - return nil - } - out := new(HostRouting) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HubbleStatus) DeepCopyInto(out *HubbleStatus) { *out = *in @@ -528,6 +517,34 @@ func (in *IPV6BigTCP) DeepCopy() *IPV6BigTCP { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPsecStatus) DeepCopyInto(out *IPsecStatus) { + *out = *in + if in.DecryptInterfaces != nil { + in, out := &in.DecryptInterfaces, &out.DecryptInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.XfrmErrors != nil { + in, out := &in.XfrmErrors, &out.XfrmErrors + *out = make(map[string]int64, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPsecStatus. +func (in *IPsecStatus) DeepCopy() *IPsecStatus { + if in == nil { + return nil + } + out := new(IPsecStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IdentityRange) DeepCopyInto(out *IdentityRange) { *out = *in @@ -626,6 +643,11 @@ func (in *KubeProxyReplacementDeviceListItems0) DeepCopy() *KubeProxyReplacement // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeProxyReplacementFeatures) DeepCopyInto(out *KubeProxyReplacementFeatures) { *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.ExternalIPs != nil { in, out := &in.ExternalIPs, &out.ExternalIPs *out = new(KubeProxyReplacementFeaturesExternalIPs) @@ -1135,13 +1157,13 @@ func (in *RemoteCluster) DeepCopyInto(out *RemoteCluster) { if in.Config != nil { in, out := &in.Config, &out.Config *out = new(RemoteClusterConfig) - **out = **in + (*in).DeepCopyInto(*out) } in.LastFailure.DeepCopyInto(&out.LastFailure) if in.Synced != nil { in, out := &in.Synced, &out.Synced *out = new(RemoteClusterSynced) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -1159,6 +1181,11 @@ func (in *RemoteCluster) DeepCopy() *RemoteCluster { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteClusterConfig) DeepCopyInto(out *RemoteClusterConfig) { *out = *in + if in.ServiceExportsEnabled != nil { + in, out := &in.ServiceExportsEnabled, &out.ServiceExportsEnabled + *out = new(bool) + **out = **in + } return } @@ -1175,6 +1202,11 @@ func (in *RemoteClusterConfig) DeepCopy() *RemoteClusterConfig { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteClusterSynced) DeepCopyInto(out *RemoteClusterSynced) { *out = *in + if in.ServiceExports != nil { + in, out := &in.ServiceExports, &out.ServiceExports + *out = new(bool) + **out = **in + } return } @@ -1214,9 +1246,46 @@ func (in *RequestResponseStatistics) DeepCopy() *RequestResponseStatistics { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Routing) DeepCopyInto(out *Routing) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Routing. +func (in *Routing) DeepCopy() *Routing { + if in == nil { + return nil + } + out := new(Routing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Srv6) DeepCopyInto(out *Srv6) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Srv6. 
+func (in *Srv6) DeepCopy() *Srv6 { + if in == nil { + return nil + } + out := new(Srv6) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = *in + if in.AuthCertificateProvider != nil { + in, out := &in.AuthCertificateProvider, &out.AuthCertificateProvider + *out = new(Status) + **out = **in + } if in.BandwidthManager != nil { in, out := &in.BandwidthManager, &out.BandwidthManager *out = new(BandwidthManager) @@ -1283,11 +1352,6 @@ func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = new(HostFirewall) (*in).DeepCopyInto(*out) } - if in.HostRouting != nil { - in, out := &in.HostRouting, &out.HostRouting - *out = new(HostRouting) - **out = **in - } if in.Hubble != nil { in, out := &in.Hubble, &out.Hubble *out = new(HubbleStatus) @@ -1343,6 +1407,16 @@ func (in *StatusResponse) DeepCopyInto(out *StatusResponse) { *out = new(ProxyStatus) (*in).DeepCopyInto(*out) } + if in.Routing != nil { + in, out := &in.Routing, &out.Routing + *out = new(Routing) + **out = **in + } + if in.Srv6 != nil { + in, out := &in.Srv6, &out.Srv6 + *out = new(Srv6) + **out = **in + } if in.Stale != nil { in, out := &in.Stale, &out.Stale *out = make(map[string]strfmt.DateTime, len(*in)) diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go index 3996ca8382..130475ae4b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepequal.go @@ -58,6 +58,9 @@ func (in *EndpointIdentifiers) DeepEqual(other *EndpointIdentifiers) bool { return false } + if in.CniAttachmentID != other.CniAttachmentID { + return false + } if in.ContainerID != other.ContainerID { return false } diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go index e0b6f9b070..b579758133 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/types.go @@ -126,6 +126,10 @@ type ENI struct { Tags map[string]string `json:"tags,omitempty"` } +func (e *ENI) DeepCopyInterface() types.Interface { + return e.DeepCopy() +} + // InterfaceID returns the identifier of the interface func (e *ENI) InterfaceID() string { return e.NetworkInterfaceID @@ -174,6 +178,11 @@ type VPC struct { // // +optional IPv6CIDRBlock string `json:"ipv6-cidr,omitempty"` + + // SecondaryCIDRs is the list of Secondary CIDRs associated with the VPC + // + // +optional + SecondaryCIDRs []string `json:"secondary-cidrs,omitempty"` } type VSwitch struct { diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go index ac246f5d18..309101578c 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepcopy.go @@ -16,7 +16,7 @@ func (in *ENI) DeepCopyInto(out *ENI) { *out = make([]string, len(*in)) copy(*out, *in) } - out.VPC = in.VPC + in.VPC.DeepCopyInto(&out.VPC) out.VSwitch = in.VSwitch if in.PrivateIPSets != nil { in, out := &in.PrivateIPSets, &out.PrivateIPSets @@ -125,6 +125,11 @@ func (in 
*Spec) DeepCopy() *Spec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VPC) DeepCopyInto(out *VPC) { *out = *in + if in.SecondaryCIDRs != nil { + in, out := &in.SecondaryCIDRs, &out.SecondaryCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go index 04c8e81576..55fea351a1 100644 --- a/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/alibabacloud/eni/types/zz_generated.deepequal.go @@ -44,7 +44,7 @@ func (in *ENI) DeepEqual(other *ENI) bool { } } - if in.VPC != other.VPC { + if !in.VPC.DeepEqual(&other.VPC) { return false } @@ -261,6 +261,22 @@ func (in *VPC) DeepEqual(other *VPC) bool { if in.IPv6CIDRBlock != other.IPv6CIDRBlock { return false } + if ((in.SecondaryCIDRs != nil) && (other.SecondaryCIDRs != nil)) || ((in.SecondaryCIDRs == nil) != (other.SecondaryCIDRs == nil)) { + in, other := &in.SecondaryCIDRs, &other.SecondaryCIDRs + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } return true } diff --git a/vendor/github.com/cilium/cilium/pkg/api/apierror.go b/vendor/github.com/cilium/cilium/pkg/api/apierror.go index 83c44eeba1..dd17174437 100644 --- a/vendor/github.com/cilium/cilium/pkg/api/apierror.go +++ b/vendor/github.com/cilium/cilium/pkg/api/apierror.go @@ -4,6 +4,7 @@ package api import ( + "errors" "fmt" "net/http" @@ -30,13 +31,18 @@ func New(code int, msg string, args ...interface{}) *APIError { return &APIError{code: code, msg: msg} } +// GetCode returns the code for the API Error. +func (a *APIError) GetCode() int { + return a.code +} + // Error creates a new API error from the code and error. func Error(code int, err error) *APIError { if err == nil { - err = fmt.Errorf("Error pointer was nil") + err = errors.New("Error pointer was nil") } - return New(code, err.Error()) + return New(code, err.Error()) //nolint:govet } // Error returns the API error message. 
diff --git a/vendor/github.com/cilium/cilium/pkg/api/apipanic.go b/vendor/github.com/cilium/cilium/pkg/api/apipanic.go index 38481b26d2..2ff6209d0e 100644 --- a/vendor/github.com/cilium/cilium/pkg/api/apipanic.go +++ b/vendor/github.com/cilium/cilium/pkg/api/apipanic.go @@ -4,9 +4,11 @@ package api import ( + "errors" "net/http" "os" "runtime/debug" + "syscall" "github.com/sirupsen/logrus" @@ -24,12 +26,17 @@ func (h *APIPanicHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) { defer func() { if r := recover(); r != nil { fields := logrus.Fields{ - "panic_message": r, - "url": req.URL.String(), - "method": req.Method, - "client": req.RemoteAddr, + "url": req.URL.String(), + "method": req.Method, + "client": req.RemoteAddr, } - log.WithFields(fields).Warn("Cilium API handler panicked") + + if err, ok := r.(error); ok && errors.Is(err, syscall.EPIPE) { + log.WithError(err).WithFields(fields).Debug("Failed to write API response: client connection closed") + return + } + + log.WithFields(fields).WithField("panic_message", r).Warn("Cilium API handler panicked") if logging.DefaultLogger.IsLevelEnabled(logrus.DebugLevel) { os.Stdout.Write(debug.Stack()) } diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go index aec96efcb2..5fecd2467a 100644 --- a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/types.go @@ -206,6 +206,15 @@ type ENI struct { // // +optional Tags map[string]string `json:"tags,omitempty"` + + // PublicIP is the public IP associated with the ENI + // + // +optional + PublicIP string `json:"public-ip,omitempty"` +} + +func (e *ENI) DeepCopyInterface() types.Interface { + return e.DeepCopy() } // InterfaceID returns the identifier of the interface diff --git a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go index 4c1c93fc0e..00b50c30d5 100644 --- a/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/aws/eni/types/zz_generated.deepequal.go @@ -163,6 +163,10 @@ func (in *ENI) DeepEqual(other *ENI) bool { } } + if in.PublicIP != other.PublicIP { + return false + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/azure/types/types.go b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go index 0d42674e87..ba7419a24b 100644 --- a/vendor/github.com/cilium/cilium/pkg/azure/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/azure/types/types.go @@ -126,6 +126,10 @@ type AzureInterface struct { resourceGroup string `json:"-"` } +func (a *AzureInterface) DeepCopyInterface() types.Interface { + return a.DeepCopy() +} + // SetID sets the Azure interface ID, as well as extracting other fields from // the ID itself. 
func (a *AzureInterface) SetID(id string) { diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go index a7e974ff64..13ff20da17 100644 --- a/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr.go @@ -7,6 +7,7 @@ import ( "bytes" "fmt" "net" + "slices" ) // NewCIDR returns a new CIDR using a net.IPNet @@ -18,6 +19,30 @@ func NewCIDR(ipnet *net.IPNet) *CIDR { return &CIDR{ipnet} } +func NewCIDRSlice(ipnets []*net.IPNet) []*CIDR { + if ipnets == nil { + return nil + } + + cidrs := make([]*CIDR, len(ipnets)) + for i, ipnet := range ipnets { + cidrs[i] = NewCIDR(ipnet) + } + return cidrs +} + +func CIDRsToIPNets(cidrs []*CIDR) []*net.IPNet { + if cidrs == nil { + return nil + } + + ipnets := make([]*net.IPNet, len(cidrs)) + for i, cidr := range cidrs { + ipnets[i] = cidr.IPNet + } + return ipnets +} + // CIDR is a network CIDR representation based on net.IPNet type CIDR struct { *net.IPNet @@ -72,7 +97,6 @@ func (in *CIDR) DeepCopyInto(out *CIDR) { *out = make(net.IPMask, len(*in)) copy(*out, *in) } - return } // AvailableIPs returns the number of IPs available in a CIDR @@ -89,7 +113,7 @@ func (n *CIDR) Equal(o *CIDR) bool { return Equal(n.IPNet, o.IPNet) } -// Equal returns true if the n and o net.IPNet CIDRs arr Equal. +// Equal returns true if the n and o net.IPNet CIDRs are Equal. func Equal(n, o *net.IPNet) bool { if n == nil || o == nil { return n == o @@ -101,43 +125,33 @@ func Equal(n, o *net.IPNet) bool { bytes.Equal(n.Mask, o.Mask) } -// ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2' -func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool { - for _, n := range ipNets2 { - if !Contains(ipNets1, n) { - return false +// ZeroNet generates a zero net.IPNet object for the given address family +func ZeroNet(family int) *net.IPNet { + switch family { + case FAMILY_V4: + return &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 8*net.IPv4len), } - } - return true -} - -// Contains returns true if 'ipNets' contains ipNet. -func Contains(ipNets []*net.IPNet, ipNet *net.IPNet) bool { - for _, n := range ipNets { - if Equal(n, ipNet) { - return true + case FAMILY_V6: + return &net.IPNet{ + IP: net.IPv6zero, + Mask: net.CIDRMask(0, 8*net.IPv6len), } } - return false + return nil } -// RemoveAll removes all cidrs specified in 'toRemove' from 'ipNets'. ipNets -// is clobbered (to ensure removed CIDRs can be garbage collected) and -// must not be used after this function has been called. 
-// Example usage: -// -// cidrs = cidr.RemoveAll(cidrs, toRemove) -func RemoveAll(ipNets, toRemove []*net.IPNet) []*net.IPNet { - newIPNets := ipNets[:0] - for _, n := range ipNets { - if !Contains(toRemove, n) { - newIPNets = append(newIPNets, n) +// ContainsAll returns true if 'ipNets1' contains all net.IPNet of 'ipNets2' +func ContainsAll(ipNets1, ipNets2 []*net.IPNet) bool { + for _, n2 := range ipNets2 { + if !slices.ContainsFunc(ipNets1, func(n1 *net.IPNet) bool { + return Equal(n2, n1) + }) { + return false } } - for i := len(newIPNets); i < len(ipNets); i++ { - ipNets[i] = nil // or the zero value of T - } - return newIPNets + return true } // ParseCIDR parses the CIDR string using net.ParseCIDR diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go new file mode 100644 index 0000000000..a43d9b46af --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_linux.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cidr + +import "github.com/vishvananda/netlink/nl" + +// Family type definitions +const ( + FAMILY_ALL = nl.FAMILY_ALL + FAMILY_V4 = nl.FAMILY_V4 + FAMILY_V6 = nl.FAMILY_V6 + FAMILY_MPLS = nl.FAMILY_MPLS +) diff --git a/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go new file mode 100644 index 0000000000..dfe393960f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/cidr/cidr_unspecified.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package cidr + +// Dummy values on non-linux platform +const ( + FAMILY_V4 = iota + FAMILY_V6 +) diff --git a/vendor/github.com/cilium/cilium/pkg/client/client.go b/vendor/github.com/cilium/cilium/pkg/client/client.go index a1af5a888c..fef6a84f44 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/client/client.go @@ -12,7 +12,7 @@ import ( "net/http" "net/url" "os" - "sort" + "slices" "strings" "text/tabwriter" "time" @@ -39,7 +39,6 @@ func DefaultSockPath() string { e = defaults.SockPath } return "unix://" + e - } func configureTransport(tr *http.Transport, proto, addr string) *http.Transport { @@ -75,7 +74,7 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { for { select { case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) + return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err) default: } @@ -88,7 +87,7 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { for { select { case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) + return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %w", timeout.Seconds(), err) default: } // This is an API call that we do to the cilium-agent to check @@ -107,38 +106,79 @@ func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { // If host is nil then use SockPath provided by CILIUM_SOCK // or the cilium default SockPath func NewClient(host string) (*Client, error) { - clientTrans, err := NewRuntime(host) + clientTrans, err := NewRuntime(WithHost(host)) return &Client{*clientapi.New(clientTrans, strfmt.Default)}, err } -func NewRuntime(host string) 
(*runtime_client.Runtime, error) { +type runtimeOptions struct { + host string + basePath string +} + +func WithHost(host string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.host = host + } +} + +func WithBasePath(basePath string) func(options *runtimeOptions) { + return func(options *runtimeOptions) { + options.basePath = basePath + } +} + +func NewTransport(host string) (*http.Transport, error) { if host == "" { host = DefaultSockPath() } - tmp := strings.SplitN(host, "://", 2) - if len(tmp) != 2 { + schema, host, found := strings.Cut(host, "://") + if !found { return nil, fmt.Errorf("invalid host format '%s'", host) } - - hostHeader := tmp[1] - - switch tmp[0] { + switch schema { case "tcp": - if _, err := url.Parse("tcp://" + tmp[1]); err != nil { + if _, err := url.Parse("tcp://" + host); err != nil { return nil, err } - host = "http://" + tmp[1] + host = "http://" + host case "unix": - host = tmp[1] + } + return configureTransport(nil, schema, host), nil +} + +func NewRuntime(opts ...func(options *runtimeOptions)) (*runtime_client.Runtime, error) { + r := runtimeOptions{} + for _, opt := range opts { + opt(&r) + } + + basePath := r.basePath + if basePath == "" { + basePath = clientapi.DefaultBasePath + } + + host := r.host + if host == "" { + host = DefaultSockPath() + } + + _, hostHeader, found := strings.Cut(host, "://") + if !found { + return nil, fmt.Errorf("invalid host format '%s'", host) + } + if strings.HasPrefix(host, "unix") { // For local communication (unix domain sockets), the hostname is not used. Leave // Host header empty because otherwise it would be rejected by net/http client-side // sanitization, see https://go.dev/issue/60374. hostHeader = "localhost" } - transport := configureTransport(nil, tmp[0], host) + transport, err := NewTransport(host) + if err != nil { + return nil, err + } httpClient := &http.Client{Transport: transport} - clientTrans := runtime_client.NewWithClient(hostHeader, clientapi.DefaultBasePath, + clientTrans := runtime_client.NewWithClient(hostHeader, basePath, clientapi.DefaultSchemes, httpClient) return clientTrans, nil } @@ -228,9 +268,9 @@ func clusterReadiness(cluster *models.RemoteCluster) string { return "ready" } -func numReadyClusters(clustermesh *models.ClusterMeshStatus) int { +func NumReadyClusters(clusters []*models.RemoteCluster) int { numReady := 0 - for _, cluster := range clustermesh.Clusters { + for _, cluster := range clusters { if cluster.Ready { numReady++ } @@ -290,14 +330,14 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai if sr.Kubernetes != nil { fmt.Fprintf(w, "Kubernetes:\t%s\t%s\n", sr.Kubernetes.State, sr.Kubernetes.Msg) if sr.Kubernetes.State != models.K8sStatusStateDisabled { - sort.Strings(sr.Kubernetes.K8sAPIVersions) + slices.Sort(sr.Kubernetes.K8sAPIVersions) fmt.Fprintf(w, "Kubernetes APIs:\t[\"%s\"]\n", strings.Join(sr.Kubernetes.K8sAPIVersions, "\", \"")) } } if sr.KubeProxyReplacement != nil { devices := "" - if sr.KubeProxyReplacement.Mode != models.KubeProxyReplacementModeDisabled { + if sr.KubeProxyReplacement.Mode != models.KubeProxyReplacementModeFalse { for i, dev := range sr.KubeProxyReplacement.DeviceList { kubeProxyDevices += fmt.Sprintf("%s %s", dev.Name, strings.Join(dev.IP, " ")) if dev.Name == sr.KubeProxyReplacement.DirectRoutingDevice { @@ -322,6 +362,20 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "\n") } + if sr.Srv6 != nil { + var fields []string + + status := 
"Disabled" + fields = append(fields, status) + + if sr.Srv6.Enabled { + fields[0] = "Enabled" + fields = append(fields, fmt.Sprintf("[encap-mode: %s]", sr.Srv6.Srv6EncapMode)) + } + + fmt.Fprintf(w, "SRv6:\t%s\n", strings.Join(fields, "\t")) + } + if sr.CniChaining != nil { fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode) } @@ -339,7 +393,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai for probe := range sr.Stale { sortedProbes = append(sortedProbes, probe) } - sort.Strings(sortedProbes) + slices.Sort(sortedProbes) stalesStr := make([]string, 0, len(sr.Stale)) for _, probe := range sortedProbes { @@ -374,7 +428,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai for ip, owner := range sr.Ipam.Allocations { out = append(out, fmt.Sprintf(" %s (%s)", ip, owner)) } - sort.Strings(out) + slices.Sort(out) for _, line := range out { fmt.Fprintln(w, line) } @@ -382,35 +436,15 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sr.ClusterMesh != nil { - fmt.Fprintf(w, "ClusterMesh:\t%d/%d clusters ready, %d global-services\n", - numReadyClusters(sr.ClusterMesh), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) - - for _, cluster := range sr.ClusterMesh.Clusters { - if sd.AllClusters || !cluster.Ready { - fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d failures (last: %s)\n", - cluster.Name, clusterReadiness(cluster), cluster.NumNodes, - cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices, - cluster.NumFailures, timeSince(time.Time(cluster.LastFailure))) - fmt.Fprintf(w, " └ %s\n", cluster.Status) - - fmt.Fprint(w, " └ remote configuration: ") - if cluster.Config != nil { - fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved) - if cluster.Config.Retrieved { - fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t", - cluster.Config.ClusterID, cluster.Config.Kvstoremesh, cluster.Config.SyncCanaries) - } - } else { - fmt.Fprint(w, "expected=unknown, retrieved=unknown") - } - fmt.Fprint(w, "\n") + fmt.Fprintf(w, "ClusterMesh:\t%d/%d remote clusters ready, %d global-services\n", + NumReadyClusters(sr.ClusterMesh.Clusters), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) - if cluster.Synced != nil { - fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v\n", - cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services) - } - } + verbosity := RemoteClustersStatusNotReadyOnly + if sd.AllClusters { + verbosity = RemoteClustersStatusVerbose } + + FormatStatusResponseRemoteClusters(w, sr.ClusterMesh.Clusters, verbosity) } if sr.IPV4BigTCP != nil { @@ -449,8 +483,34 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "BandwidthManager:\t%s\n", status) } - if sr.HostRouting != nil { - fmt.Fprintf(w, "Host Routing:\t%s\n", sr.HostRouting.Mode) + if sr.Routing != nil { + status := "Network: " + sr.Routing.InterHostRoutingMode + if sr.Routing.InterHostRoutingMode == models.RoutingInterHostRoutingModeTunnel { + status = status + " [" + sr.Routing.TunnelProtocol + "]" + } + status = status + "\tHost: " + sr.Routing.IntraHostRoutingMode + + fmt.Fprintf(w, "Routing:\t%s\n", status) + } + + if sr.AttachMode != "" { + status := "Legacy TC" + if sr.AttachMode == models.AttachModeTcx { + status = "TCX" + } + fmt.Fprintf(w, "Attach Mode:\t%s\n", 
status) + } + + if sr.DatapathMode != "" { + status := "?" + if sr.DatapathMode == models.DatapathModeVeth { + status = "veth" + } else if sr.DatapathMode == models.DatapathModeNetkitDashL2 { + status = "netkit-l2" + } else if sr.DatapathMode == models.DatapathModeNetkit { + status = "netkit" + } + fmt.Fprintf(w, "Device Mode:\t%s\n", status) } if sr.Masquerading != nil { @@ -538,7 +598,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "Controller Status:\t%d/%d healthy\n", nOK, len(sr.Controllers)) if len(out) > 1 { tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) - sort.Strings(out) + slices.Sort(out) for _, s := range out { fmt.Fprint(tab, s) } @@ -557,7 +617,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) fmt.Fprint(tab, " Protocol\tRedirect\tProxy Port\n") - sort.Strings(out) + slices.Sort(out) for _, s := range out { fmt.Fprint(tab, s) } @@ -606,7 +666,7 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil { - var selection, mode, xdp string + var selection, mode, dsrMode, xdp string lb := "Disabled" cIP := "Enabled" @@ -618,6 +678,10 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai } xdp = np.Acceleration mode = np.Mode + if mode == models.KubeProxyReplacementFeaturesNodePortModeDSR || + mode == models.KubeProxyReplacementFeaturesNodePortModeHybrid { + dsrMode = np.DsrMode + } nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax) lb = "Enabled" } @@ -684,6 +748,9 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai if mode != "" { fmt.Fprintf(tab, " Mode:\t%s\n", mode) } + if dsrMode != "" { + fmt.Fprintf(tab, " DSR Dispatch Mode:\t%s\n", dsrMode) + } if selection != "" { fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection) } @@ -708,6 +775,10 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(tab, " - LoadBalancer:\t%s \n", lb) fmt.Fprintf(tab, " - externalIPs:\t%s \n", eIP) fmt.Fprintf(tab, " - HostPort:\t%s\n", hPort) + fmt.Fprintf(tab, " Annotations:\n") + for _, annotation := range sr.KubeProxyReplacement.Features.Annotations { + fmt.Fprintf(tab, " - %s\n", annotation) + } tab.Flush() } @@ -745,3 +816,61 @@ func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetai fmt.Fprintf(w, "Encryption:\t%s\t%s\n", sr.Encryption.Mode, strings.Join(fields, ", ")) } } + +// RemoteClustersStatusVerbosity specifies the verbosity when formatting the remote clusters status information. +type RemoteClustersStatusVerbosity uint + +const ( + // RemoteClustersStatusVerbose outputs all remote clusters information. + RemoteClustersStatusVerbose RemoteClustersStatusVerbosity = iota + // RemoteClustersStatusBrief outputs a one-line summary only for ready clusters. + RemoteClustersStatusBrief + // RemoteClustersStatusNotReadyOnly outputs the remote clusters information for non-ready clusters only. 
+ RemoteClustersStatusNotReadyOnly +) + +func FormatStatusResponseRemoteClusters(w io.Writer, clusters []*models.RemoteCluster, verbosity RemoteClustersStatusVerbosity) { + for _, cluster := range clusters { + if verbosity != RemoteClustersStatusNotReadyOnly || !cluster.Ready { + fmt.Fprintf(w, " %s: %s, %d nodes, %d endpoints, %d identities, %d services, %d MCS-API service exports, %d reconnections (last: %s)\n", + cluster.Name, clusterReadiness(cluster), cluster.NumNodes, + cluster.NumEndpoints, cluster.NumIdentities, cluster.NumSharedServices, cluster.NumServiceExports, + cluster.NumFailures, timeSince(time.Time(cluster.LastFailure))) + + if verbosity == RemoteClustersStatusBrief && cluster.Ready { + continue + } + + fmt.Fprintf(w, " └ %s\n", cluster.Status) + + fmt.Fprint(w, " └ remote configuration: ") + if cluster.Config != nil { + fmt.Fprintf(w, "expected=%t, retrieved=%t", cluster.Config.Required, cluster.Config.Retrieved) + serviceExportsConfig := "unsupported" + if cluster.Config.ServiceExportsEnabled != nil { + if *cluster.Config.ServiceExportsEnabled { + serviceExportsConfig = "enabled" + } else { + serviceExportsConfig = "disabled" + } + } + if cluster.Config.Retrieved { + fmt.Fprintf(w, ", cluster-id=%d, kvstoremesh=%t, sync-canaries=%t, service-exports=%s", + cluster.Config.ClusterID, cluster.Config.Kvstoremesh, cluster.Config.SyncCanaries, serviceExportsConfig) + } + } else { + fmt.Fprint(w, "expected=unknown, retrieved=unknown") + } + fmt.Fprint(w, "\n") + + if cluster.Synced != nil { + fmt.Fprintf(w, " └ synchronization status: nodes=%v, endpoints=%v, identities=%v, services=%v", + cluster.Synced.Nodes, cluster.Synced.Endpoints, cluster.Synced.Identities, cluster.Synced.Services) + if cluster.Synced.ServiceExports != nil { + fmt.Fprintf(w, ", service-exports=%v", *cluster.Synced.ServiceExports) + } + fmt.Fprintln(w) + } + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go index 6fb289e451..7563a8ce28 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go @@ -20,6 +20,13 @@ func (c *Client) EndpointList() ([]*models.Endpoint, error) { return resp.Payload, nil } +// EndpointDeleteMany deletes multiple endpoints +func (c *Client) EndpointDeleteMany(req *models.EndpointBatchDeleteRequest) error { + params := endpoint.NewDeleteEndpointParams().WithEndpoint(req).WithTimeout(api.ClientTimeout) + _, _, err := c.Endpoint.DeleteEndpoint(params) + return Hint(err) +} + // EndpointGet returns endpoint by ID func (c *Client) EndpointGet(id string) (*models.Endpoint, error) { params := endpoint.NewGetEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout) @@ -34,11 +41,14 @@ func (c *Client) EndpointGet(id string) (*models.Endpoint, error) { } // EndpointCreate creates a new endpoint -func (c *Client) EndpointCreate(ep *models.EndpointChangeRequest) error { +func (c *Client) EndpointCreate(ep *models.EndpointChangeRequest) (*models.Endpoint, error) { id := pkgEndpointID.NewCiliumID(ep.ID) params := endpoint.NewPutEndpointIDParams().WithID(id).WithEndpoint(ep).WithTimeout(api.ClientTimeout) - _, err := c.Endpoint.PutEndpointID(params) - return Hint(err) + resp, err := c.Endpoint.PutEndpointID(params) + if err != nil { + return nil, Hint(err) + } + return resp.Payload, nil } // EndpointPatch modifies the endpoint diff --git a/vendor/github.com/cilium/cilium/pkg/client/policy.go 
b/vendor/github.com/cilium/cilium/pkg/client/policy.go index 5930c17666..f90cd86a7a 100644 --- a/vendor/github.com/cilium/cilium/pkg/client/policy.go +++ b/vendor/github.com/cilium/cilium/pkg/client/policy.go @@ -19,6 +19,16 @@ func (c *Client) PolicyPut(policyJSON string) (*models.Policy, error) { return resp.Payload, nil } +// PolicyReplace replaces the `policyJSON` +func (c *Client) PolicyReplace(policyJSON string, replace bool, replaceWithLabels []string) (*models.Policy, error) { + params := policy.NewPutPolicyParams().WithPolicy(policyJSON).WithReplace(&replace).WithReplaceWithLabels(replaceWithLabels).WithTimeout(api.ClientTimeout) + resp, err := c.Policy.PutPolicy(params) + if err != nil { + return nil, Hint(err) + } + return resp.Payload, nil +} + // PolicyGet returns policy rules func (c *Client) PolicyGet(labels []string) (*models.Policy, error) { params := policy.NewGetPolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout) diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go index 176bee29b7..354ec8acae 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/addressing.go @@ -4,14 +4,17 @@ package types import ( + "bytes" + "errors" "fmt" "net" "net/netip" "strconv" "strings" + "go4.org/netipx" + "github.com/cilium/cilium/pkg/cidr" - ippkg "github.com/cilium/cilium/pkg/ip" ) // @@ -40,6 +43,55 @@ type AddrCluster struct { const AddrClusterLen = 20 +var ( + errUnmarshalBadAddress = errors.New("AddrCluster.UnmarshalJSON: bad address") + errMarshalInvalidAddress = errors.New("AddrCluster.MarshalJSON: invalid address") + + jsonZeroAddress = []byte("\"\"") +) + +// MarshalJSON marshals the address as a string in the form +// @, e.g. "1.2.3.4@1" +func (a *AddrCluster) MarshalJSON() ([]byte, error) { + if !a.addr.IsValid() { + if a.clusterID != 0 { + return nil, errMarshalInvalidAddress + } + + // AddrCluster{} is the zero value. Preserve this across the + // marshalling by returning an empty string. + return jsonZeroAddress, nil + } + + var b bytes.Buffer + b.WriteByte('"') + b.WriteString(a.String()) + b.WriteByte('"') + return b.Bytes(), nil +} + +func (a *AddrCluster) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonZeroAddress) { + return nil + } + + if len(data) <= 2 || data[0] != '"' || data[len(data)-1] != '"' { + return errUnmarshalBadAddress + } + + // Drop the parens + data = data[1 : len(data)-1] + + a2, err := ParseAddrCluster(string(data)) + if err != nil { + return err + } + a.addr = a2.addr + a.clusterID = a2.clusterID + + return nil +} + // ParseAddrCluster parses s as an IP + ClusterID and returns AddrCluster. // The string s can be a bare IP string (any IP address format allowed in // netip.ParseAddr()) or IP string + @ + ClusterID with decimal. Bare IP @@ -94,10 +146,10 @@ func MustParseAddrCluster(s string) AddrCluster { return addrCluster } -// AddrClusterFromIP parses the given net.IP using ip.AddrFromIP and returns +// AddrClusterFromIP parses the given net.IP using netipx.FromStdIP and returns // AddrCluster with ClusterID = 0. 
func AddrClusterFromIP(ip net.IP) (AddrCluster, bool) { - addr, ok := ippkg.AddrFromIP(ip) + addr, ok := netipx.FromStdIP(ip) if !ok { return AddrCluster{}, false } @@ -221,7 +273,7 @@ func (ac AddrCluster) AsNetIP() net.IP { } func (ac AddrCluster) AsPrefixCluster() PrefixCluster { - return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), ac.clusterID) + return PrefixClusterFrom(ac.addr, ac.addr.BitLen(), WithClusterID(ac.clusterID)) } // PrefixCluster is a type that holds a pair of prefix and ClusterID. @@ -292,28 +344,32 @@ func (pc PrefixCluster) IsSingleIP() bool { return pc.prefix.IsSingleIP() } -func PrefixClusterFrom(addr netip.Addr, bits int, clusterID uint32) PrefixCluster { - return PrefixCluster{ - prefix: netip.PrefixFrom(addr, bits), - clusterID: clusterID, +type PrefixClusterOpts func(*PrefixCluster) + +func WithClusterID(id uint32) PrefixClusterOpts { + return func(pc *PrefixCluster) { pc.clusterID = id } +} + +func PrefixClusterFrom(addr netip.Addr, bits int, opts ...PrefixClusterOpts) PrefixCluster { + pc := PrefixCluster{prefix: netip.PrefixFrom(addr, bits)} + for _, opt := range opts { + opt(&pc) } + return pc } -func PrefixClusterFromCIDR(c *cidr.CIDR, clusterID uint32) PrefixCluster { +func PrefixClusterFromCIDR(c *cidr.CIDR, opts ...PrefixClusterOpts) PrefixCluster { if c == nil { return PrefixCluster{} } - addr, ok := ippkg.AddrFromIP(c.IP) + addr, ok := netipx.FromStdIP(c.IP) if !ok { return PrefixCluster{} } ones, _ := c.Mask.Size() - return PrefixCluster{ - prefix: netip.PrefixFrom(addr, ones), - clusterID: clusterID, - } + return PrefixClusterFrom(addr, ones, opts...) } func (pc0 PrefixCluster) Equal(pc1 PrefixCluster) bool { @@ -339,17 +395,22 @@ func (pc PrefixCluster) String() string { return pc.prefix.String() + "@" + strconv.FormatUint(uint64(pc.clusterID), 10) } +// AsPrefix returns the IP prefix part of PrefixCluster as a netip.Prefix type. +// This function exists for keeping backward compatibility between the existing +// components which are not aware of the cluster-aware addressing. Calling +// this function against the PrefixCluster which has non-zero clusterID will +// lose the ClusterID information. It should be used with an extra care. +func (pc PrefixCluster) AsPrefix() netip.Prefix { + return netip.PrefixFrom(pc.prefix.Addr(), pc.prefix.Bits()) +} + // AsIPNet returns the IP prefix part of PrefixCluster as a net.IPNet type. This // function exists for keeping backward compatibility between the existing // components which are not aware of the cluster-aware addressing. Calling // this function against the PrefixCluster which has non-zero clusterID will // lose the ClusterID information. It should be used with an extra care. func (pc PrefixCluster) AsIPNet() net.IPNet { - addr := pc.prefix.Addr() - return net.IPNet{ - IP: addr.AsSlice(), - Mask: net.CIDRMask(pc.prefix.Bits(), addr.BitLen()), - } + return *netipx.PrefixIPNet(pc.AsPrefix()) } // This function is solely exists for annotating IPCache's key string with ClusterID. 
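The pkg/client rework above replaces NewRuntime(host string) with a variadic functional-options form (WithHost, WithBasePath) and splits transport construction into NewTransport, so downstream call sites need a small adjustment when picking up this bump. A minimal sketch, with the agent socket path shown purely for illustration:

package main

import (
	"log"

	clientapi "github.com/cilium/cilium/api/v1/client"
	"github.com/cilium/cilium/pkg/client"
	"github.com/go-openapi/strfmt"
)

func main() {
	// Pre-1.17 call sites passed the host directly: client.NewRuntime(host).
	rt, err := client.NewRuntime(
		client.WithHost("unix:///var/run/cilium/cilium.sock"), // illustrative socket path
		client.WithBasePath(clientapi.DefaultBasePath),        // optional; the default is used when omitted
	)
	if err != nil {
		log.Fatal(err)
	}

	// The runtime plugs into the generated API client exactly as before.
	_ = clientapi.New(rt, strfmt.Default)
}

Note that client.NewClient(host) keeps its previous signature and simply forwards to NewRuntime(WithHost(host)), so callers using the higher-level constructor are unaffected.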
diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go new file mode 100644 index 0000000000..fe746e82b8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/option.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "errors" + "fmt" + + "github.com/spf13/pflag" + + "github.com/cilium/cilium/pkg/defaults" + ipamOption "github.com/cilium/cilium/pkg/ipam/option" +) + +const ( + // OptClusterName is the name of the OptClusterName option + OptClusterName = "cluster-name" + + // OptClusterID is the name of the OptClusterID option + OptClusterID = "cluster-id" + + // OptMaxConnectedClusters is the name of the OptMaxConnectedClusters option + OptMaxConnectedClusters = "max-connected-clusters" +) + +// ClusterInfo groups together the ClusterID and the ClusterName +type ClusterInfo struct { + ID uint32 `mapstructure:"cluster-id"` + Name string `mapstructure:"cluster-name"` + MaxConnectedClusters uint32 `mapstructure:"max-connected-clusters"` +} + +// DefaultClusterInfo represents the default ClusterInfo values. +var DefaultClusterInfo = ClusterInfo{ + ID: 0, + Name: defaults.ClusterName, + MaxConnectedClusters: defaults.MaxConnectedClusters, +} + +// Flags implements the cell.Flagger interface, to register the given flags. +func (def ClusterInfo) Flags(flags *pflag.FlagSet) { + flags.Uint32(OptClusterID, def.ID, "Unique identifier of the cluster") + flags.String(OptClusterName, def.Name, "Name of the cluster. It must consist of at most 32 lower case alphanumeric characters and '-', start and end with an alphanumeric character.") + flags.Uint32(OptMaxConnectedClusters, def.MaxConnectedClusters, "Maximum number of clusters to be connected in a clustermesh. Increasing this value will reduce the maximum number of identities available. Valid configurations are [255, 511].") +} + +// Validate validates that the ClusterID is in the valid range (including ClusterID == 0), +// and that the ClusterName is different from the default value if the ClusterID != 0. +func (c ClusterInfo) Validate() error { + if c.ID < ClusterIDMin || c.ID > ClusterIDMax { + return fmt.Errorf("invalid cluster id %d: must be in range %d..%d", + c.ID, ClusterIDMin, ClusterIDMax) + } + + return c.validateName() +} + +// ValidateStrict validates that the ClusterID is in the valid range, but not 0, +// and that the ClusterName is different from the default value. +func (c ClusterInfo) ValidateStrict() error { + if err := ValidateClusterID(c.ID); err != nil { + return err + } + + return c.validateName() +} + +// ValidateBuggyClusterID returns an error if a buggy cluster ID (i.e., with the +// 7th bit set) is used in combination with ENI IPAM mode or AWS CNI chaining. +func (c ClusterInfo) ValidateBuggyClusterID(ipamMode, chainingMode string) error { + if (c.ID&0x80) != 0 && (ipamMode == ipamOption.IPAMENI || ipamMode == ipamOption.IPAMAlibabaCloud || chainingMode == "aws-cni") { + return errors.New("Cilium is currently affected by a bug that causes traffic matched " + + "by network policies to be incorrectly dropped when running in either ENI mode (both " + + "AWS and AlibabaCloud) or AWS VPC CNI chaining mode, if the cluster ID is 128-255 (and " + + "384-511 when max-connected-clusters=511). 
" + + "Please refer to https://github.com/cilium/cilium/issues/21330 for additional details.") + } + + return nil +} + +func (c ClusterInfo) validateName() error { + if err := ValidateClusterName(c.Name); err != nil { + return fmt.Errorf("invalid cluster name: %w", err) + } + + if c.ID != 0 && c.Name == defaults.ClusterName { + return fmt.Errorf("cannot use default cluster name (%s) with option %s", + defaults.ClusterName, OptClusterID) + } + + return nil +} + +// ExtendedClusterMeshEnabled returns true if MaxConnectedClusters value has +// been set to a value larger than the default 255. +func (c ClusterInfo) ExtendedClusterMeshEnabled() bool { + return c.MaxConnectedClusters != defaults.MaxConnectedClusters +} + +// ValidateRemoteConfig validates the remote CiliumClusterConfig to ensure +// compatibility with this cluster's configuration. +func (c ClusterInfo) ValidateRemoteConfig(config CiliumClusterConfig) error { + if err := ValidateClusterID(config.ID); err != nil { + return err + } + + if c.ExtendedClusterMeshEnabled() && (c.MaxConnectedClusters != config.Capabilities.MaxConnectedClusters) { + return fmt.Errorf("mismatched MaxConnectedClusters; local=%d, remote=%d", c.MaxConnectedClusters, config.Capabilities.MaxConnectedClusters) + } + + return nil +} + +// QuirksConfig allows the user to configure how Cilium behaves when a set +// of incompatible options are configured together into the agent. +type QuirksConfig struct { + // AllowUnsafePolicySKBUsage determines whether to hard-fail startup + // due to detection of a configuration combination that may trigger + // connection impact in the dataplane due to clustermesh IDs + // conflicting with other usage of skb->mark field. See GH-21330. + AllowUnsafePolicySKBUsage bool +} + +var DefaultQuirks = QuirksConfig{ + AllowUnsafePolicySKBUsage: false, +} + +func (_ QuirksConfig) Flags(flags *pflag.FlagSet) { + flags.Bool("allow-unsafe-policy-skb-usage", false, + "Allow the daemon to continue to operate even if conflicting "+ + "clustermesh ID configuration is detected which may "+ + "impact the ability for Cilium to enforce network "+ + "policy both within and across clusters") + flags.MarkHidden("allow-unsafe-policy-skb-usage") +} diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go index e92f269fd7..930e98335f 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go @@ -4,17 +4,50 @@ package types import ( + "errors" "fmt" + "regexp" + + "github.com/cilium/cilium/pkg/defaults" ) const ( // ClusterIDMin is the minimum value of the cluster ID - ClusterIDMin = 0 + ClusterIDMin = 0 + ClusterIDExt511 = 511 - // ClusterIDMax is the maximum value of the cluster ID - ClusterIDMax = 255 + ClusterIDUnset = ClusterIDMin ) +// ClusterIDMax is the maximum value of the cluster ID +var ClusterIDMax uint32 = defaults.MaxConnectedClusters + +// A cluster name must respect the following constraints: +// * It must contain at most 32 characters; +// * It must begin and end with a lower case alphanumeric character; +// * It may contain lower case alphanumeric characters and dashes between. +const ( + // clusterNameMaxLength is the maximum allowed length of a cluster name. + clusterNameMaxLength = 32 + // clusterNameRegexStr is the regex to validate a cluster name. 
+ clusterNameRegexStr = `^([a-z0-9][-a-z0-9]*)?[a-z0-9]$` +) + +var clusterNameRegex = regexp.MustCompile(clusterNameRegexStr) + +// InitClusterIDMax validates and sets the ClusterIDMax package level variable. +func (c ClusterInfo) InitClusterIDMax() error { + switch c.MaxConnectedClusters { + case defaults.MaxConnectedClusters, ClusterIDExt511: + ClusterIDMax = c.MaxConnectedClusters + default: + return fmt.Errorf("--%s=%d is invalid; supported values are [%d, %d]", OptMaxConnectedClusters, c.MaxConnectedClusters, defaults.MaxConnectedClusters, ClusterIDExt511) + } + return nil +} + +// ValidateClusterID ensures that the given clusterID is within the configured +// range of the ClusterMesh. func ValidateClusterID(clusterID uint32) error { if clusterID == ClusterIDMin { return fmt.Errorf("ClusterID %d is reserved", ClusterIDMin) @@ -27,6 +60,23 @@ func ValidateClusterID(clusterID uint32) error { return nil } +// ValidateClusterName validates that the given name matches the cluster name specifications. +func ValidateClusterName(name string) error { + if name == "" { + return errors.New("must not be empty") + } + + if len(name) > clusterNameMaxLength { + return fmt.Errorf("must not be more than %d characters", clusterNameMaxLength) + } + + if !clusterNameRegex.MatchString(name) { + return errors.New("must consist of lower case alphanumeric characters and '-', and must start and end with an alphanumeric character") + } + + return nil +} + type CiliumClusterConfig struct { ID uint32 `json:"id,omitempty"` @@ -41,30 +91,11 @@ type CiliumClusterConfigCapabilities struct { // kvstore (for instance, by kvstoremesh). This implies that keys are stored // under the dedicated "cilium/cache" prefix, and all are cluster-scoped. Cached bool `json:"cached,omitempty"` -} -func (c *CiliumClusterConfig) Validate() error { - if c == nil || c.ID == 0 { - // When remote cluster doesn't have cluster config, we - // currently just bypass the validation for compatibility. - // Otherwise, we cannot connect with older cluster which - // doesn't support cluster config feature. - // - // When we introduce a new cluster config can't be ignored, - // we should properly check it here and return error. Now - // we only have ClusterID which used to be ignored. - return nil - } - - if err := ValidateClusterID(c.ID); err != nil { - return err - } - - return nil -} + // The maximum number of clusters the given cluster can support in a ClusterMesh. + MaxConnectedClusters uint32 `json:"maxConnectedClusters,omitempty"` -// ClusterIDName groups together the ClusterID and the ClusterName -type ClusterIDName struct { - ClusterID uint32 - ClusterName string + // Whether or not MCS-API ServiceExports is enabled by the cluster. + // Additionally a nil values means that it's not supported. + ServiceExportsEnabled *bool `json:"serviceExportsEnabled,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go new file mode 100644 index 0000000000..959e903730 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package exec provides useful wrappers around the standard "exec" library. 
+package exec diff --git a/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go new file mode 100644 index 0000000000..1762e5dbba --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/command/exec/exec.go @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package exec + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "os/exec" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/time" +) + +func warnToLog(cmd *exec.Cmd, out []byte, scopedLog *logrus.Entry, err error) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + scanner := bufio.NewScanner(bytes.NewReader(out)) + for scanner.Scan() { + scopedLog.Warn(scanner.Text()) + } +} + +// combinedOutput is the core implementation of catching deadline exceeded +// options and logging errors. +func combinedOutput(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.CombinedOutput() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil && verbose { + warnToLog(cmd, out, scopedLog, err) + } + return out, err +} + +// output is the equivalent to combinedOutput with only capturing stdout +func output(ctx context.Context, cmd *exec.Cmd, scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := cmd.Output() + if ctx.Err() != nil { + if !errors.Is(ctx.Err(), context.Canceled) { + scopedLog.WithError(err).WithField("cmd", cmd.Args).Error("Command execution failed") + } + return nil, fmt.Errorf("Command execution failed for %s: %w", cmd.Args, ctx.Err()) + } + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + err = fmt.Errorf("%w stderr=%q", exitErr, exitErr.Stderr) + } + if verbose { + warnToLog(cmd, out, scopedLog, err) + } + } + return out, err +} + +// Cmd wraps exec.Cmd with a context to provide convenient execution of a +// command with nice checking of the context timeout in the form: +// +// err := exec.Prog().WithTimeout(5*time.Second, myprog, myargs...).CombinedOutput(log, verbose) +type Cmd struct { + *exec.Cmd + ctx context.Context + cancelFn func() +} + +// CommandContext wraps exec.CommandContext to allow this package to be used as +// a drop-in replacement for the standard exec library. +func CommandContext(ctx context.Context, prog string, args ...string) *Cmd { + return &Cmd{ + Cmd: exec.CommandContext(ctx, prog, args...), + ctx: ctx, + } +} + +// WithTimeout creates a Cmd with a context that times out after the specified +// duration. +func WithTimeout(timeout time.Duration, prog string, args ...string) *Cmd { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + cmd := CommandContext(ctx, prog, args...) + cmd.cancelFn = cancel + return cmd +} + +// WithCancel creates a Cmd with a context that can be cancelled by calling the +// resulting Cancel() function. +func WithCancel(ctx context.Context, prog string, args ...string) (*Cmd, context.CancelFunc) { + newCtx, cancel := context.WithCancel(ctx) + cmd := CommandContext(newCtx, prog, args...) + return cmd, cancel +} + +// CombinedOutput runs the command and returns its combined standard output and +// standard error. 
Unlike the standard library, if the context is exceeded, it +// will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) CombinedOutput(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := combinedOutput(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} + +// Output runs the command and returns only standard output, but not the +// standard error. Unlike the standard library, if the context is exceeded, +// it will return an error indicating so. +// +// Logs any errors that occur to the specified logger. +func (c *Cmd) Output(scopedLog *logrus.Entry, verbose bool) ([]byte, error) { + out, err := output(c.ctx, c.Cmd, scopedLog, verbose) + if c.cancelFn != nil { + c.cancelFn() + } + return out, err +} diff --git a/vendor/github.com/cilium/cilium/pkg/command/output.go b/vendor/github.com/cilium/cilium/pkg/command/output.go index a3d0490df5..f6196048c5 100644 --- a/vendor/github.com/cilium/cilium/pkg/command/output.go +++ b/vendor/github.com/cilium/cilium/pkg/command/output.go @@ -58,7 +58,7 @@ func PrintOutput(data interface{}) error { func PrintOutputWithPatch(data interface{}, patch interface{}) error { mergedInterface, err := mergeInterfaces(data, patch) if err != nil { - return fmt.Errorf("Unable to merge Interfaces:%v", err) + return fmt.Errorf("Unable to merge Interfaces: %w", err) } return PrintOutputWithType(mergedInterface, outputOpt) } diff --git a/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go index c9ff7746d2..49ccc8dd1e 100644 --- a/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go +++ b/vendor/github.com/cilium/cilium/pkg/comparator/comparator.go @@ -3,71 +3,6 @@ package comparator -import ( - "github.com/kr/pretty" - "github.com/pmezard/go-difflib/difflib" -) - -// Compare compares two interfaces and emits a unified diff as string -func Compare(a, b interface{}) string { - return CompareWithNames(a, b, "a", "b") -} - -// CompareWithNames compares two interfaces and emits a unified diff as string -func CompareWithNames(a, b interface{}, nameA, nameB string) string { - stringA := pretty.Sprintf("%# v", a) - stringB := pretty.Sprintf("%# v", b) - diff := difflib.UnifiedDiff{ - A: difflib.SplitLines(stringA), - B: difflib.SplitLines(stringB), - FromFile: nameA, - ToFile: nameB, - Context: 32, - } - - out, err := difflib.GetUnifiedDiffString(diff) - if err != nil { - return err.Error() - } - return "Unified diff:\n" + out -} - -// MapStringEquals returns true if both maps are equal. -func MapStringEquals(m1, m2 map[string]string) bool { - switch { - case m1 == nil && m2 == nil: - return true - case m1 == nil && m2 != nil, - m1 != nil && m2 == nil, - len(m1) != len(m2): - return false - } - for k1, v1 := range m1 { - if v2, ok := m2[k1]; !ok || v2 != v1 { - return false - } - } - return true -} - -// MapBoolEquals returns true if both maps are equal. -func MapBoolEquals(m1, m2 map[string]bool) bool { - switch { - case m1 == nil && m2 == nil: - return true - case m1 == nil && m2 != nil, - m1 != nil && m2 == nil, - len(m1) != len(m2): - return false - } - for k1, v1 := range m1 { - if v2, ok := m2[k1]; !ok || v2 != v1 { - return false - } - } - return true -} - // MapStringEqualsIgnoreKeys returns true if both maps have the same values for // the keys that are not present in the 'ignoreKeys'. 
func MapStringEqualsIgnoreKeys(m1, m2 map[string]string, ignoreKeys []string) bool { @@ -102,8 +37,5 @@ func MapStringEqualsIgnoreKeys(m1, m2 map[string]string, ignoreKeys []string) bo ignoredM2++ } } - if len(m1)-ignoredM1 != len(m2)-ignoredM2 { - return false - } - return true + return len(m1)-ignoredM1 == len(m2)-ignoredM2 } diff --git a/vendor/github.com/cilium/cilium/pkg/components/components.go b/vendor/github.com/cilium/cilium/pkg/components/components.go deleted file mode 100644 index 38f483a07e..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/components/components.go +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package components - -import ( - "os" - "strings" -) - -const ( - // CiliumAgentName is the name of cilium-agent (daemon) process name. - CiliumAgentName = "cilium-agent" - // CiliumOperatortName is the name of cilium-operator process name. - CiliumOperatortName = "cilium-operator" - // CiliumDaemonTestName is the name of test binary for daemon package. - CiliumDaemonTestName = "cmd.test" -) - -// IsCiliumAgent checks whether the current process is cilium-agent (daemon). -func IsCiliumAgent() bool { - binaryName := os.Args[0] - return strings.HasSuffix(binaryName, CiliumAgentName) || - strings.HasSuffix(binaryName, CiliumDaemonTestName) -} diff --git a/vendor/github.com/cilium/cilium/pkg/container/cache/cache.go b/vendor/github.com/cilium/cilium/pkg/container/cache/cache.go new file mode 100644 index 0000000000..dcca33247d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/container/cache/cache.go @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cache + +import ( + "sync" +) + +const ( + cacheSize = 512 + cacheMask = cacheSize - 1 +) + +func New[T any](hashfn func(T) uint64, skipfn func(x T) bool, eqfn func(a, b T) bool) *Cache[T] { + return &Cache[T]{ + hashfn: hashfn, + eqfn: eqfn, + skipfn: skipfn, + pool: sync.Pool{New: func() any { + var arr [cacheSize]T + return &arr + }}, + } +} + +// Cache is a simple fixed size cache for efficient deduplication of objects. +type Cache[T any] struct { + // pool of cache arrays. Pool is used here as it provides a very efficient + // shared access to a set of "cache arrays", and under low memory scenarios + // allows the Go runtime to drop the caches. + pool sync.Pool + + skipfn func(T) bool + hashfn func(T) uint64 + eqfn func(a, b T) bool +} + +// Get a cached object if any. If Get() was called previously with an object equal to [x] +// and it is found from the cache then it is returned, otherwise [x] is inserted into +// cache. 
+func (c *Cache[T]) Get(x T) T { + if c.skipfn != nil && c.skipfn(x) { + return x + } + x, _ = c.get(x) + return x +} + +func (c *Cache[T]) get(x T) (T, uint64) { + hash := c.hashfn(x) + arr := c.pool.Get().(*[cacheSize]T) + idx := hash & cacheMask + v := (*arr)[idx] + if !c.eqfn(x, v) { + (*arr)[idx] = x + v = x + } + c.pool.Put(arr) + return v, hash +} diff --git a/vendor/github.com/cilium/cilium/pkg/container/cache/caches.go b/vendor/github.com/cilium/cilium/pkg/container/cache/caches.go new file mode 100644 index 0000000000..f1d2d792fc --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/container/cache/caches.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cache + +import ( + "maps" + + "github.com/cespare/xxhash/v2" +) + +var ( + Strings = New( + xxhash.Sum64String, + func(s string) bool { + // Skip caching of long strings + return len(s) > 256 + }, + func(a, b string) bool { return a == b }, + ) + + StringMaps = New( + func(m map[string]string) (hash uint64) { + for k, v := range m { + _, hashk := Strings.get(k) + _, hashv := Strings.get(v) + hash = hash ^ hashk ^ hashv + } + return + }, + func(m map[string]string) bool { + // Skip caching of large maps + return len(m) > 32 + }, + maps.Equal, + ) +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go new file mode 100644 index 0000000000..33ca884264 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_cgroup.go @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/link" + "golang.org/x/sys/unix" +) + +// HaveAttachCgroup returns nil if the kernel is compiled with +// CONFIG_CGROUP_BPF. +// +// It's only an approximation and doesn't execute a successful cgroup attachment +// under the hood. If any unexpected errors are encountered, the original error +// is returned. +func HaveAttachCgroup() error { + attachCgroupOnce.Do(func() { + attachCgroupResult = haveAttachCgroup() + }) + + return attachCgroupResult +} + +func haveAttachCgroup() error { + // Load known-good program supported by the earliest kernels with cgroup + // support. + spec := &ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + } + + p, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err != nil { + return fmt.Errorf("create cgroup program: %w: %w", err, ebpf.ErrNotSupported) + } + defer p.Close() + + // Attaching to a non-cgroup node should result in EBADF when creating the + // link, compared to EINVAL if the kernel does not support or was compiled + // without CONFIG_CGROUP_BPF. + _, err = link.AttachCgroup(link.CgroupOptions{Path: "/dev/null", Program: p, Attach: spec.AttachType}) + if errors.Is(err, unix.EBADF) { + // The kernel checked the given file descriptor from within the cgroup prog + // attach handler. Assume it supports attaching cgroup progs. + return nil + } + if err != nil { + // Preserve the original error in the error string. Needs Go 1.20. 
+ return fmt.Errorf("link cgroup program to /dev/null: %w: %w", err, ebpf.ErrNotSupported) + } + + return errors.New("attaching prog to /dev/null did not result in error") +} + +var attachCgroupOnce sync.Once +var attachCgroupResult error diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go new file mode 100644 index 0000000000..67ac02c8ab --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/attach_type.go @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/lock" +) + +// HaveAttachType returns nil if the given program/attach type combination is +// supported by the underlying kernel. Returns ebpf.ErrNotSupported if loading a +// program with the given Program/AttachType fails. If the probe is inconclusive +// due to an unrecognized return code, the original error is returned. +// +// Note that program types that don't use attach types will silently succeed if +// an attach type is specified. +// +// Probe results are cached by the package and shouldn't be memoized by the +// caller. +func HaveAttachType(pt ebpf.ProgramType, at ebpf.AttachType) (err error) { + if err := features.HaveProgramType(pt); err != nil { + return err + } + + attachProbesMu.Lock() + defer attachProbesMu.Unlock() + if err, ok := attachProbes[attachProbe{pt, at}]; ok { + return err + } + + defer func() { + // Closes over named return variable err to cache any returned errors. + attachProbes[attachProbe{pt, at}] = err + }() + + spec := &ebpf.ProgramSpec{ + Type: pt, + AttachType: at, + Instructions: asm.Instructions{ + // recvmsg and peername require a return value of 1, use it for all probes. + asm.LoadImm(asm.R0, 1, asm.DWord), + asm.Return(), + }, + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + // EINVAL occurs when attempting to create a program with an unknown type. + // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.E2BIG) { + err = ebpf.ErrNotSupported + } + if err != nil { + return err + } + + return nil +} + +type attachProbe struct { + pt ebpf.ProgramType + at ebpf.AttachType +} + +var attachProbesMu lock.Mutex +var attachProbes map[attachProbe]error = make(map[attachProbe]error) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go new file mode 100644 index 0000000000..285c8851d5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package probes provides BPF features checks based on bpftool. 
+package probes diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go new file mode 100644 index 0000000000..c815eb729e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/kernel_hz.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bufio" + "errors" + "fmt" + "io" + "math" + "os" + "time" +) + +// Available CONFIG_HZ values, sorted from highest to lowest. +var hzValues = []uint16{1000, 300, 250, 100} + +// KernelHZ attempts to estimate the kernel's CONFIG_HZ compile-time value by +// making snapshots of the kernel timestamp with a time interval in between. +// +// Blocks for at least 100ms while the measurement is in progress. Can block +// significantly longer under some hypervisors like VirtualBox due to buggy +// clocks, interrupt coalescing and low timer resolution. +func KernelHZ() (uint16, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + // Measure the kernel timestamp at least 100ms apart, giving kernel timer and + // wall clock ample opportunity to advance for adequate sample size. + j1, err := readSchedstat(f) + if err != nil { + return 0, err + } + + // On some platforms, this can put the goroutine to sleep for significantly + // longer than 100ms. Do not rely on readings being anywhere near 100ms apart. + time.Sleep(time.Millisecond * 100) + + j2, err := readSchedstat(f) + if err != nil { + return 0, err + } + + hz, err := j1.interpolate(j2) + if err != nil { + return 0, fmt.Errorf("interpolating hz value: %w", err) + } + + return nearest(hz, hzValues) +} + +// Jiffies returns the kernel's internal timestamp in jiffies read from +// /proc/schedstat. +func Jiffies() (uint64, error) { + f, err := os.Open("/proc/schedstat") + if err != nil { + return 0, err + } + defer f.Close() + + k, err := readSchedstat(f) + if err != nil { + return 0, err + } + + return k.k, nil +} + +// readSchedstat expects to read /proc/schedstat and returns the first line +// matching 'timestamp %d'. Upon return, f is rewound to allow reuse. +// +// Should not be called concurrently. +func readSchedstat(f io.ReadSeeker) (ktime, error) { + // Rewind the file when done so the next call gets fresh data. + defer func() { _, _ = f.Seek(0, 0) }() + + var j uint64 + var t = time.Now() + + s := bufio.NewScanner(f) + for s.Scan() { + if _, err := fmt.Sscanf(s.Text(), "timestamp %d", &j); err == nil { + return ktime{j, t}, nil + } + } + + return ktime{}, errors.New("no kernel timestamp found") +} + +type ktime struct { + k uint64 + t time.Time +} + +// interpolate returns the amount of jiffies (ktime) that would have elapsed if +// both ktimes were measured exactly 1 second apart. Using linear interpolation, +// the delta between both kernel timestamps is adjusted based on the elapsed +// wall time between both measurements. +func (old ktime) interpolate(new ktime) (uint16, error) { + if old.t.After(new.t) { + return 0, fmt.Errorf("old wall time %v is more recent than %v", old.t, new.t) + } + if old.k > new.k { + return 0, fmt.Errorf("old kernel timer %d is higher than %d", old.k, new.k) + } + + // Jiffy and duration delta. + kd := new.k - old.k + td := new.t.Sub(old.t) + + // Linear interpolation to represent elapsed jiffies as a per-second value. 
+ hz := float64(kd) / td.Seconds() + hz = math.Round(hz) + if hz > math.MaxUint16 { + return 0, fmt.Errorf("interpolated hz value would overflow uint16: %f", hz) + } + + return uint16(hz), nil +} + +// nearest returns the entry from values that's closest to in. If in has an +// equal distance to multiple values, the value that appears the earliest in +// values wins. Returns error if values is empty. +func nearest(in uint16, values []uint16) (uint16, error) { + if len(values) == 0 { + return 0, errors.New("values cannot be empty") + } + + var out uint16 + min := ^uint16(0) + for _, v := range values { + // Get absolute distance between in and v. + d := uint16(in - v) + if in < v { + d = v - in + } + + // Check if the distance to the current number is smaller than to the + // previous number. + if d < min { + min = d + out = v + } + } + + return out, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go new file mode 100644 index 0000000000..d80f27b97b --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/managed_neighbors.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "errors" + "fmt" + "net" + "sync" + + "github.com/vishvananda/netlink" + + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" + "github.com/cilium/cilium/pkg/netns" +) + +var ( + managedNeighborOnce sync.Once + managedNeighborResult error +) + +// HaveManagedNeighbors returns nil if the host supports managed neighbor entries (NTF_EXT_MANAGED). +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveManagedNeighbors() error { + managedNeighborOnce.Do(func() { + ns, err := netns.New() + if err != nil { + managedNeighborResult = fmt.Errorf("create netns: %w", err) + return + } + defer ns.Close() + + // In order to call haveManagedNeighbors safely, it has to be started + // in a standalone netns + managedNeighborResult = ns.Do(func() error { + return haveManagedNeighbors() + }) + + // if we encounter a different error than ErrNotSupported, terminate the agent. + if managedNeighborResult != nil && !errors.Is(managedNeighborResult, ErrNotSupported) { + log.WithError(managedNeighborResult).Fatal("failed to probe managed neighbor support") + } + }) + + return managedNeighborResult +} + +func haveManagedNeighbors() (outer error) { + // Use a veth device instead of a dummy to avoid the kernel having to modprobe + // the dummy kmod, which could potentially be compiled out. veth is currently + // a hard dependency for Cilium, so safe to assume the module is available if + // not already loaded. 
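
A rough sketch of how a caller might consume HaveManagedNeighbors (illustrative, assuming the vendored import path): nil means NTF_EXT_MANAGED entries can be used, ErrNotSupported means fall back, and any other outcome never reaches the caller because the probe itself is fatal.

package main

import (
	"errors"
	"fmt"

	"github.com/cilium/cilium/pkg/datapath/linux/probes"
)

func main() {
	err := probes.HaveManagedNeighbors()
	switch {
	case err == nil:
		fmt.Println("kernel manages neighbor refresh (NTF_EXT_MANAGED)")
	case errors.Is(err, probes.ErrNotSupported):
		fmt.Println("no managed neighbors, caller refreshes entries itself")
	default:
		// Not expected in practice: unexpected errors are fatal inside the probe.
		fmt.Printf("probe error: %v\n", err)
	}
}
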
+ veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: "veth0"}, + PeerName: "veth1", + } + + if err := netlink.LinkAdd(veth); err != nil { + return fmt.Errorf("failed to add dummy veth: %w", err) + } + + neigh := netlink.Neigh{ + LinkIndex: veth.Index, + IP: net.IPv4(0, 0, 0, 1), + Flags: NTF_EXT_LEARNED, + FlagsExt: NTF_EXT_MANAGED, + } + + if err := netlink.NeighAdd(&neigh); err != nil { + return fmt.Errorf("failed to add neighbor: %w", err) + } + + nl, err := safenetlink.NeighList(veth.Index, 0) + if err != nil { + return fmt.Errorf("failed to list neighbors: %w", err) + } + + for _, n := range nl { + if !n.IP.Equal(neigh.IP) { + continue + } + if n.Flags != NTF_EXT_LEARNED { + continue + } + if n.FlagsExt != NTF_EXT_MANAGED { + continue + } + + return nil + } + + return ErrNotSupported +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go new file mode 100644 index 0000000000..dcf0950dcd --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes.go @@ -0,0 +1,853 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "os" + "path/filepath" + "strings" + "sync" + "text/template" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/link" + "github.com/gopacket/gopacket" + "github.com/gopacket/gopacket/layers" + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/command/exec" + "github.com/cilium/cilium/pkg/defaults" + "github.com/cilium/cilium/pkg/logging" + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/netns" +) + +var ( + log = logging.DefaultLogger.WithField(logfields.LogSubsys, "probes") + once sync.Once + probeManager *ProbeManager + tpl = template.New("headerfile") +) + +func init() { + const content = ` +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright Authors of Cilium */ + +/* THIS FILE WAS GENERATED DURING AGENT STARTUP. */ + +#pragma once + +{{- if not .Common}} +#include "features.h" +{{- end}} + +{{- range $key, $value := .Features}} +{{- if $value}} +#define {{$key}} 1 +{{end}} +{{- end}} +` + var err error + tpl, err = tpl.Parse(content) + if err != nil { + log.WithError(err).Fatal("could not parse headerfile template") + } +} + +// ErrNotSupported indicates that a feature is not supported by the current kernel. +var ErrNotSupported = errors.New("not supported") + +// KernelParam is a type based on string which represents CONFIG_* kernel +// parameters which usually have values "y", "n" or "m". +type KernelParam string + +// Enabled checks whether the kernel parameter is enabled. +func (kp KernelParam) Enabled() bool { + return kp == "y" +} + +// Module checks whether the kernel parameter is enabled as a module. +func (kp KernelParam) Module() bool { + return kp == "m" +} + +// kernelOption holds information about kernel parameters to probe. +type kernelOption struct { + Description string + Enabled bool + CanBeModule bool +} + +type ProgramHelper struct { + Program ebpf.ProgramType + Helper asm.BuiltinFunc +} + +type miscFeatures struct { + HaveFibIfindex bool +} + +type FeatureProbes struct { + ProgramHelpers map[ProgramHelper]bool + Misc miscFeatures +} + +// SystemConfig contains kernel configuration and sysctl parameters related to +// BPF functionality. 
+type SystemConfig struct { + UnprivilegedBpfDisabled int `json:"unprivileged_bpf_disabled"` + BpfJitEnable int `json:"bpf_jit_enable"` + BpfJitHarden int `json:"bpf_jit_harden"` + BpfJitKallsyms int `json:"bpf_jit_kallsyms"` + BpfJitLimit int `json:"bpf_jit_limit"` + ConfigBpf KernelParam `json:"CONFIG_BPF"` + ConfigBpfSyscall KernelParam `json:"CONFIG_BPF_SYSCALL"` + ConfigHaveEbpfJit KernelParam `json:"CONFIG_HAVE_EBPF_JIT"` + ConfigBpfJit KernelParam `json:"CONFIG_BPF_JIT"` + ConfigBpfJitAlwaysOn KernelParam `json:"CONFIG_BPF_JIT_ALWAYS_ON"` + ConfigCgroups KernelParam `json:"CONFIG_CGROUPS"` + ConfigCgroupBpf KernelParam `json:"CONFIG_CGROUP_BPF"` + ConfigCgroupNetClassID KernelParam `json:"CONFIG_CGROUP_NET_CLASSID"` + ConfigSockCgroupData KernelParam `json:"CONFIG_SOCK_CGROUP_DATA"` + ConfigBpfEvents KernelParam `json:"CONFIG_BPF_EVENTS"` + ConfigKprobeEvents KernelParam `json:"CONFIG_KPROBE_EVENTS"` + ConfigUprobeEvents KernelParam `json:"CONFIG_UPROBE_EVENTS"` + ConfigTracing KernelParam `json:"CONFIG_TRACING"` + ConfigFtraceSyscalls KernelParam `json:"CONFIG_FTRACE_SYSCALLS"` + ConfigFunctionErrorInjection KernelParam `json:"CONFIG_FUNCTION_ERROR_INJECTION"` + ConfigBpfKprobeOverride KernelParam `json:"CONFIG_BPF_KPROBE_OVERRIDE"` + ConfigNet KernelParam `json:"CONFIG_NET"` + ConfigXdpSockets KernelParam `json:"CONFIG_XDP_SOCKETS"` + ConfigLwtunnelBpf KernelParam `json:"CONFIG_LWTUNNEL_BPF"` + ConfigNetActBpf KernelParam `json:"CONFIG_NET_ACT_BPF"` + ConfigNetClsBpf KernelParam `json:"CONFIG_NET_CLS_BPF"` + ConfigNetClsAct KernelParam `json:"CONFIG_NET_CLS_ACT"` + ConfigNetSchIngress KernelParam `json:"CONFIG_NET_SCH_INGRESS"` + ConfigXfrm KernelParam `json:"CONFIG_XFRM"` + ConfigIPRouteClassID KernelParam `json:"CONFIG_IP_ROUTE_CLASSID"` + ConfigIPv6Seg6Bpf KernelParam `json:"CONFIG_IPV6_SEG6_BPF"` + ConfigBpfLircMode2 KernelParam `json:"CONFIG_BPF_LIRC_MODE2"` + ConfigBpfStreamParser KernelParam `json:"CONFIG_BPF_STREAM_PARSER"` + ConfigNetfilterXtMatchBpf KernelParam `json:"CONFIG_NETFILTER_XT_MATCH_BPF"` + ConfigBpfilter KernelParam `json:"CONFIG_BPFILTER"` + ConfigBpfilterUmh KernelParam `json:"CONFIG_BPFILTER_UMH"` + ConfigTestBpf KernelParam `json:"CONFIG_TEST_BPF"` + ConfigKernelHz KernelParam `json:"CONFIG_HZ"` +} + +// MapTypes contains bools indicating which types of BPF maps the currently +// running kernel supports. 
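
These fields are filled straight from `bpftool -j feature probe` JSON; KernelParam keeps the raw "y"/"m"/"n" strings so Enabled() and Module() can tell built-in options from modules. A small, self-contained decoding sketch with a hand-written, truncated JSON snippet (values illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

// KernelParam mirrors the vendored type: the raw "y"/"n"/"m" string from bpftool.
type KernelParam string

func (kp KernelParam) Enabled() bool { return kp == "y" }
func (kp KernelParam) Module() bool  { return kp == "m" }

func main() {
	// Truncated stand-in for `bpftool -j feature probe` output.
	raw := []byte(`{"system_config":{"CONFIG_BPF":"y","CONFIG_NET_CLS_BPF":"m"}}`)

	var out struct {
		SystemConfig struct {
			ConfigBpf       KernelParam `json:"CONFIG_BPF"`
			ConfigNetClsBpf KernelParam `json:"CONFIG_NET_CLS_BPF"`
		} `json:"system_config"`
	}
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	sc := out.SystemConfig
	fmt.Println(sc.ConfigBpf.Enabled())      // true: compiled in
	fmt.Println(sc.ConfigNetClsBpf.Module()) // true: available as a module
}
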
+type MapTypes struct { + HaveHashMapType bool `json:"have_hash_map_type"` + HaveArrayMapType bool `json:"have_array_map_type"` + HaveProgArrayMapType bool `json:"have_prog_array_map_type"` + HavePerfEventArrayMapType bool `json:"have_perf_event_array_map_type"` + HavePercpuHashMapType bool `json:"have_percpu_hash_map_type"` + HavePercpuArrayMapType bool `json:"have_percpu_array_map_type"` + HaveStackTraceMapType bool `json:"have_stack_trace_map_type"` + HaveCgroupArrayMapType bool `json:"have_cgroup_array_map_type"` + HaveLruHashMapType bool `json:"have_lru_hash_map_type"` + HaveLruPercpuHashMapType bool `json:"have_lru_percpu_hash_map_type"` + HaveLpmTrieMapType bool `json:"have_lpm_trie_map_type"` + HaveArrayOfMapsMapType bool `json:"have_array_of_maps_map_type"` + HaveHashOfMapsMapType bool `json:"have_hash_of_maps_map_type"` + HaveDevmapMapType bool `json:"have_devmap_map_type"` + HaveSockmapMapType bool `json:"have_sockmap_map_type"` + HaveCpumapMapType bool `json:"have_cpumap_map_type"` + HaveXskmapMapType bool `json:"have_xskmap_map_type"` + HaveSockhashMapType bool `json:"have_sockhash_map_type"` + HaveCgroupStorageMapType bool `json:"have_cgroup_storage_map_type"` + HaveReuseportSockarrayMapType bool `json:"have_reuseport_sockarray_map_type"` + HavePercpuCgroupStorageMapType bool `json:"have_percpu_cgroup_storage_map_type"` + HaveQueueMapType bool `json:"have_queue_map_type"` + HaveStackMapType bool `json:"have_stack_map_type"` +} + +// Features contains BPF feature checks returned by bpftool. +type Features struct { + SystemConfig `json:"system_config"` + MapTypes `json:"map_types"` +} + +// ProbeManager is a manager of BPF feature checks. +type ProbeManager struct { + features Features +} + +// NewProbeManager returns a new instance of ProbeManager - a manager of BPF +// feature checks. +func NewProbeManager() *ProbeManager { + newProbeManager := func() { + probeManager = &ProbeManager{} + probeManager.features = probeManager.Probe() + } + once.Do(newProbeManager) + return probeManager +} + +// Probe probes the underlying kernel for features. +func (*ProbeManager) Probe() Features { + var features Features + out, err := exec.WithTimeout( + defaults.ExecTimeout, + "bpftool", "-j", "feature", "probe", + ).CombinedOutput(log, true) + if err != nil { + log.WithError(err).Fatal("could not run bpftool") + } + if err := json.Unmarshal(out, &features); err != nil { + log.WithError(err).Fatal("could not parse bpftool output") + } + return features +} + +// SystemConfigProbes performs a check of kernel configuration parameters. It +// returns an error when parameters required by Cilium are not enabled. It logs +// warnings when optional parameters are not enabled. 
+// +// When kernel config file is not found, bpftool can't probe kernel configuration +// parameter real setting, so only return error log when kernel config file exists +// and kernel configuration parameter setting is disabled +func (p *ProbeManager) SystemConfigProbes() error { + var notFound bool + if !p.KernelConfigAvailable() { + notFound = true + log.Info("Kernel config file not found: if the agent fails to start, check the system requirements at https://docs.cilium.io/en/stable/operations/system_requirements") + } + requiredParams := p.GetRequiredConfig() + for param, kernelOption := range requiredParams { + if !kernelOption.Enabled && !notFound { + module := "" + if kernelOption.CanBeModule { + module = " or module" + } + return fmt.Errorf("%s kernel parameter%s is required (needed for: %s)", param, module, kernelOption.Description) + } + } + optionalParams := p.GetOptionalConfig() + for param, kernelOption := range optionalParams { + if !kernelOption.Enabled && !notFound { + module := "" + if kernelOption.CanBeModule { + module = " or module" + } + log.Warningf("%s optional kernel parameter%s is not in kernel (needed for: %s)", param, module, kernelOption.Description) + } + } + return nil +} + +// GetRequiredConfig performs a check of mandatory kernel configuration options. It +// returns a map indicating which required kernel parameters are enabled - and which are not. +// GetRequiredConfig is being used by CLI "cilium kernel-check". +func (p *ProbeManager) GetRequiredConfig() map[KernelParam]kernelOption { + config := p.features.SystemConfig + coreInfraDescription := "Essential eBPF infrastructure" + kernelParams := make(map[KernelParam]kernelOption) + + kernelParams["CONFIG_BPF"] = kernelOption{ + Enabled: config.ConfigBpf.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_BPF_SYSCALL"] = kernelOption{ + Enabled: config.ConfigBpfSyscall.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_NET_SCH_INGRESS"] = kernelOption{ + Enabled: config.ConfigNetSchIngress.Enabled() || config.ConfigNetSchIngress.Module(), + Description: coreInfraDescription, + CanBeModule: true, + } + kernelParams["CONFIG_NET_CLS_BPF"] = kernelOption{ + Enabled: config.ConfigNetClsBpf.Enabled() || config.ConfigNetClsBpf.Module(), + Description: coreInfraDescription, + CanBeModule: true, + } + kernelParams["CONFIG_NET_CLS_ACT"] = kernelOption{ + Enabled: config.ConfigNetClsAct.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_BPF_JIT"] = kernelOption{ + Enabled: config.ConfigBpfJit.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + kernelParams["CONFIG_HAVE_EBPF_JIT"] = kernelOption{ + Enabled: config.ConfigHaveEbpfJit.Enabled(), + Description: coreInfraDescription, + CanBeModule: false, + } + + return kernelParams +} + +// GetOptionalConfig performs a check of *optional* kernel configuration options. It +// returns a map indicating which optional/non-mandatory kernel parameters are enabled. +// GetOptionalConfig is being used by CLI "cilium kernel-check". 
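
The maps returned by GetRequiredConfig and GetOptionalConfig drive both the checks above and the `cilium kernel-check` CLI; a consumer simply ranges over them. A hedged usage sketch (assumes bpftool is on PATH, since the underlying probe is fatal without it):

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/datapath/linux/probes"
)

func main() {
	pm := probes.NewProbeManager() // runs bpftool once; the result is cached

	for param, opt := range pm.GetRequiredConfig() {
		status := "ok"
		if !opt.Enabled {
			status = "MISSING"
		}
		fmt.Printf("%-24s %-8s needed for: %s\n", param, status, opt.Description)
	}
}
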
+func (p *ProbeManager) GetOptionalConfig() map[KernelParam]kernelOption { + config := p.features.SystemConfig + kernelParams := make(map[KernelParam]kernelOption) + + kernelParams["CONFIG_CGROUP_BPF"] = kernelOption{ + Enabled: config.ConfigCgroupBpf.Enabled(), + Description: "Host Reachable Services and Sockmap optimization", + CanBeModule: false, + } + kernelParams["CONFIG_LWTUNNEL_BPF"] = kernelOption{ + Enabled: config.ConfigLwtunnelBpf.Enabled(), + Description: "Lightweight Tunnel hook for IP-in-IP encapsulation", + CanBeModule: false, + } + kernelParams["CONFIG_BPF_EVENTS"] = kernelOption{ + Enabled: config.ConfigBpfEvents.Enabled(), + Description: "Visibility and congestion management with datapath", + CanBeModule: false, + } + + return kernelParams +} + +// KernelConfigAvailable checks if the Kernel Config is available on the +// system or not. +func (p *ProbeManager) KernelConfigAvailable() bool { + // Check Kernel Config is available or not. + // We are replicating BPFTools logic here to check if kernel config is available + // https://elixir.bootlin.com/linux/v5.7/source/tools/bpf/bpftool/feature.c#L390 + info := unix.Utsname{} + err := unix.Uname(&info) + if err != nil { + return false + } + release := strings.TrimSpace(string(bytes.Trim(info.Release[:], "\x00"))) + + // Any error checking these files will return Kernel config not found error + if _, err := os.Stat(fmt.Sprintf("/boot/config-%s", release)); err != nil { + if _, err = os.Stat("/proc/config.gz"); err != nil { + return false + } + } + + return true +} + +// HaveProgramHelper is a wrapper around features.HaveProgramHelper() to +// check if a certain BPF program/helper copmbination is supported by the kernel. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + err := features.HaveProgramHelper(pt, helper) + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).WithField("programtype", pt).WithField("helper", helper).Fatal("failed to probe helper") + } + return nil +} + +// HaveLargeInstructionLimit is a wrapper around features.HaveLargeInstructions() +// to check if the kernel supports the 1 Million instruction limit. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveLargeInstructionLimit() error { + err := features.HaveLargeInstructions() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe large instruction limit") + } + return nil +} + +// HaveBoundedLoops is a wrapper around features.HaveBoundedLoops() +// to check if the kernel supports bounded loops in BPF programs. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveBoundedLoops() error { + err := features.HaveBoundedLoops() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe bounded loops") + } + return nil +} + +// HaveFibIfindex checks if kernel has d1c362e1dd68 ("bpf: Always return target +// ifindex in bpf_fib_lookup") which is 5.10+. This got merged in the same kernel +// as the new redirect helpers. +func HaveFibIfindex() error { + return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnRedirectPeer) +} + +// HaveWriteableQueueMapping checks if kernel has 74e31ca850c1 ("bpf: add +// skb->queue_mapping write access from tc clsact") which is 5.1+. 
This got merged +// in the same kernel as the bpf_skb_ecn_set_ce() helper. +func HaveWriteableQueueMapping() error { + return features.HaveProgramHelper(ebpf.SchedCLS, asm.FnSkbEcnSetCe) +} + +// HaveV2ISA is a wrapper around features.HaveV2ISA() to check if the kernel +// supports the V2 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveV2ISA() error { + err := features.HaveV2ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V2 ISA") + } + return nil +} + +// HaveV3ISA is a wrapper around features.HaveV3ISA() to check if the kernel +// supports the V3 ISA. +// On unexpected probe results this function will terminate with log.Fatal(). +func HaveV3ISA() error { + err := features.HaveV3ISA() + if errors.Is(err, ebpf.ErrNotSupported) { + return err + } + if err != nil { + log.WithError(err).Fatal("failed to probe V3 ISA") + } + return nil +} + +// HaveTCX returns nil if the running kernel supports attaching bpf programs to +// tcx hooks. +var HaveTCX = sync.OnceValue(func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "Apache-2.0", + }) + if err != nil { + return err + } + defer prog.Close() + + ns, err := netns.New() + if err != nil { + return fmt.Errorf("create netns: %w", err) + } + defer ns.Close() + + // link.AttachTCX already performs its own feature detection and returns + // ebpf.ErrNotSupported if the host kernel doesn't have tcx. + return ns.Do(func() error { + l, err := link.AttachTCX(link.TCXOptions{ + Program: prog, + Attach: ebpf.AttachTCXIngress, + Interface: 1, // lo + Anchor: link.Tail(), + }) + if err != nil { + return fmt.Errorf("creating link: %w", err) + } + if err := l.Close(); err != nil { + return fmt.Errorf("closing link: %w", err) + } + + return nil + }) +}) + +// HaveNetkit returns nil if the running kernel supports attaching bpf programs +// to netkit devices. +var HaveNetkit = sync.OnceValue(func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "Apache-2.0", + }) + if err != nil { + return err + } + defer prog.Close() + + ns, err := netns.New() + if err != nil { + return fmt.Errorf("create netns: %w", err) + } + defer ns.Close() + + return ns.Do(func() error { + l, err := link.AttachNetkit(link.NetkitOptions{ + Program: prog, + Attach: ebpf.AttachNetkitPrimary, + Interface: math.MaxInt, + }) + // We rely on this being checked during the syscall. With + // an otherwise correct payload we expect ENODEV here as + // an indication that the feature is present. + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return fmt.Errorf("creating link: %w", err) + } + if err := l.Close(); err != nil { + return fmt.Errorf("closing link: %w", err) + } + + return fmt.Errorf("unexpected success: %w", err) + }) +}) + +// HaveOuterSourceIPSupport tests whether the kernel support setting the outer +// source IP address via the bpf_skb_set_tunnel_key BPF helper. We can't rely +// on the verifier to reject a program using the new support because the +// verifier just accepts any argument size for that helper; non-supported +// fields will simply not be used. Instead, we set the outer source IP and +// retrieve it with bpf_skb_get_tunnel_key right after. 
If the retrieved value +// equals the value set, we have a confirmation the kernel supports it. +func HaveOuterSourceIPSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for outer source IP support") + } + }() + + progSpec := &ebpf.ProgramSpec{ + Name: "set_tunnel_key_probe", + Type: ebpf.SchedACT, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Reg(asm.R8, asm.R1), + + asm.Mov.Imm(asm.R2, 0), + asm.StoreMem(asm.RFP, -8, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -16, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -24, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -32, asm.R2, asm.DWord), + asm.StoreMem(asm.RFP, -40, asm.R2, asm.DWord), + asm.Mov.Imm(asm.R2, 42), + asm.StoreMem(asm.RFP, -44, asm.R2, asm.Word), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), // sizeof(struct bpf_tunnel_key) when setting the outer source IP is supported. + asm.Mov.Imm(asm.R4, 0), + asm.FnSkbSetTunnelKey.Call(), + + asm.Mov.Reg(asm.R1, asm.R8), + asm.Mov.Reg(asm.R2, asm.RFP), + asm.Add.Imm(asm.R2, -44), + asm.Mov.Imm(asm.R3, 44), + asm.Mov.Imm(asm.R4, 0), + asm.FnSkbGetTunnelKey.Call(), + + asm.LoadMem(asm.R0, asm.RFP, -44, asm.Word), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + pkt := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + ret, _, err := prog.Test(pkt) + if err != nil { + return err + } + if ret != 42 { + return ebpf.ErrNotSupported + } + return nil +} + +// HaveSKBAdjustRoomL2RoomMACSupport tests whether the kernel supports the `bpf_skb_adjust_room` helper +// with the `BPF_ADJ_ROOM_MAC` mode. To do so, we create a program that requests the passed in SKB +// to be expanded by 20 bytes. The helper checks the `mode` argument and will return -ENOSUPP if +// the mode is unknown. Otherwise it should resize the SKB by 20 bytes and return 0. +func HaveSKBAdjustRoomL2RoomMACSupport() (err error) { + defer func() { + if err != nil && !errors.Is(err, ebpf.ErrNotSupported) { + log.WithError(err).Fatal("failed to probe for bpf_skb_adjust_room L2 room MAC support") + } + }() + + progSpec := &ebpf.ProgramSpec{ + Name: "adjust_mac_room", + Type: ebpf.SchedCLS, + License: "GPL", + } + progSpec.Instructions = asm.Instructions{ + asm.Mov.Imm(asm.R2, 20), // len_diff + asm.Mov.Imm(asm.R3, 1), // mode: BPF_ADJ_ROOM_MAC + asm.Mov.Imm(asm.R4, 0), // flags: 0 + asm.FnSkbAdjustRoom.Call(), + asm.Return(), + } + prog, err := ebpf.NewProgram(progSpec) + if err != nil { + return err + } + defer prog.Close() + + // This is a Eth + IPv4 + UDP + data packet. The helper relies on a valid packet being passed in + // since it wants to know offsets of the different layers. 
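
Both this probe and the outer-source-IP probe above rely on the same technique: load a tiny program, run it through prog.Test() on a crafted packet, and infer support from the return value rather than from a verifier rejection. A stripped-down sketch of that pattern, not one of the vendored probes (it needs privileges to load BPF programs):

package main

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// Minimal SchedCLS program that just returns 7.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type:    ebpf.SchedCLS,
		License: "GPL",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 7),
			asm.Return(),
		},
	})
	if err != nil {
		panic(err)
	}
	defer prog.Close()

	// BPF_PROG_TEST_RUN wants a plausible packet; 14 zero bytes suffice here.
	ret, _, err := prog.Test(make([]byte, 14))
	if err != nil {
		panic(err)
	}
	fmt.Println("program returned", ret) // a real probe compares this to a sentinel value
}
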
+ buf := gopacket.NewSerializeBuffer() + err = gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, + &layers.Ethernet{ + DstMAC: net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + SrcMAC: net.HardwareAddr{0x0e, 0xf5, 0x16, 0x3d, 0x6b, 0xab}, + EthernetType: layers.EthernetTypeIPv4, + }, + &layers.IPv4{ + Version: 4, + IHL: 5, + Length: 49, + Id: 0xCECB, + TTL: 64, + Protocol: layers.IPProtocolUDP, + SrcIP: net.IPv4(0xc0, 0xa8, 0xb2, 0x56), + DstIP: net.IPv4(0xc0, 0xa8, 0xb2, 0xff), + }, + &layers.UDP{ + SrcPort: 23939, + DstPort: 32412, + }, + gopacket.Payload("M-SEARCH * HTTP/1.1\x0d\x0a"), + ) + if err != nil { + return fmt.Errorf("craft packet: %w", err) + } + + ret, _, err := prog.Test(buf.Bytes()) + if err != nil { + return err + } + if ret != 0 { + return ebpf.ErrNotSupported + } + return nil +} + +// HaveDeadCodeElim tests whether the kernel supports dead code elimination. +func HaveDeadCodeElim() error { + spec := ebpf.ProgramSpec{ + Name: "test", + Type: ebpf.XDP, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R1, 0), + asm.JEq.Imm(asm.R1, 1, "else"), + asm.Mov.Imm(asm.R0, 2), + asm.Ja.Label("end"), + asm.Mov.Imm(asm.R0, 3).WithSymbol("else"), + asm.Return().WithSymbol("end"), + }, + } + + prog, err := ebpf.NewProgram(&spec) + if err != nil { + return fmt.Errorf("loading program: %w", err) + } + + info, err := prog.Info() + if err != nil { + return fmt.Errorf("get prog info: %w", err) + } + infoInst, err := info.Instructions() + if err != nil { + return fmt.Errorf("get instructions: %w", err) + } + + for _, inst := range infoInst { + if inst.OpCode.Class().IsJump() && inst.OpCode.JumpOp() != asm.Exit { + return fmt.Errorf("Jump instruction found in the final program, no dead code elimination performed") + } + } + + return nil +} + +// HaveIPv6Support tests whether kernel can open an IPv6 socket. This will +// also implicitly auto-load IPv6 kernel module if available and not yet +// loaded. +func HaveIPv6Support() error { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_STREAM, 0) + if errors.Is(err, unix.EAFNOSUPPORT) || errors.Is(err, unix.EPROTONOSUPPORT) { + return ErrNotSupported + } + unix.Close(fd) + return nil +} + +// CreateHeaderFiles creates C header files with macros indicating which BPF +// features are available in the kernel. +func CreateHeaderFiles(headerDir string, probes *FeatureProbes) error { + common, err := os.Create(filepath.Join(headerDir, "features.h")) + if err != nil { + return fmt.Errorf("could not create common features header file: %w", err) + } + defer common.Close() + if err := writeCommonHeader(common, probes); err != nil { + return fmt.Errorf("could not write common features header file: %w", err) + } + + skb, err := os.Create(filepath.Join(headerDir, "features_skb.h")) + if err != nil { + return fmt.Errorf("could not create skb related features header file: %w", err) + } + defer skb.Close() + if err := writeSkbHeader(skb, probes); err != nil { + return fmt.Errorf("could not write skb related features header file: %w", err) + } + + xdp, err := os.Create(filepath.Join(headerDir, "features_xdp.h")) + if err != nil { + return fmt.Errorf("could not create xdp related features header file: %w", err) + } + defer xdp.Close() + if err := writeXdpHeader(xdp, probes); err != nil { + return fmt.Errorf("could not write xdp related features header file: %w", err) + } + + return nil +} + +// ExecuteHeaderProbes probes the kernel for a specific set of BPF features +// which are currently used to generate various feature macros for the datapath. 
+// The probe results returned in FeatureProbes are then used in the respective +// function that writes the actual C macro definitions. +// Further needed probes should be added here, while new macro strings need to +// be added in the correct `write*Header()` function. +func ExecuteHeaderProbes() *FeatureProbes { + probes := FeatureProbes{ + ProgramHelpers: make(map[ProgramHelper]bool), + Misc: miscFeatures{}, + } + + progHelpers := []ProgramHelper{ + // common probes + {ebpf.CGroupSock, asm.FnGetNetnsCookie}, + {ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}, + {ebpf.CGroupSockAddr, asm.FnGetSocketCookie}, + {ebpf.CGroupSock, asm.FnJiffies64}, + {ebpf.CGroupSockAddr, asm.FnJiffies64}, + {ebpf.SchedCLS, asm.FnJiffies64}, + {ebpf.XDP, asm.FnJiffies64}, + {ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}, + {ebpf.CGroupSock, asm.FnSetRetval}, + {ebpf.SchedCLS, asm.FnRedirectNeigh}, + {ebpf.SchedCLS, asm.FnRedirectPeer}, + + // skb related probes + {ebpf.SchedCLS, asm.FnSkbChangeTail}, + {ebpf.SchedCLS, asm.FnCsumLevel}, + + // xdp related probes + {ebpf.XDP, asm.FnXdpGetBuffLen}, + {ebpf.XDP, asm.FnXdpLoadBytes}, + {ebpf.XDP, asm.FnXdpStoreBytes}, + } + for _, ph := range progHelpers { + probes.ProgramHelpers[ph] = (HaveProgramHelper(ph.Program, ph.Helper) == nil) + } + + probes.Misc.HaveFibIfindex = (HaveFibIfindex() == nil) + + return &probes +} + +// writeCommonHeader defines macross for bpf/include/bpf/features.h +func writeCommonHeader(writer io.Writer, probes *FeatureProbes) error { + features := map[string]bool{ + "HAVE_NETNS_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnGetNetnsCookie}] && + probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetNetnsCookie}], + "HAVE_SOCKET_COOKIE": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetSocketCookie}], + "HAVE_JIFFIES": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnJiffies64}] && + probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnJiffies64}], + "HAVE_CGROUP_ID": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSockAddr, asm.FnGetCurrentCgroupId}], + "HAVE_SET_RETVAL": probes.ProgramHelpers[ProgramHelper{ebpf.CGroupSock, asm.FnSetRetval}], + "HAVE_FIB_NEIGH": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnRedirectNeigh}], + "HAVE_FIB_IFINDEX": probes.Misc.HaveFibIfindex, + } + + return writeFeatureHeader(writer, features, true) +} + +// writeSkbHeader defines macros for bpf/include/bpf/features_skb.h +func writeSkbHeader(writer io.Writer, probes *FeatureProbes) error { + featuresSkb := map[string]bool{ + "HAVE_CSUM_LEVEL": probes.ProgramHelpers[ProgramHelper{ebpf.SchedCLS, asm.FnCsumLevel}], + } + + return writeFeatureHeader(writer, featuresSkb, false) +} + +// writeXdpHeader defines macros for bpf/include/bpf/features_xdp.h +func writeXdpHeader(writer io.Writer, probes *FeatureProbes) error { + featuresXdp := map[string]bool{ + "HAVE_XDP_GET_BUFF_LEN": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpGetBuffLen}], + "HAVE_XDP_LOAD_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpLoadBytes}], + "HAVE_XDP_STORE_BYTES": probes.ProgramHelpers[ProgramHelper{ebpf.XDP, asm.FnXdpStoreBytes}], + } + + return writeFeatureHeader(writer, featuresXdp, false) +} + +func writeFeatureHeader(writer io.Writer, features map[string]bool, common bool) error { + input := struct { + Common bool + Features map[string]bool 
+ }{ + Common: common, + Features: features, + } + + if err := tpl.Execute(writer, input); err != nil { + return fmt.Errorf("could not write template: %w", err) + } + + return nil +} + +// HaveBatchAPI checks if kernel supports batched bpf map lookup API. +func HaveBatchAPI() error { + spec := ebpf.MapSpec{ + Type: ebpf.LRUHash, + KeySize: 1, + ValueSize: 1, + MaxEntries: 2, + } + m, err := ebpf.NewMapWithOptions(&spec, ebpf.MapOptions{}) + if err != nil { + return ErrNotSupported + } + defer m.Close() + var cursor ebpf.MapBatchCursor + _, err = m.BatchLookup(&cursor, []byte{0}, []byte{0}, nil) // only do one batched lookup + if err != nil { + if errors.Is(err, ebpf.ErrNotSupported) { + return ErrNotSupported + } + return nil + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go new file mode 100644 index 0000000000..846e9c28e0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_linux.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package probes + +import "github.com/vishvananda/netlink" + +// Family type definitions +const ( + NTF_EXT_LEARNED = netlink.NTF_EXT_LEARNED + NTF_EXT_MANAGED = netlink.NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go new file mode 100644 index 0000000000..f92efd4990 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/probes/probes_unspecified.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package probes + +// Dummy values on non-linux platform +const ( + NTF_EXT_LEARNED = iota + NTF_EXT_MANAGED +) diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go new file mode 100644 index 0000000000..00afeadabb --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_linux.go @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package safenetlink + +import ( + "context" + "errors" + "net" + + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + + "github.com/cilium/cilium/pkg/resiliency" + "github.com/cilium/cilium/pkg/time" +) + +const ( + netlinkRetryInterval = 1 * time.Millisecond + netlinkRetryMax = 30 +) + +// WithRetry runs the netlinkFunc. If netlinkFunc returns netlink.ErrDumpInterrupted, the function is retried. +// If success or any other error is returned, WithRetry returns immediately, propagating the error. 
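
Every wrapper defined below follows this one pattern: run the netlink operation and retry only when it reports netlink.ErrDumpInterrupted. A usage sketch, assuming the vendored import path; RouteGet and the documentation address 192.0.2.1 are arbitrary examples of a call without a dedicated wrapper:

package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"

	"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
)

func main() {
	// Existing wrappers are transparent to the caller: this is netlink.LinkList
	// plus automatic retries on netlink.ErrDumpInterrupted.
	links, err := safenetlink.LinkList()
	if err != nil {
		panic(err)
	}
	fmt.Println("links:", len(links))

	// Calls without a dedicated wrapper can use WithRetryResult directly.
	routes, err := safenetlink.WithRetryResult(func() ([]netlink.Route, error) {
		return netlink.RouteGet(net.ParseIP("192.0.2.1"))
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("routes to 192.0.2.1:", len(routes))
}
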
+func WithRetry(netlinkFunc func() error) error { + return resiliency.Retry(context.Background(), netlinkRetryInterval, netlinkRetryMax, func(ctx context.Context, retries int) (bool, error) { + err := netlinkFunc() + if errors.Is(err, netlink.ErrDumpInterrupted) { + return false, nil // retry + } + + return true, err + }) +} + +// WithRetryResult works like WithRetry, but allows netlinkFunc to have a return value besides the error +func WithRetryResult[T any](netlinkFunc func() (T, error)) (out T, err error) { + err = WithRetry(func() error { + out, err = netlinkFunc() + return err + }) + return out, err +} + +// AddrList wraps netlink.AddrList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { + return WithRetryResult(func() ([]netlink.Addr, error) { + return netlink.AddrList(link, family) + }) +} + +// BridgeVlanList wraps netlink.BridgeVlanList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) { + return WithRetryResult(func() (map[int32][]*nl.BridgeVlanInfo, error) { + return netlink.BridgeVlanList() + }) +} + +// ChainList wraps netlink.ChainList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) { + return WithRetryResult(func() ([]netlink.Chain, error) { + return netlink.ChainList(link, parent) + }) +} + +// ClassList wraps netlink.ClassList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) { + return WithRetryResult(func() ([]netlink.Class, error) { + return netlink.ClassList(link, parent) + }) +} + +// ConntrackTableList wraps netlink.ConntrackTableList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) { + return WithRetryResult(func() ([]*netlink.ConntrackFlow, error) { + return netlink.ConntrackTableList(table, family) + }) +} + +// DevLinkGetDeviceList wraps netlink.DevLinkGetDeviceList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevLinkGetDeviceList() ([]*netlink.DevlinkDevice, error) { + return WithRetryResult(func() ([]*netlink.DevlinkDevice, error) { + return netlink.DevLinkGetDeviceList() + }) +} + +// DevLinkGetAllPortList wraps netlink.DevLinkGetAllPortList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevLinkGetAllPortList() ([]*netlink.DevlinkPort, error) { + return WithRetryResult(func() ([]*netlink.DevlinkPort, error) { + return netlink.DevLinkGetAllPortList() + }) +} + +// DevlinkGetDeviceParams wraps netlink.DevlinkGetDeviceParams, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func DevlinkGetDeviceParams(bus string, device string) ([]*netlink.DevlinkParam, error) { + return WithRetryResult(func() ([]*netlink.DevlinkParam, error) { + return netlink.DevlinkGetDeviceParams(bus, device) + }) +} + +// FilterList wraps netlink.FilterList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + return WithRetryResult(func() ([]netlink.Filter, error) { + return netlink.FilterList(link, parent) + 
}) +} + +// FouList wraps netlink.FouList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func FouList(fam int) ([]netlink.Fou, error) { + return WithRetryResult(func() ([]netlink.Fou, error) { + return netlink.FouList(fam) + }) +} + +// GenlFamilyList wraps netlink.GenlFamilyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func GenlFamilyList() ([]*netlink.GenlFamily, error) { + return WithRetryResult(func() ([]*netlink.GenlFamily, error) { + return netlink.GenlFamilyList() + }) +} + +// GTPPDPList wraps netlink.GTPPDPList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func GTPPDPList() ([]*netlink.PDP, error) { + return WithRetryResult(func() ([]*netlink.PDP, error) { + return netlink.GTPPDPList() + }) +} + +// LinkByName wraps netlink.LinkByName, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkByName(name string) (netlink.Link, error) { + return WithRetryResult(func() (netlink.Link, error) { + return netlink.LinkByName(name) + }) +} + +// LinkByAlias wraps netlink.LinkByAlias, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkByAlias(alias string) (netlink.Link, error) { + return WithRetryResult(func() (netlink.Link, error) { + return netlink.LinkByAlias(alias) + }) +} + +// LinkList wraps netlink.LinkList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkList() ([]netlink.Link, error) { + return WithRetryResult(func() ([]netlink.Link, error) { + return netlink.LinkList() + }) +} + +// LinkSubscribeWithOptions wraps netlink.LinkSubscribeWithOptions, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkSubscribeWithOptions(ch chan<- netlink.LinkUpdate, done <-chan struct{}, options netlink.LinkSubscribeOptions) error { + return WithRetry(func() error { + return netlink.LinkSubscribeWithOptions(ch, done, options) + }) +} + +// NeighList wraps netlink.NeighList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighList(linkIndex, family int) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighList(linkIndex, family) + }) +} + +// NeighProxyList wraps netlink.NeighProxyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighProxyList(linkIndex, family) + }) +} + +// NeighListExecute wraps netlink.NeighListExecute, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighListExecute(msg netlink.Ndmsg) ([]netlink.Neigh, error) { + return WithRetryResult(func() ([]netlink.Neigh, error) { + return netlink.NeighListExecute(msg) + }) +} + +// NeighSubscribeWithOptions wraps netlink.NeighSubscribeWithOptions, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func NeighSubscribeWithOptions(ch chan<- netlink.NeighUpdate, done <-chan struct{}, options netlink.NeighSubscribeOptions) error { + return WithRetry(func() error { + return netlink.NeighSubscribeWithOptions(ch, done, options) + }) +} + +// LinkGetProtinfo wraps netlink.LinkGetProtinfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + 
return WithRetryResult(func() (netlink.Protinfo, error) { + return netlink.LinkGetProtinfo(link) + }) +} + +// QdiscList wraps netlink.QdiscList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + return WithRetryResult(func() ([]netlink.Qdisc, error) { + return netlink.QdiscList(link) + }) +} + +// RdmaLinkList wraps netlink.RdmaLinkList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkList() ([]*netlink.RdmaLink, error) { + return WithRetryResult(func() ([]*netlink.RdmaLink, error) { + return netlink.RdmaLinkList() + }) +} + +// RdmaLinkByName wraps netlink.RdmaLinkByName, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkByName(name string) (*netlink.RdmaLink, error) { + return WithRetryResult(func() (*netlink.RdmaLink, error) { + return netlink.RdmaLinkByName(name) + }) +} + +// RdmaLinkDel wraps netlink.RdmaLinkDel, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RdmaLinkDel(name string) error { + return WithRetry(func() error { + return netlink.RdmaLinkDel(name) + }) +} + +// RouteList wraps netlink.RouteList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + return WithRetryResult(func() ([]netlink.Route, error) { + return netlink.RouteList(link, family) + }) +} + +// RouteListFiltered wraps netlink.RouteListFiltered, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + return WithRetryResult(func() ([]netlink.Route, error) { + return netlink.RouteListFiltered(family, filter, filterMask) + }) +} + +// RouteListFilteredIter wraps netlink.RouteListFilteredIter, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error { + return WithRetry(func() error { + return netlink.RouteListFilteredIter(family, filter, filterMask, f) + }) +} + +// RouteSubscribeWithOptions wraps netlink.RouteSubscribeWithOptions, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RouteSubscribeWithOptions(ch chan<- netlink.RouteUpdate, done <-chan struct{}, options netlink.RouteSubscribeOptions) error { + return WithRetry(func() error { + return netlink.RouteSubscribeWithOptions(ch, done, options) + }) +} + +// RuleList wraps netlink.RuleList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RuleList(family int) ([]netlink.Rule, error) { + return WithRetryResult(func() ([]netlink.Rule, error) { + return netlink.RuleList(family) + }) +} + +// RuleListFiltered wraps netlink.RuleListFiltered, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + return WithRetryResult(func() ([]netlink.Rule, error) { + return netlink.RuleListFiltered(family, filter, filterMask) + }) +} + +// SocketGet wraps netlink.SocketGet, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketGet(local, remote net.Addr) (*netlink.Socket, error) { + return WithRetryResult(func() (*netlink.Socket, error) { + 
return netlink.SocketGet(local, remote) + }) +} + +// SocketDiagTCPInfo wraps netlink.SocketDiagTCPInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) { + return WithRetryResult(func() ([]*netlink.InetDiagTCPInfoResp, error) { + return netlink.SocketDiagTCPInfo(family) + }) +} + +// SocketDiagTCP wraps netlink.SocketDiagTCP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) { + return WithRetryResult(func() ([]*netlink.Socket, error) { + return netlink.SocketDiagTCP(family) + }) +} + +// SocketDiagUDPInfo wraps netlink.SocketDiagUDPInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) { + return WithRetryResult(func() ([]*netlink.InetDiagUDPInfoResp, error) { + return netlink.SocketDiagUDPInfo(family) + }) +} + +// SocketDiagUDP wraps netlink.SocketDiagUDP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) { + return WithRetryResult(func() ([]*netlink.Socket, error) { + return netlink.SocketDiagUDP(family) + }) +} + +// UnixSocketDiagInfo wraps netlink.UnixSocketDiagInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) { + return WithRetryResult(func() ([]*netlink.UnixDiagInfoResp, error) { + return netlink.UnixSocketDiagInfo() + }) +} + +// UnixSocketDiag wraps netlink.UnixSocketDiag, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func UnixSocketDiag() ([]*netlink.UnixSocket, error) { + return WithRetryResult(func() ([]*netlink.UnixSocket, error) { + return netlink.UnixSocketDiag() + }) +} + +// SocketXDPGetInfo wraps netlink.SocketXDPGetInfo, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) { + return WithRetryResult(func() (*netlink.XDPDiagInfoResp, error) { + return netlink.SocketXDPGetInfo(ino, cookie) + }) +} + +// SocketDiagXDP wraps netlink.SocketDiagXDP, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) { + return WithRetryResult(func() ([]*netlink.XDPDiagInfoResp, error) { + return netlink.SocketDiagXDP() + }) +} + +// VDPAGetDevList wraps netlink.VDPAGetDevList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetDevList() ([]*netlink.VDPADev, error) { + return WithRetryResult(func() ([]*netlink.VDPADev, error) { + return netlink.VDPAGetDevList() + }) +} + +// VDPAGetDevConfigList wraps netlink.VDPAGetDevConfigList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetDevConfigList() ([]*netlink.VDPADevConfig, error) { + return WithRetryResult(func() ([]*netlink.VDPADevConfig, error) { + return netlink.VDPAGetDevConfigList() + }) +} + +// VDPAGetMGMTDevList wraps netlink.VDPAGetMGMTDevList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func VDPAGetMGMTDevList() ([]*netlink.VDPAMGMTDev, error) { + return WithRetryResult(func() ([]*netlink.VDPAMGMTDev, error) { + return netlink.VDPAGetMGMTDevList() + }) +} + +// XfrmPolicyList wraps 
netlink.XfrmPolicyList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) { + return WithRetryResult(func() ([]netlink.XfrmPolicy, error) { + return netlink.XfrmPolicyList(family) + }) +} + +// XfrmStateList wraps netlink.XfrmStateList, but retries the call automatically +// if netlink.ErrDumpInterrupted is returned +func XfrmStateList(family int) ([]netlink.XfrmState, error) { + return WithRetryResult(func() ([]netlink.XfrmState, error) { + return netlink.XfrmStateList(family) + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go new file mode 100644 index 0000000000..046c03f99e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/datapath/linux/safenetlink/netlink_unspecified.go @@ -0,0 +1,141 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +// This file duplicates the stubs that exist in vishvananda/netlink outside the linux build. Not all +// functions defined in found in netlink_linux.go are present here, because not all have a stub in +// vishvananda/netlink, and thus some of the necessary function signature types are missing outside +// the linux build. + +package safenetlink + +import ( + "net" + + "github.com/vishvananda/netlink" +) + +func AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { + return nil, netlink.ErrNotImplemented +} + +func ChainList(link netlink.Link, parent uint32) ([]netlink.Chain, error) { + return nil, netlink.ErrNotImplemented +} + +func ClassList(link netlink.Link, parent uint32) ([]netlink.Class, error) { + return nil, netlink.ErrNotImplemented +} + +func ConntrackTableList(table netlink.ConntrackTableType, family netlink.InetFamily) ([]*netlink.ConntrackFlow, error) { + return nil, netlink.ErrNotImplemented +} + +func FilterList(link netlink.Link, parent uint32) ([]netlink.Filter, error) { + return nil, netlink.ErrNotImplemented +} + +func FouList(fam int) ([]netlink.Fou, error) { + return nil, netlink.ErrNotImplemented +} + +func GenlFamilyList() ([]*netlink.GenlFamily, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkByName(name string) (netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkByAlias(alias string) (netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkList() ([]netlink.Link, error) { + return nil, netlink.ErrNotImplemented +} + +func NeighList(linkIndex, family int) ([]netlink.Neigh, error) { + return nil, netlink.ErrNotImplemented +} + +func NeighProxyList(linkIndex, family int) ([]netlink.Neigh, error) { + return nil, netlink.ErrNotImplemented +} + +func LinkGetProtinfo(link netlink.Link) (netlink.Protinfo, error) { + return netlink.Protinfo{}, netlink.ErrNotImplemented +} + +func QdiscList(link netlink.Link) ([]netlink.Qdisc, error) { + return nil, netlink.ErrNotImplemented +} + +func RdmaLinkDel(name string) error { + return netlink.ErrNotImplemented +} + +func RouteList(link netlink.Link, family int) ([]netlink.Route, error) { + return nil, netlink.ErrNotImplemented +} + +func RouteListFiltered(family int, filter *netlink.Route, filterMask uint64) ([]netlink.Route, error) { + return nil, netlink.ErrNotImplemented +} + +func RouteListFilteredIter(family int, filter *netlink.Route, filterMask uint64, f func(netlink.Route) (cont bool)) error { + return netlink.ErrNotImplemented 
+} + +func RuleList(family int) ([]netlink.Rule, error) { + return nil, netlink.ErrNotImplemented +} + +func RuleListFiltered(family int, filter *netlink.Rule, filterMask uint64) ([]netlink.Rule, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketGet(local, remote net.Addr) (*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagTCPInfo(family uint8) ([]*netlink.InetDiagTCPInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagTCP(family uint8) ([]*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagUDPInfo(family uint8) ([]*netlink.InetDiagUDPInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagUDP(family uint8) ([]*netlink.Socket, error) { + return nil, netlink.ErrNotImplemented +} + +func UnixSocketDiagInfo() ([]*netlink.UnixDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func UnixSocketDiag() ([]*netlink.UnixSocket, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketXDPGetInfo(ino uint32, cookie uint64) (*netlink.XDPDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func SocketDiagXDP() ([]*netlink.XDPDiagInfoResp, error) { + return nil, netlink.ErrNotImplemented +} + +func XfrmPolicyList(family int) ([]netlink.XfrmPolicy, error) { + return nil, netlink.ErrNotImplemented +} + +func XfrmStateList(family int) ([]netlink.XfrmState, error) { + return nil, netlink.ErrNotImplemented +} diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go index 1258c1af89..e8032c231f 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -56,8 +56,9 @@ const ( // TemplatesDir is the default path for the compiled template objects relative to StateDir TemplatesDir = "templates" - // TemplatePath is the default path for a symlink to a template relative to StateDir/ - TemplatePath = "template.o" + // TemplateIDPath is the name of a file which contains the ID (aka hash) of + // the template used by the endpoint. + TemplateIDPath = "template.txt" // BpfDir is the default path for template files relative to LibDir BpfDir = "bpf" @@ -71,20 +72,9 @@ const ( // SockPathEnv is the environment variable to overwrite SockPath SockPathEnv = "CILIUM_SOCK" - // HubbleSockPath is the path to the UNIX domain socket exposing the Hubble - // API to clients locally. - HubbleSockPath = RuntimePath + "/hubble.sock" - - // HubbleSockPathEnv is the environment variable to overwrite - // HubbleSockPath. - HubbleSockPathEnv = "HUBBLE_SOCK" - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath = RuntimePath + "/pcaps" - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize = 1024 + // ShellSockPath is the path to the UNIX domain socket exposing the debug shell + // to which "cilium-dbg shell" connects to. + ShellSockPath = RuntimePath + "/shell.sock" // MonitorSockPath1_2 is the path to the UNIX domain socket used to // distribute BPF and agent events to listeners. 
@@ -102,10 +92,6 @@ const ( // the agent and the CNI plugin processes DeleteQueueLockfile = DeleteQueueDir + "/lockfile" - // EnableHostIPRestore controls whether the host IP should be restored - // from previous state automatically - EnableHostIPRestore = true - // BPFFSRoot is the default path where BPFFS should be mounted BPFFSRoot = "/sys/fs/bpf" @@ -133,7 +119,7 @@ const ( // ToFQDNsMaxIPsPerHost defines the maximum number of IPs to maintain // for each FQDN name in an endpoint's FQDN cache - ToFQDNsMaxIPsPerHost = 50 + ToFQDNsMaxIPsPerHost = 1000 // ToFQDNsMaxDeferredConnectionDeletes Maximum number of IPs to retain for // expired DNS lookups with still-active connections @@ -158,17 +144,40 @@ const ( // endpoints that are larger than 512 Bytes or the EDNS0 option, if present. ToFQDNsEnableDNSCompression = true + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. + DNSProxyEnableTransparentMode = false + + // DNSProxyLockCount is the default array size containing mutexes which protect + // against parallel handling of DNS response names. + DNSProxyLockCount = 131 + + // DNSProxyLockTimeout is the default timeout when acquiring the locks controlled by + // DNSProxyLockCount. + DNSProxyLockTimeout = 500 * time.Millisecond + + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout = 10 + // IdentityChangeGracePeriod is the default value for // option.IdentityChangeGracePeriod IdentityChangeGracePeriod = 5 * time.Second - // IdentityRestoreGracePeriod is the default value for - // option.IdentityRestoreGracePeriod - IdentityRestoreGracePeriod = 10 * time.Minute + // IdentityRestoreGracePeriodKvstore is the default value for + // option.IdentityRestoreGracePeriod when kvstore is enabled. + IdentityRestoreGracePeriodKvstore = 10 * time.Minute + + // IdentityRestoreGracePeriodKvstore is the default value for + // option.IdentityRestoreGracePeriod when only k8s is in use + IdentityRestoreGracePeriodK8s = 30 * time.Second // ExecTimeout is a timeout for executing commands. ExecTimeout = 300 * time.Second + // MaxInternalTimerDelay does not enforce a maximum on timer values in + // the agent by default. + MaxInternalTimerDelay = 0 * time.Second + // StatusCollectorInterval is the interval between a probe invocations StatusCollectorInterval = 5 * time.Second @@ -180,6 +189,10 @@ const ( // is considered failed StatusCollectorFailureThreshold = 1 * time.Minute + // SessionAffinityTimeoutMaxFallback defines the maximum number of seconds + // for the session affinity timeout. See also lb{4,6}_affinity_timeout(). + SessionAffinityTimeoutMaxFallback = 0xffffff + // EnableIPv4 is the default value for IPv4 enablement EnableIPv4 = true @@ -201,15 +214,9 @@ const ( // EnableL7Proxy is the default value for L7 proxy enablement EnableL7Proxy = true - // EnvoyConfigTimeout determines how long to wait Envoy to N/ACK resources - EnvoyConfigTimeout = 2 * time.Minute - // EnableHostLegacyRouting is the default value for using the old routing path via stack. EnableHostLegacyRouting = false - // K8sEnableEndpointSlice is the default value for k8s EndpointSlice feature. - K8sEnableEndpointSlice = true - // PreAllocateMaps is the default value for BPF map preallocation PreAllocateMaps = true @@ -224,6 +231,15 @@ const ( // be necessary on key rotations. EnableIPsecKeyWatcher = true + // Enable caching for XfrmState for IPSec. 
Significantly reduces CPU usage + // in large clusters. + EnableIPSecXfrmStateCaching = true + + // Enable IPSec encrypted overlay + // + // This feature will encrypt overlay traffic before it leaves the cluster. + EnableIPSecEncryptedOverlay = false + // EncryptNode enables encrypting traffic from host networking applications // which are not part of Cilium manged pods. EncryptNode = false @@ -254,15 +270,15 @@ const ( // EnableBPFTProxy is the default value for EnableBPFTProxy EnableBPFTProxy = false - // EnableXTSocketFallback is the default value for EnableXTSocketFallback - EnableXTSocketFallback = true - // EnableLocalNodeRoute default value for EnableLocalNodeRoute EnableLocalNodeRoute = true // EnableAutoDirectRouting is the default value for EnableAutoDirectRouting EnableAutoDirectRouting = false + // EnableDirectRoutingSkipUnreachable is the default value for EnableDirectRoutingIgnoreUnreachableName + EnableDirectRoutingSkipUnreachable = false + // EnableHealthChecking is the default value for EnableHealthChecking EnableHealthChecking = true @@ -274,6 +290,13 @@ const ( // EnableHealthCheckNodePort EnableHealthCheckNodePort = true + // EnableHealthCheckLoadBalancerIP is the default value for + // EnableHealthCheckLoadBalancerIP + EnableHealthCheckLoadBalancerIP = false + + // HealthCheckICMPFailureThreshold is the default value for HealthCheckICMPFailureThreshold + HealthCheckICMPFailureThreshold = 3 + // AlignCheckerName is the BPF object name for the alignchecker. AlignCheckerName = "bpf_alignchecker.o" @@ -287,11 +310,9 @@ const ( // a kvstore path for too long. KVStoreStaleLockTimeout = 30 * time.Second - // IPAllocationTimeout is the timeout when allocating CIDRs - IPAllocationTimeout = 2 * time.Minute - - // PolicyQueueSize is the default queue size for policy-related events. - PolicyQueueSize = 100 + // KVstorePodNetworkSupport represents whether to enable the support for + // running the Cilium KVstore in pod network. + KVstorePodNetworkSupport = false // KVstoreQPS is default rate limit for kv store operations KVstoreQPS = 20 @@ -321,12 +342,6 @@ const ( // connection tracking garbage collection ConntrackGCStartingInterval = 5 * time.Minute - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. - K8sEventHandover = false - // LoopbackIPv4 is the default address for service loopback LoopbackIPv4 = "169.254.42.1" @@ -363,16 +378,15 @@ const ( LockLeaseTTL = 25 * time.Second // KVstoreLeaseMaxTTL is the upper bound for KVStore lease TTL value. - // It is calculated as Min(int64 positive max, etcd MaxLeaseTTL, consul MaxLeaseTTL) + // It is calculated as Min(int64 positive max, etcd MaxLeaseTTL) KVstoreLeaseMaxTTL = 86400 * time.Second // IPAMPreAllocation is the default value for // CiliumNode.Spec.IPAM.PreAllocate if no value is set IPAMPreAllocation = 8 - // IPAMMultiPoolPreAllocation is the default value for multi-pool IPAM - // pre-allocations - IPAMMultiPoolPreAllocation = "default=8" + // IPAMDefaultIPPool is the default value for the multi-pool default pool name. + IPAMDefaultIPPool = "default" // ENIFirstInterfaceIndex is the default value for // CiliumNode.Spec.ENI.FirstInterfaceIndex if no value is set. 
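// A minimal sketch (not from this patch) of applying the IPAM defaults
// documented above when the corresponding CiliumNode fields are left unset.
// The helper names and the zero-value-means-unset convention are assumptions
// made for this example only.
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/defaults"
)

// preAllocateOrDefault falls back to defaults.IPAMPreAllocation when the spec
// does not set CiliumNode.Spec.IPAM.PreAllocate.
func preAllocateOrDefault(specPreAllocate int) int {
	if specPreAllocate == 0 {
		return defaults.IPAMPreAllocation
	}
	return specPreAllocate
}

// poolOrDefault falls back to the multi-pool default pool name introduced by
// this bump (defaults.IPAMDefaultIPPool).
func poolOrDefault(pool string) string {
	if pool == "" {
		return defaults.IPAMDefaultIPPool
	}
	return pool
}

func main() {
	fmt.Println(preAllocateOrDefault(0), poolOrDefault(""))
}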
@@ -414,16 +428,6 @@ const ( // IPAMAPIQPSLimit is the default QPS limit when rate limiting access to external APIs IPAMAPIQPSLimit = 4.0 - // IPAMPodCIDRAllocationThreshold is the default value for - // CiliumNode.Spec.IPAM.PodCIDRAllocationThreshold if no value is set - // Defaults to 8, which is similar to IPAMPreAllocation - IPAMPodCIDRAllocationThreshold = 8 - - // IPAMPodCIDRReleaseThreshold is the default value for - // CiliumNode.Spec.IPAM.PodCIDRReleaseThreshold if no value is set - // Defaults to 16, which is 2x the allocation threshold to avoid flapping - IPAMPodCIDRReleaseThreshold = 16 - // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node AutoCreateCiliumNodeResource = true @@ -432,13 +436,11 @@ const ( // policy updates are invoked. PolicyTriggerInterval = 1 * time.Second - // K8sClientQPSLimit is the default qps for the k8s client. It is set to 0 because the the k8s client - // has its own default. - K8sClientQPSLimit float32 = 0.0 + // K8sClientQPSLimit is the default qps for the cilium-agent k8s client. + K8sClientQPSLimit float32 = 10.0 - // K8sClientBurst is the default burst for the k8s client. It is set to 0 because the the k8s client - // has its own default. - K8sClientBurst = 0 + // K8sClientBurst is the default burst for the cilium-agent k8s client. + K8sClientBurst = 20 // K8sServiceCacheSize is the default value for option.K8sServiceCacheSize // which denotes the value of Cilium's K8s service cache size. @@ -463,9 +465,6 @@ const ( // specified in the L7 policies. CertsDirectory = RuntimePath + "/certs" - // EnableRemoteNodeIdentity is the default value for option.EnableRemoteNodeIdentity - EnableRemoteNodeIdentity = true - // IPAMExpiration is the timeout after which an IP subject to expiratio // is being released again if no endpoint is being created in time. IPAMExpiration = 10 * time.Minute @@ -502,11 +501,9 @@ const ( // InstallNoConntrackRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic. InstallNoConntrackIptRules = false - // WireguardSubnetV4 is a default wireguard tunnel subnet - WireguardSubnetV4 = "172.16.43.0/24" - - // WireguardSubnetV6 is a default wireguard tunnel subnet - WireguardSubnetV6 = "fdc9:281f:04d7:9ee9::1/64" + // ContainerIPLocalReservedPortsAuto instructs the Cilium CNI plugin to reserve + // an auto-generated list of ports in the container network namespace + ContainerIPLocalReservedPortsAuto = "auto" // ExternalClusterIP enables cluster external access to ClusterIP services. // Defaults to false to retain prior behaviour of not routing external packets to ClusterIPs. @@ -521,13 +518,16 @@ const ( // TunnelProtocol is the default tunneling protocol TunnelProtocol = "vxlan" + // ServiceNoBackendResponse is the default response for services without backends + ServiceNoBackendResponse = "reject" + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. UseCiliumInternalIPForIPsec = false // TunnelPortVXLAN is the default VXLAN port - TunnelPortVXLAN = 8472 + TunnelPortVXLAN uint16 = 8472 // TunnelPortGeneve is the default Geneve port - TunnelPortGeneve = 6081 + TunnelPortGeneve uint16 = 6081 // ARPBaseReachableTime resembles the kernel's NEIGH_VAR_BASE_REACHABLE_TIME which defaults to 30 seconds. ARPBaseReachableTime = 30 * time.Second @@ -541,6 +541,50 @@ const ( // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. 
EnableK8sNetworkPolicy = true + + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy = true + + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. + EnableCiliumClusterwideNetworkPolicy = true + + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. + // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters = 255 + + // EnableNodeSelectorLabels is the default value for option.EnableNodeSelectorLabels + EnableNodeSelectorLabels = false + + // BPFEventsDropEnabled controls whether the Cilium datapath exposes "drop" events to Cilium monitor and Hubble. + BPFEventsDropEnabled = true + + // BPFEventsPolicyVerdictEnabled controls whether the Cilium datapath exposes "policy verdict" events to Cilium monitor and Hubble. + BPFEventsPolicyVerdictEnabled = true + + // BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble. + BPFEventsTraceEnabled = true + + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccounting = false + + // EnableEnvoyConfig is the default value for option.EnableEnvoyConfig + EnableEnvoyConfig = false + + // NetNsPath is the default path to the mounted network namespaces directory + NetNsPath = "/var/run/cilium/netns" + + // EnableIternalTrafficPolicy is the default value for option.EnableInternalTrafficPolicy + EnableInternalTrafficPolicy = true + + // EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode + EnableNonDefaultDenyPolicies = true + + // EnableSourceIPVerification is the default value for source ip validation + EnableSourceIPVerification = true ) var ( @@ -573,4 +617,6 @@ var ( "cilium_lb6_source_range": "enabled,128,0", "cilium_lb6_affinity_match": "enabled,128,0", } + + PolicyCIDRMatchMode = []string{} ) diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/node.go b/vendor/github.com/cilium/cilium/pkg/defaults/node.go index facc7dec83..4cfeef0027 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/node.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/node.go @@ -23,6 +23,18 @@ const ( // SecondHostDevice is the name of the second interface of the host veth pair. SecondHostDevice = "cilium_net" + // IPIPv4Device is a device of type 'ipip', created by the agent. + IPIPv4Device = "cilium_ipip4" + + // IPIPv6Device is a device of type 'ip6tnl', created by the agent. + IPIPv6Device = "cilium_ipip6" + + // GeneveDevice is a device of type 'geneve', created by the agent. + GeneveDevice = "cilium_geneve" + + // VxlanDevice is a device of type 'vxlan', created by the agent. + VxlanDevice = "cilium_vxlan" + // CiliumK8sAnnotationPrefix is the prefix key for the annotations used in kubernetes. CiliumK8sAnnotationPrefix = "cilium.io/" @@ -52,4 +64,13 @@ var ( // IPv4DefaultRoute is the default IPv4 route. IPv4DefaultRoute = net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)} + + // ExcludedDevicePrefixes are prefixes that we don't consider during automatic device detection. 
+ ExcludedDevicePrefixes = []string{ + "cilium_", + "lo", + "lxc", + "cni", + "docker", + } ) diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go index 73a2159bde..96b8d7b102 100644 --- a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go +++ b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go @@ -33,8 +33,16 @@ const ( // container ID. The container ID is specific to the container runtime // in use. Only the primary container that defines the networking scope // can be used to address an endpoint. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. Use CNIAttachmentIdPrefix instead ContainerIdPrefix PrefixType = "container-id" + // CNIAttachmentIdPrefix is used to address an endpoint via its primary + // container ID and container interface passed to the CNI plugin. + // This attachment ID uniquely identifies a CNI ADD and CNI DEL invocation pair. + CNIAttachmentIdPrefix PrefixType = "cni-attachment-id" + // DockerEndpointPrefix is used to address an endpoint via the Docker // endpoint ID. This method is only possible if the endpoint was // created via the cilium-docker plugin and the container is backed by @@ -45,11 +53,22 @@ const ( // container's name. This addressing mechanism depends on the container // runtime. Only the primary container that the networking scope can be // used to address an endpoint. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. Use CNIAttachmentIdPrefix instead ContainerNamePrefix PrefixType = "container-name" + // CEPNamePrefix is used to address an endpoint via its Kubernetes + // CiliumEndpoint resource name. This addressing only works if the endpoint + // is represented as a Kubernetes CiliumEndpoint resource. + CEPNamePrefix PrefixType = "cep-name" + // PodNamePrefix is used to address an endpoint via the Kubernetes pod // name. This addressing only works if the endpoint represents as // Kubernetes pod. + // This can only be used to look up endpoints which have not opted-out of + // legacy identifiers. + // Deprecated. May not be unique. Use CEPNamePrefix instead. PodNamePrefix PrefixType = "pod-name" // IPv4Prefix is used to address an endpoint via the endpoint's IPv4 @@ -62,7 +81,7 @@ const ( // NewCiliumID returns a new endpoint identifier of type CiliumLocalIdPrefix func NewCiliumID(id int64) string { - return fmt.Sprintf("%s:%d", CiliumLocalIdPrefix, id) + return NewID(CiliumLocalIdPrefix, strconv.FormatInt(id, 10)) } // NewID returns a new endpoint identifier @@ -82,9 +101,19 @@ func NewIPPrefixID(ip netip.Addr) string { return "" } +// NewCNIAttachmentID returns an identifier based on the CNI attachment ID. If +// the containerIfName is empty, only the containerID will be used. +func NewCNIAttachmentID(containerID, containerIfName string) string { + id := containerID + if containerIfName != "" { + id = containerID + ":" + containerIfName + } + return NewID(CNIAttachmentIdPrefix, id) +} + // splitID splits ID into prefix and id. No validation is performed on prefix. 
func splitID(id string) (PrefixType, string) { - if idx := strings.Index(id, ":"); idx > -1 { + if idx := strings.IndexByte(id, ':'); idx > -1 { return PrefixType(id[:idx]), id[idx+1:] } @@ -100,7 +129,7 @@ func ParseCiliumID(id string) (int64, error) { } n, err := strconv.ParseInt(id, 0, 64) if err != nil || n < 0 { - return 0, fmt.Errorf("invalid numeric cilium id: %s", err) + return 0, fmt.Errorf("invalid numeric cilium id: %w", err) } if n > MaxEndpointID { return 0, fmt.Errorf("endpoint id too large: %d", n) @@ -113,7 +142,16 @@ func ParseCiliumID(id string) (int64, error) { func Parse(id string) (PrefixType, string, error) { prefix, id := splitID(id) switch prefix { - case CiliumLocalIdPrefix, CiliumGlobalIdPrefix, ContainerIdPrefix, DockerEndpointPrefix, ContainerNamePrefix, PodNamePrefix, IPv4Prefix, IPv6Prefix: + case CiliumLocalIdPrefix, + CiliumGlobalIdPrefix, + CNIAttachmentIdPrefix, + ContainerIdPrefix, + DockerEndpointPrefix, + ContainerNamePrefix, + CEPNamePrefix, + PodNamePrefix, + IPv4Prefix, + IPv6Prefix: return prefix, id, nil } diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/client.go b/vendor/github.com/cilium/cilium/pkg/health/client/client.go index d61a3783ef..c30e1b25f4 100644 --- a/vendor/github.com/cilium/cilium/pkg/health/client/client.go +++ b/vendor/github.com/cilium/cilium/pkg/health/client/client.go @@ -164,7 +164,7 @@ func GetPathConnectivityStatusType(cp *models.PathStatus) ConnectivityStatusType return status } -func SummarizePathConnectivityStatusType(cps []*models.PathStatus) ConnectivityStatusType { +func SummarizePathConnectivityStatus(cps []*models.PathStatus) ConnectivityStatusType { status := ConnStatusReachable for _, cp := range cps { switch GetPathConnectivityStatusType(cp) { @@ -181,6 +181,16 @@ func SummarizePathConnectivityStatusType(cps []*models.PathStatus) ConnectivityS return status } +// Returns a map of ConnectivityStatusType --> # of paths with ConnectivityStatusType +func SummarizePathConnectivityStatusType(cps []*models.PathStatus) map[ConnectivityStatusType]int { + status := make(map[ConnectivityStatusType]int) + for _, cp := range cps { + cst := GetPathConnectivityStatusType(cp) + status[cst]++ + } + return status +} + func formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) { status := cs.Status switch GetConnectivityStatusType(cs) { @@ -310,48 +320,88 @@ func GetAllEndpointAddresses(node *models.NodeStatus) []*models.PathStatus { return append([]*models.PathStatus{node.HealthEndpoint.PrimaryAddress}, node.HealthEndpoint.SecondaryAddresses...) 
} -func formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) { +func formatNodeStatus(w io.Writer, node *models.NodeStatus, allNodes, verbose, localhost bool) bool { localStr := "" if localhost { localStr = " (localhost)" } - if succinct { - if printAll || !nodeIsHealthy(node) { - fmt.Fprintf(w, " %s%s\t%s\t%s\t%s\n", node.Name, - localStr, getPrimaryAddressIP(node), - SummarizePathConnectivityStatusType(GetAllHostAddresses(node)).String(), - SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)).String()) - } - } else { + + if verbose { fmt.Fprintf(w, " %s%s:\n", node.Name, localStr) formatPathStatus(w, "Host", GetHostPrimaryAddress(node), " ", verbose) unhealthyPaths := !allPathsAreHealthyOrUnknown(GetHostSecondaryAddresses(node)) if (verbose || unhealthyPaths) && node.Host != nil { for _, addr := range node.Host.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) + formatPathStatus(w, "Secondary Host", addr, " ", verbose) } } formatPathStatus(w, "Endpoint", GetEndpointPrimaryAddress(node), " ", verbose) unhealthyPaths = !allPathsAreHealthyOrUnknown(GetEndpointSecondaryAddresses(node)) if (verbose || unhealthyPaths) && node.HealthEndpoint != nil { for _, addr := range node.HealthEndpoint.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) + formatPathStatus(w, "Secondary Endpoint", addr, " ", verbose) } } + return true + } + + hostStatuses := SummarizePathConnectivityStatusType(GetAllHostAddresses(node)) + endpointStatuses := SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)) + + if !nodeIsHealthy(node) { + ips := []string{getPrimaryAddressIP(node)} + for _, addr := range GetHostSecondaryAddresses(node) { + if addr == nil { + continue + } + ips = append(ips, addr.IP) + } + fmt.Fprintf(w, " %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node))) + if hostStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node))) + if endpointStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\n") + return true } + + if allNodes { + ips := []string{getPrimaryAddressIP(node)} + for _, addr := range GetHostSecondaryAddresses(node) { + if addr == nil { + continue + } + ips = append(ips, addr.IP) + } + fmt.Fprintf(w, " %s%s\t%s\t%d/%d", node.Name, localStr, strings.Join(ips, ","), hostStatuses[ConnStatusReachable], len(GetAllHostAddresses(node))) + if hostStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", hostStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\t%d/%d", endpointStatuses[ConnStatusReachable], len(GetAllEndpointAddresses(node))) + if endpointStatuses[ConnStatusUnknown] > 0 { + fmt.Fprintf(w, " (%d unknown)", endpointStatuses[ConnStatusUnknown]) + } + fmt.Fprintf(w, "\n") + return true + } + + return false } // FormatHealthStatusResponse writes a HealthStatusResponse as a string to the // writer. 
// -// 'printAll', if true, causes all nodes to be printed regardless of status -// 'succinct', if true, causes node health to be output as one line per node -// 'verbose', if true, overrides 'succinct' and prints all information +// 'allNodes', if true, causes all nodes to be printed regardless of status +// 'verbose', if true, prints all information // 'maxLines', if nonzero, determines the maximum number of lines to print -func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) { +func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, allNodes bool, verbose bool, maxLines int) { var ( - healthy int - localhost *models.NodeStatus + healthy int + localhost *models.NodeStatus + printedLines int ) for _, node := range sr.Nodes { if nodeIsHealthy(node) { @@ -361,37 +411,35 @@ func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, pr localhost = node } } - if succinct { - fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n", - healthy, len(sr.Nodes), sr.Timestamp) - if printAll || healthy < len(sr.Nodes) { - fmt.Fprintf(w, " Name\tIP\tNode\tEndpoints\n") - } - } else { - fmt.Fprintf(w, "Probe time:\t%s\n", sr.Timestamp) - fmt.Fprintf(w, "Nodes:\n") - } + + fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n", + healthy, len(sr.Nodes), sr.Timestamp) + + fmt.Fprintf(w, "Name\tIP\tNode\tEndpoints\n") if localhost != nil { - formatNodeStatus(w, localhost, printAll, succinct, verbose, true) - maxLines-- + if formatNodeStatus(w, localhost, allNodes, verbose, true) { + printedLines++ + } } nodes := sr.Nodes sort.Slice(nodes, func(i, j int) bool { return strings.Compare(nodes[i].Name, nodes[j].Name) < 0 }) - for n, node := range nodes { - if maxLines > 0 && n > maxLines { + for _, node := range nodes { + if printedLines == maxLines { break } if node == localhost { continue } - formatNodeStatus(w, node, printAll, succinct, verbose, false) + if formatNodeStatus(w, node, allNodes, verbose, false) { + printedLines++ + } } - if maxLines > 0 && len(sr.Nodes)-healthy > maxLines { - fmt.Fprintf(w, " ...") + if len(sr.Nodes)-printedLines-healthy > 0 { + fmt.Fprintf(w, " ...\n") } } @@ -399,9 +447,9 @@ func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, pr // daemon via the default channel and formats its output as a string to the // writer. // -// 'succinct', 'verbose' and 'maxLines' are handled the same as in +// 'verbose' and 'maxLines' are handled the same as in // FormatHealthStatusResponse(). 
-func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) { +func GetAndFormatHealthStatus(w io.Writer, allNodes bool, verbose bool, maxLines int) { client, err := NewClient("") if err != nil { fmt.Fprintf(w, "Cluster health:\t\t\tClient error: %s\n", err) @@ -413,5 +461,5 @@ func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) fmt.Fprintf(w, "Cluster health:\t\t\tWarning\tcilium-health daemon unreachable\n") return } - FormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines) + FormatHealthStatusResponse(w, hr.Payload, allNodes, verbose, maxLines) } diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/modules.go b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go new file mode 100644 index 0000000000..e2292b980f --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/health/client/modules.go @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package client + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/duration" + + "github.com/cilium/cilium/pkg/hive/health/types" +) + +const ( + noPod = "(/)" + rootNode = "" + noErr = "" +) + +// GetAndFormatModulesHealth retrieves modules health and formats output. +// +// Deprecated: Following #30925 we will move to either a separate cilium-dbg command or +// use health statedb dump data to render status reports externally. +// Following this we should remove this output as part of status in version v1.17. +func GetAndFormatModulesHealth(w io.Writer, ss []types.Status, verbose bool) { + // Although status' is received from the statedb remote table according to + // the order in which it's queried (in our case, by primary index identifier). + // We sort this to ensure order stability regardless. + sort.Slice(ss, func(i, j int) bool { + return ss[i].ID.String() < ss[j].ID.String() + }) + fmt.Fprintf(w, "Modules Health:") + if verbose { + r := newRoot(rootNode) + for _, s := range ss { + stack := strings.Split(s.ID.String(), ".") + upsertTree(r, &s, stack) + } + r = r.nodes[0] + r.parent = nil + body := strings.ReplaceAll(r.String(), "\n", "\n ") + fmt.Fprintln(w, "\n\t\t"+body) + return + } + tally := make(map[types.Level]int, 4) + for _, s := range ss { + tally[types.Level(s.Level)] += 1 + } + fmt.Fprintf(w, "\t%s(%d) %s(%d) %s(%d)\n", + types.LevelStopped, + tally[types.LevelStopped], + types.LevelDegraded, + tally[types.LevelDegraded], + types.LevelOK, + tally[types.LevelOK], + ) +} + +type TreeView struct { + root *node +} + +func NewTreeView() *TreeView { + return &TreeView{ + root: newRoot("agent"), + } +} + +func (t *TreeView) Render() { + fmt.Fprintln(os.Stdout, "\n"+t.root.String()) +} + +func (t *TreeView) UpsertStatus(ss []types.Status) { + for _, s := range ss { + upsertTree(t.root, &s, strings.Split(s.ID.String(), ".")) + } +} + +func Render(ss []types.Status) { + n := newRoot("agent") + for _, s := range ss { + upsertTree(n, &s, strings.Split(s.ID.String(), ".")) + } + body := strings.ReplaceAll(n.String(), "\n", "\n ") + fmt.Fprintln(os.Stdout, "\n\t\t"+body) +} + +// upsertTree inserts a health report, using a stack of path tokens into +// a tree used for displaying health data. +// +// Because there is no longer a distinction between reporter leaves and parent nodes +// (i.e. 
parents of subtrees can have their own health status) we modify the tree to +// move all such "parent" reports down to a immediate child, such that in our output +// all health reports appear as leaves. + +// upsertTree inserts a health report, using a stack of path tokens into +// a tree used for displaying health data. +// +// Because there is no longer a distinction between reporter leaves and parent nodes +// (i.e. parents of subtrees can have their own health status) we modify the tree to +// move all such "parent" reports down to a immediate child, such that in our output +// all health reports appear as leaves. +func upsertTree(r *node, report *types.Status, stack []string) { + if len(stack) == 1 { + name := stack[0] + meta := fmt.Sprintf("[%s] %s", strings.ToUpper(string(report.Level)), report.Message) + meta += fmt.Sprintf(" (%s, x%d)", ToAgeHuman(report.Updated), report.Count) + for _, c := range r.nodes { + if c.val == name { + c.meta = meta + c.report = report + return + } + } + r.addNodeWithMeta(name, meta, report) + return + } + pop := stack[0] + stack = stack[1:] + for _, c := range r.nodes { + if c.val == pop { + // In this case, if the node was a leaf, it may contain a status. + // Because parent nodes can now also have health status reports we + // fix this up by moving the report to a leaf node, thus maintaining + // the condition that only leaves have reporters. + if c.report != nil { + // Move former parent nodes health report to child leaf. + upsertTree(c, c.report, []string{"[reporter]"}) + c.report = nil + c.meta = "" + } + upsertTree(c, report, stack) + return + } + } + // Add parent node. + n := r.addNode(pop, nil) + upsertTree(n, report, stack) +} + +// ToAgeHuman converts time to duration. +func ToAgeHuman(t time.Time) string { + if t.IsZero() { + return "n/a" + } + + return duration.HumanDuration(time.Since(t)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/tree.go b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go new file mode 100644 index 0000000000..febac0144a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/health/client/tree.go @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package client + +import ( + "bytes" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/cilium/cilium/pkg/hive/health/types" +) + +const ( + indentSize = 3 + leafMaxWidth = 40 + link decoration = "│" + mid decoration = "├──" + end decoration = "└──" +) + +type decoration string + +func newRoot(r string) *node { + return &node{val: r} +} + +type node struct { + val, meta string + parent *node + nodes []*node + report *types.Status +} + +func (n *node) addNode(v string, r *types.Status) *node { + return n.addNodeWithMeta(v, "", r) +} + +func (n *node) addNodeWithMeta(v, m string, r *types.Status) *node { + node := node{ + parent: n, + val: v, + meta: m, + report: r, + } + n.nodes = append(n.nodes, &node) + + return &node +} + +func (n *node) addBranch(v string) *node { + return n.addBranchWithMeta(v, "") +} + +func (n *node) addBranchWithMeta(v, m string) *node { + b := node{ + parent: n, + meta: m, + val: v, + } + n.nodes = append(n.nodes, &b) + + return &b +} + +func (n *node) find(val string) *node { + if n.val == val { + return n + } + for _, node := range n.nodes { + if node.val == val { + return node + } + if v := node.find(val); v != nil { + return v + } + } + + return nil +} + +func (n *node) asBytes() []byte { + var ( + w = new(bytes.Buffer) + levelsEnded []int + max = 
computeMaxLevel(0, n) + ) + if n.parent == nil { + w.WriteString(n.val) + if n.meta != "" { + w.WriteString(" " + n.meta) + } + fmt.Fprintln(w) + } else { + edge := mid + if len(n.nodes) == 0 { + edge = end + levelsEnded = append(levelsEnded, 0) + } + dumpVals(w, 0, max, levelsEnded, edge, n) + } + if len(n.nodes) > 0 { + dumpNodes(w, 0, max, levelsEnded, n.nodes) + } + + return w.Bytes() +} + +func (n *node) String() string { + return string(n.asBytes()) +} + +func (n *node) lastNode() *node { + c := len(n.nodes) + if c == 0 { + return nil + } + + return n.nodes[c-1] +} + +func computeMaxLevel(level int, n *node) int { + if n == nil || len(n.nodes) == 0 { + return level + } + var max int + for _, n := range n.nodes { + m := computeMaxLevel(level+1, n) + if m > max { + max = m + } + } + + return max +} + +func dumpNodes(w io.Writer, level, maxLevel int, levelsEnded []int, nodes []*node) { + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].val < nodes[j].val + }) + + for i, node := range nodes { + edge := mid + if i == len(nodes)-1 { + levelsEnded = append(levelsEnded, level) + edge = end + } + dumpVals(w, level, maxLevel, levelsEnded, edge, node) + if len(node.nodes) > 0 { + dumpNodes(w, level+1, maxLevel, levelsEnded, node.nodes) + } + } +} + +func dumpVals(w io.Writer, level, maxLevel int, levelsEnded []int, edge decoration, node *node) { + for i := 0; i < level; i++ { + if isEnded(levelsEnded, i) { + fmt.Fprint(w, strings.Repeat(" ", indentSize+1)) + continue + } + fmt.Fprintf(w, "%s%s", link, strings.Repeat(" ", indentSize)) + } + + val := dumpVal(level, node) + if node.meta != "" { + c := maxLevel - level + if c < 0 { + c = 0 + } + fmt.Fprintf(w, "%s %-"+strconv.Itoa(leafMaxWidth+c*2)+"s%s%s\n", edge, val, strings.Repeat(" ", c), node.meta) + return + } + fmt.Fprintf(w, "%s %s\n", edge, val) +} + +func isEnded(levelsEnded []int, level int) bool { + for _, l := range levelsEnded { + if l == level { + return true + } + } + + return false +} + +func dumpVal(level int, node *node) string { + lines := strings.Split(node.val, "\n") + if len(lines) < 2 { + return node.val + } + + pad := indent(level, node) + for i := 1; i < len(lines); i++ { + lines[i] = fmt.Sprintf("%s%s", pad, lines[i]) + } + + return strings.Join(lines, "\n") +} + +func indent(level int, node *node) string { + links := make([]string, level+1) + for node.parent != nil { + if isLast(node) { + links[level] = strings.Repeat(" ", indentSize+1) + } else { + links[level] = fmt.Sprintf("%s%s", link, strings.Repeat(" ", indentSize)) + } + level-- + node = node.parent + } + + return strings.Join(links, "") +} + +func isLast(n *node) bool { + return n == n.parent.lastNode() +} diff --git a/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go index d84cecd2c5..d58a98e9fe 100644 --- a/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go @@ -20,4 +20,7 @@ const ( // HealthEPName is the name used for the health endpoint, which is also // used by the CLI client to detect when connectivity health is enabled HealthEPName = "cilium-health-ep" + + // PidfilePath + PidfilePath = "health-endpoint.pid" ) diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go deleted file mode 100644 index 860e03be38..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/config.go +++ /dev/null @@ -1,156 +0,0 @@ -// 
SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cell - -import ( - "fmt" - "reflect" - "strings" - - "github.com/mitchellh/mapstructure" - "github.com/spf13/pflag" - "go.uber.org/dig" - - "github.com/cilium/cilium/pkg/command" -) - -// Config constructs a new config cell. -// -// The configuration struct `T` needs to implement the Flags method that -// registers the flags. The structure is populated and provided via dependency -// injection by Hive.Run(). The underlying mechanism for populating the struct -// is viper's Unmarshal(). -func Config[Cfg Flagger](def Cfg) Cell { - c := &config[Cfg]{defaultConfig: def, flags: pflag.NewFlagSet("", pflag.ContinueOnError)} - def.Flags(c.flags) - return c -} - -// Flagger is implemented by configuration structs to provide configuration -// for a cell. -type Flagger interface { - // Flags registers the configuration options as command-line flags. - // - // By convention a flag name matches the field name - // if they're the same under case-insensitive comparison when dashes are - // removed. E.g. "my-config-flag" matches field "MyConfigFlag". The - // correspondence to the flag can be also specified with the mapstructure - // tag: MyConfigFlag `mapstructure:"my-config-flag"`. - // - // Exported fields that are not found from the viper settings will cause - // hive.Run() to fail. Unexported fields are ignored. - // - // See https://pkg.go.dev/github.com/mitchellh/mapstructure for more info. - Flags(*pflag.FlagSet) -} - -// config is a cell for configuration. It registers the config's command-line -// flags and provides the parsed config to the hive. -type config[Cfg Flagger] struct { - defaultConfig Cfg - flags *pflag.FlagSet -} - -type AllSettings map[string]any - -type configParams[Cfg Flagger] struct { - dig.In - AllSettings AllSettings - Override func(*Cfg) `optional:"true"` -} - -func (c *config[Cfg]) provideConfig(p configParams[Cfg]) (Cfg, error) { - settings := p.AllSettings - target := c.defaultConfig - decoder, err := mapstructure.NewDecoder(decoderConfig(&target)) - if err != nil { - return target, fmt.Errorf("failed to create config decoder: %w", err) - } - - // As input, only consider the declared flags. - input := make(map[string]any) - - c.flags.VisitAll(func(f *pflag.Flag) { - if v, ok := settings[f.Name]; ok { - input[f.Name] = v - } else { - err = fmt.Errorf("internal error: %s not found from settings", f.Name) - } - }) - if err != nil { - return target, err - } - if err := decoder.Decode(input); err != nil { - return target, fmt.Errorf("failed to unmarshal config struct %T: %w.\n"+ - "Hint: field 'FooBar' matches flag 'foo-bar', or use tag `mapstructure:\"flag-name\"` to match field with flag", - target, err) - } - - // See if the configuration was overridden with ConfigOverride. We check the override - // after the decode to validate that the config struct is properly formed and all - // flags are registered. - if p.Override != nil { - p.Override(&target) - } - - return target, nil -} - -func decoderConfig(target any) *mapstructure.DecoderConfig { - return &mapstructure.DecoderConfig{ - Metadata: nil, - Result: target, - WeaklyTypedInput: true, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - stringToMapHookFunc(), - ), - ZeroFields: true, - // Error out if the config struct has fields that are - // not found from input. - ErrorUnset: true, - // Error out also if settings from input are not used. 
- ErrorUnused: true, - // Match field FooBarBaz with "foo-bar-baz" by removing - // the dashes from the flag. - MatchName: func(mapKey, fieldName string) bool { - return strings.EqualFold( - strings.ReplaceAll(mapKey, "-", ""), - fieldName) - }, - } -} - -func (c *config[Cfg]) Apply(cont container) error { - // Register the flags to the global set of all flags. - err := cont.Invoke( - func(allFlags *pflag.FlagSet) { - allFlags.AddFlagSet(c.flags) - }) - if err != nil { - return err - } - // And provide the constructor for the config. - return cont.Provide(c.provideConfig, dig.Export(true)) -} - -func (c *config[Cfg]) Info(cont container) (info Info) { - cont.Invoke(func(cfg Cfg) { - info = &InfoStruct{cfg} - }) - return -} - -// stringToMapHookFunc returns a DecodeHookFunc that converts string -// to map[string]string supporting both json and KV formats. -func stringToMapHookFunc() mapstructure.DecodeHookFunc { - return func(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { - if from != reflect.String || to != reflect.Map { - return data, nil - } - - return command.ToStringMapStringE(data.(string)) - } -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go deleted file mode 100644 index 98388d8f23..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/health.go +++ /dev/null @@ -1,240 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cell - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - "github.com/cilium/cilium/pkg/lock" - - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" -) - -// Level denotes what kind an update is. -type Level string - -const ( - // StatusUnknown is the default status of a Module, prior to it reporting - // any status. - // All created - StatusUnknown Level = "Unknown" - - // StatusStopped is the status of a Module that has completed, further updates - // will not be processed. - StatusStopped Level = "Stopped" - - // StatusDegraded is the status of a Module that has entered a degraded state. - StatusDegraded Level = "Degraded" - - // StatusOK is the status of a Module that has achieved a desired state. - StatusOK Level = "OK" -) - -// HealthReporter provides a method of declaring a Modules health status. -type HealthReporter interface { - // OK declares that a Module has achieved a desired state and has not entered - // any unexpected or incorrect states. - // Modules should only declare themselves as 'OK' once they have stabilized, - // rather than during their initial state. This should be left to be reported - // as the default "unknown" to denote that the module has not reached a "ready" - // health state. - OK(status string) - - // Stopped reports that a module has completed, and will no longer report any - // health status. - Stopped(reason string) - - // Degraded declares that a module has entered a degraded state. - // This means that it may have failed to provide it's intended services, or - // to perform it's desired task. - Degraded(reason string, err error) -} - -// Health provides exported functions for accessing health status data. -// As well, provides unexported functions for use during module apply. -type Health interface { - // All returns a copy of all module statuses. - // This includes unknown status for modules that have not reported a status yet. - All() []Status - - // Get returns a copy of a modules status, by module ID. 
- // This includes unknown status for modules that have not reported a status yet. - Get(string) *Status - - // Stop stops the health provider from processing updates. - Stop(context.Context) error - - // forModule creates a moduleID scoped reporter handle. - forModule(string) HealthReporter - - // processed returns the number of updates processed. - processed() uint64 -} - -// Update is an event that denotes the change of a modules health state. -type Update struct { - Level - ModuleID string - Message string - Err error -} - -// Status is a modules last health state, including the last update. -type Status struct { - // Update is the last reported update for a module. - Update - // Stopped is true when a module has been completed, thus it contains - // its last reporter status. New updates will not be processed. - Stopped bool - // Final is the stopped message, if the module has been stopped. - Final string - // LastOK is the time of the last OK status update. - LastOK time.Time - // LastUpdated is the time of the last status update. - LastUpdated time.Time -} - -// String returns a string representation of a Status, implements fmt.Stringer. -func (s *Status) String() string { - var sinceLast string - if s.LastUpdated.IsZero() { - sinceLast = "never" - } else { - sinceLast = time.Since(s.LastUpdated).String() + " ago" - } - return fmt.Sprintf("Status{ModuleID: %s, Level: %s, Since: %s, Message: %s, Err: %v}", - s.ModuleID, s.Level, sinceLast, s.Message, s.Err) -} - -// NewHealthProvider starts and returns a health status which processes -// health status updates. -func NewHealthProvider() Health { - p := &healthProvider{ - moduleStatuses: make(map[string]Status), - running: true, - } - return p -} - -func (p *healthProvider) processed() uint64 { - return p.numProcessed.Load() -} - -func (p *healthProvider) process(u Update) { - prev := func() Status { - p.mu.Lock() - defer p.mu.Unlock() - - t := time.Now() - prev := p.moduleStatuses[u.ModuleID] - - if !p.running { - return prev - } - - ns := Status{ - Update: u, - LastUpdated: t, - } - switch u.Level { - case StatusOK: - ns.LastOK = t - case StatusStopped: - // If Stopped, set that module was stopped and preserve last known status. - ns = prev - ns.Stopped = true - ns.Final = u.Message - } - p.moduleStatuses[u.ModuleID] = ns - log.WithField("status", ns.String()).Debug("Processed new health status") - return prev - }() - p.numProcessed.Add(1) - if prev.Stopped { - log.Warnf("module %q reported health status after being Stopped", u.ModuleID) - } -} - -// Finish stops the status provider, and waits for all updates to be processed or -// returns an error if the context is cancelled first. -func (p *healthProvider) Stop(ctx context.Context) error { - p.mu.Lock() - defer p.mu.Unlock() - p.running = false // following this, no new reporters will send. - return nil -} - -// forModule returns a module scoped status reporter handle for emitting status updates. -// This is used to automatically provide declared modules with a status reported. -func (p *healthProvider) forModule(moduleID string) HealthReporter { - p.mu.Lock() - p.moduleStatuses[moduleID] = Status{Update: Update{ - ModuleID: moduleID, - Level: StatusUnknown, - Message: "No status reported yet"}, - } - p.mu.Unlock() - - return &reporter{ - moduleID: moduleID, - process: p.process, - } -} - -// All returns a copy of all the latest statuses. 
-func (p *healthProvider) All() []Status { - p.mu.RLock() - all := maps.Values(p.moduleStatuses) - p.mu.RUnlock() - slices.SortFunc(all, func(a, b Status) bool { - return a.ModuleID < b.ModuleID - }) - return all -} - -// Get returns the latest status for a module, by module ID. -func (p *healthProvider) Get(moduleID string) *Status { - p.mu.RLock() - defer p.mu.RUnlock() - s, ok := p.moduleStatuses[moduleID] - if ok { - return &s - } - return nil -} - -type healthProvider struct { - mu lock.RWMutex - - running bool - numProcessed atomic.Uint64 - - moduleStatuses map[string]Status -} - -// reporter is a handle for emitting status updates. -type reporter struct { - moduleID string - process func(Update) -} - -// Degraded reports a degraded status update, should be used when a module encounters a -// a state that is not fully reconciled. -func (r *reporter) Degraded(reason string, err error) { - r.process(Update{ModuleID: r.moduleID, Level: StatusDegraded, Message: reason, Err: err}) -} - -// Stopped reports that a module has stopped, further updates will not be processed. -func (r *reporter) Stopped(reason string) { - r.process(Update{ModuleID: r.moduleID, Level: StatusStopped, Message: reason}) -} - -// OK reports that a module is in a healthy state. -func (r *reporter) OK(status string) { - r.process(Update{ModuleID: r.moduleID, Level: StatusOK, Message: status}) -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go deleted file mode 100644 index 449c2748a3..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/invoke.go +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cell - -import ( - "time" - - "github.com/cilium/cilium/pkg/hive/internal" -) - -type invoker struct { - cont container - funcs []namedFunc -} - -type namedFunc struct { - name string - fn any -} - -type InvokerList interface { - AppendInvoke(func() error) -} - -func (i *invoker) invoke() error { - for _, afn := range i.funcs { - log.WithField("function", afn.name).Debug("Invoking") - t0 := time.Now() - if err := i.cont.Invoke(afn.fn); err != nil { - log.WithError(err).WithField("", afn.name).Error("Invoke failed") - return err - } - d := time.Since(t0) - log.WithField("duration", d).WithField("function", afn.name).Info("Invoked") - } - return nil -} - -func (i *invoker) Apply(c container) error { - // Remember the scope in which we need to invoke. - i.cont = c - - // Append the invoker to the list of invoke functions. These are invoked - // prior to start to build up the objects. They are not invoked directly - // here as first the configuration flags need to be registered. This allows - // using hives in a command-line application with many commands and where - // we don't yet know which command to run, but we still need to register - // all the flags. - return c.Invoke(func(l InvokerList) { - l.AppendInvoke(i.invoke) - }) -} - -func (i *invoker) Info(container) Info { - n := NewInfoNode("") - for _, namedFunc := range i.funcs { - n.AddLeaf("🛠️ %s: %s", namedFunc.name, internal.PrettyType(namedFunc.fn)) - } - return n -} - -// Invoke constructs a cell for invoke functions. The invoke functions are executed -// when the hive is started to instantiate all objects via the constructors. 
-func Invoke(funcs ...any) Cell { - namedFuncs := []namedFunc{} - for _, fn := range funcs { - namedFuncs = append( - namedFuncs, - namedFunc{name: internal.FuncNameAndLocation(fn), fn: fn}) - } - return &invoker{funcs: namedFuncs} -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go deleted file mode 100644 index 76b9cdbf40..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/metric.go +++ /dev/null @@ -1,138 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cell - -import ( - "fmt" - "reflect" - - "github.com/prometheus/client_golang/prometheus" - "go.uber.org/dig" - - "github.com/cilium/cilium/pkg/hive/internal" - pkgmetric "github.com/cilium/cilium/pkg/metrics/metric" -) - -var ( - withMeta pkgmetric.WithMetadata - collector prometheus.Collector -) - -// Metric constructs a new metric cell. -// -// This cell type provides `S` to the hive as returned by `ctor`, it also makes each individual field -// value available via the `hive-metrics` value group. Infrastructure components such as a registry, -// inspection tool, or documentation generator can collect all metrics in the hive via this value group. -// -// The `ctor` constructor must return a struct or pointer to a struct of type `S`. The returned struct -// must only contain public fields. All field types should implement the -// `github.com/cilium/cilium/pkg/metrics/metric.WithMetadata` -// and `github.com/prometheus/client_golang/prometheus.Collector` interfaces. -func Metric[S any](ctor func() S) Cell { - var nilOut S - outTyp := reflect.TypeOf(nilOut) - if outTyp.Kind() == reflect.Ptr { - outTyp = outTyp.Elem() - } - - if outTyp.Kind() != reflect.Struct { - panic(fmt.Errorf( - "cell.Metric must be invoked with a constructor function that returns a struct or pointer to a struct, "+ - "a constructor which returns a %s was supplied", - outTyp.Kind(), - )) - } - - // Let's be strict for now, could lift this in the future if we ever need to - if outTyp.NumField() == 0 { - panic(fmt.Errorf( - "cell.Metric must be invoked with a constructor function that returns exactly a struct with at least 1 " + - "metric, a constructor which returns a struct with zero fields was supplied", - )) - } - - withMetaTyp := reflect.TypeOf(&withMeta).Elem() - collectorTyp := reflect.TypeOf(&collector).Elem() - for i := 0; i < outTyp.NumField(); i++ { - field := outTyp.Field(i) - if !field.IsExported() { - panic(fmt.Errorf( - "The struct returned by the constructor passed to cell.Metric has a private field '%s', which "+ - "is not allowed. 
All fields on the returning struct must be exported", - field.Name, - )) - } - - if !field.Type.Implements(withMetaTyp) { - panic(fmt.Errorf( - "The struct returned by the constructor passed to cell.Metric has a field '%s', which is not metric.WithMetadata.", - field.Name, - )) - } - - if !field.Type.Implements(collectorTyp) { - panic(fmt.Errorf( - "The struct returned by the constructor passed to cell.Metric has a field '%s', which is not prometheus.Collector.", - field.Name, - )) - } - } - - return &metric[S]{ - ctor: ctor, - } -} - -type metric[S any] struct { - ctor func() S -} - -type metricOut struct { - dig.Out - - Metrics []pkgmetric.WithMetadata `group:"hive-metrics,flatten"` -} - -func (m *metric[S]) provideMetrics(metricSet S) metricOut { - var metrics []pkgmetric.WithMetadata - - value := reflect.ValueOf(metricSet) - typ := value.Type() - if typ.Kind() == reflect.Pointer { - value = value.Elem() - typ = typ.Elem() - } - - if typ.Kind() != reflect.Struct { - return metricOut{} - } - - for i := 0; i < typ.NumField(); i++ { - if withMeta, ok := value.Field(i).Interface().(pkgmetric.WithMetadata); ok { - metrics = append(metrics, withMeta) - } - } - - return metricOut{ - Metrics: metrics, - } -} - -func (m *metric[S]) Info(container) Info { - n := NewInfoNode(fmt.Sprintf("📈 %s", internal.FuncNameAndLocation(m.ctor))) - n.condensed = true - - return n -} - -func (m *metric[S]) Apply(container container) error { - // Provide the supplied constructor, so its return type is directly accessible by cells - container.Provide(m.ctor, dig.Export(true)) - - // Provide the metrics provider, which will take the return value of the constructor and turn it into a - // slice of metrics to be consumed by anyone interested in handling them. - container.Provide(m.provideMetrics, dig.Export(true)) - - return nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go b/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go deleted file mode 100644 index 74fa5f98e1..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/module.go +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cell - -import ( - "fmt" - "regexp" - - "github.com/sirupsen/logrus" - "go.uber.org/dig" - - "github.com/cilium/cilium/pkg/logging/logfields" -) - -// Module creates a scoped set of cells with a given identifier. -// -// The id and title will be included in the object dump (hive.PrintObjects). -// The id must be lower-case, at most 30 characters and only contain [a-z0-9-_]. -// Title can contain [a-zA-Z0-9_- ] and must be shorter than 80 characters. -// -// Private constructors with a module (ProvidePrivate) are only accessible -// within this module and its sub-modules. -func Module(id, title string, cells ...Cell) Cell { - validateIDAndTitle(id, title) - return &module{id, title, cells} -} - -var ( - idRegex = regexp.MustCompile(`^[a-z][a-z0-9_\-]{1,30}$`) - titleRegex = regexp.MustCompile(`^[a-zA-Z0-9_\- ]{1,80}$`) -) - -func validateIDAndTitle(id, title string) { - if !idRegex.MatchString(id) { - panic(fmt.Sprintf("Invalid hive.Module id: %q, expected to id match %s", id, idRegex)) - } - if !titleRegex.MatchString(title) { - panic(fmt.Sprintf("Invalid hive.Module title: %q, expected to title match %s", title, titleRegex)) - } -} - -type module struct { - // id is the module identity. It is shown in object output and is used to derive - // the scoped logger. - id string - - // title is a human-readable short title for the module. 
Shown in object output - // alongside the identifier. - title string - - cells []Cell -} - -func (m *module) logger(log logrus.FieldLogger) logrus.FieldLogger { - return log.WithField(logfields.LogSubsys, m.id) -} - -func (m *module) moduleScopedStatusReporter(p Health) HealthReporter { - return p.forModule(m.id) -} - -func (m *module) Apply(c container) error { - scope := c.Scope(m.id) - - // Provide module scoped status reporter, used for reporting module level - // health status. - if err := scope.Provide(m.moduleScopedStatusReporter, dig.Export(false)); err != nil { - return err - } - - if err := scope.Decorate(m.logger); err != nil { - return err - } - - for _, cell := range m.cells { - if err := cell.Apply(scope); err != nil { - return err - } - } - return nil -} - -func (m *module) Info(c container) Info { - n := NewInfoNode("Ⓜ️ " + m.id + " (" + m.title + ")") - for _, cell := range m.cells { - n.Add(cell.Info(c)) - } - return n -} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/health/types/types.go b/vendor/github.com/cilium/cilium/pkg/hive/health/types/types.go new file mode 100644 index 0000000000..704611896c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/hive/health/types/types.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package types + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/cilium/hive/cell" +) + +// Provider has functionality to create health reporters, scoped a +// module. +type Provider interface { + ForModule(mid cell.FullModuleID) cell.Health +} + +type pathIdent []string + +func (p pathIdent) String() string { + if len(p) == 0 { + return "" + } + return strings.Join(p, ".") +} + +// HealthID is used as the key for the primary index for health status +// tables. +type HealthID string + +func (id HealthID) String() string { + return string(id) +} + +// Identifier is a fully qualified, path based identifier for health status +// which is made up of module ID and component ID parts. +type Identifier struct { + Module cell.FullModuleID + Component pathIdent +} + +// WithSubComponent returns view of an identifier with an appended +// subcomponent. +func (i Identifier) WithSubComponent(name string) Identifier { + return Identifier{ + Module: i.Module, + Component: append(i.Component, name), + } +} + +func (i Identifier) String() string { + return strings.Join([]string{i.Module.String(), i.Component.String()}, ".") +} + +func (i Identifier) HealthID() HealthID { + return HealthID(i.String()) +} + +// Status represents a current health status update. +type Status struct { + ID Identifier + Level Level + Message string + Error string + LastOK time.Time + Updated time.Time + Stopped time.Time + // Final is the final message set when a status is stopped. 
+ Final string + Count uint64 +} + +func (Status) TableHeader() []string { + return []string{"Module", "Component", "Level", "Message", "Error", "LastOK", "UpdatedAt", "Count"} +} + +func (s Status) TableRow() []string { + return []string{ + s.ID.Module.String(), + s.ID.Component.String(), + string(s.Level), + s.Message, + s.Error, + s.LastOK.Format(time.RFC3339), + s.Updated.Format(time.RFC3339), + strconv.FormatUint(s.Count, 10), + } +} + +func (s Status) String() string { + if s.Error != "" { + return fmt.Sprintf("%s: [%s] %s: %s", s.ID.String(), s.Level, s.Message, s.Error) + } else { + return fmt.Sprintf("%s: [%s] %s", s.ID.String(), s.Level, s.Message) + } +} + +type Level string + +func (s Level) String() string { + return string(s) +} + +const ( + LevelOK = "OK" + LevelDegraded = "Degraded" + LevelStopped = "Stopped" +) diff --git a/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go b/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go deleted file mode 100644 index c216e41db0..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/hive/internal/reflect.go +++ /dev/null @@ -1,36 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package internal - -import ( - "fmt" - "path" - "reflect" - "regexp" - "runtime" - "strings" -) - -var ( - baseNameRegex = regexp.MustCompile(`^github\.com/cilium/cilium/[\w\/]+/`) -) - -func TrimName(name string) string { - return string(baseNameRegex.ReplaceAll([]byte(name), []byte{})) -} - -func PrettyType(x any) string { - return TrimName(fmt.Sprintf("%T", x)) -} - -func FuncNameAndLocation(fn any) string { - f := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()) - file, line := f.FileLine(f.Entry()) - name := TrimName(f.Name()) - name = strings.TrimSuffix(name, "-fm") - if file != "" { - return fmt.Sprintf("%s (%s:%d)", name, path.Base(file), line) - } - return name -} diff --git a/vendor/github.com/cilium/cilium/pkg/ip/cidr.go b/vendor/github.com/cilium/cilium/pkg/ip/cidr.go index eae17bc5fb..93481dae63 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/cidr.go @@ -6,6 +6,8 @@ package ip import ( "net" "net/netip" + + "go4.org/netipx" ) // ParseCIDRs fetches all CIDRs referred to by the specified slice and returns @@ -57,26 +59,27 @@ func ParsePrefixes(cidrs []string) (valid []netip.Prefix, invalid []string, erro return valid, invalid, errors } -// AddrToIPNet is a convenience helper to convert a netip.Addr to a *net.IPNet -// with a mask corresponding to the addresses's bit length. -func AddrToIPNet(addr netip.Addr) *net.IPNet { - if !addr.IsValid() { - return nil - } - return &net.IPNet{ - IP: addr.AsSlice(), - Mask: net.CIDRMask(addr.BitLen(), addr.BitLen()), - } -} - // IPToNetPrefix is a convenience helper for migrating from the older 'net' // standard library types to the newer 'netip' types. Use this to plug the new // types in newer code into older types in older code during the migration. // // Note: This function assumes given ip is not an IPv4 mapped IPv6 address. -// See the comment of AddrFromIP for more details. +// +// The problem behind this is that when we convert the IPv4 net.IP address with +// netip.AddrFromSlice, the address is interpreted as an IPv4 mapped IPv6 address in some +// cases. +// +// For example, when we do netip.AddrFromSlice(net.ParseIP("1.1.1.1")), it is interpreted +// as an IPv6 address "::ffff:1.1.1.1". 
This is because 1) net.IP created with +// net.ParseIP(IPv4 string) holds IPv4 address as an IPv4 mapped IPv6 address internally +// and 2) netip.AddrFromSlice recognizes address family with length of the slice (4-byte = +// IPv4 and 16-byte = IPv6). +// +// By using netipx.FromStdIP, we can preserve the address family, but since we cannot distinguish +// IPv4 and IPv4 mapped IPv6 address only from net.IP value (see #37921 on golang/go) we +// need an assumption that given net.IP is not an IPv4 mapped IPv6 address. func IPToNetPrefix(ip net.IP) netip.Prefix { - a, ok := AddrFromIP(ip) + a, ok := netipx.FromStdIP(ip) if !ok { return netip.Prefix{} } diff --git a/vendor/github.com/cilium/cilium/pkg/ip/ip.go b/vendor/github.com/cilium/cilium/pkg/ip/ip.go index 14ef09eb14..7f457f340c 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/ip.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/ip.go @@ -9,9 +9,10 @@ import ( "math/big" "net" "net/netip" + "slices" "sort" - "github.com/cilium/cilium/pkg/slices" + "go4.org/netipx" ) const ( @@ -291,20 +292,27 @@ func PrefixCeil(numIPs int, multiple int) int { return quotient } -// PrefixToIps converts the given prefix to an array containing all IPs in the prefix / CIDR block. -func PrefixToIps(prefixCidr string) ([]string, error) { +// PrefixToIps converts the given prefix to an array containing IPs in the provided +// prefix/CIDR block. When maxIPs is set to 0, the returned array will contain all IPs +// in the given prefix. Otherwise, the returned array of IPs will be limited to the +// value of maxIPs starting at the first IP in the provided CIDR. For example, when +// providing 192.168.1.0/28 as a CIDR with 4 maxIPs, 192.168.1.0, 192.168.1.1, +// 192.168.1.2, 192.168.1.3 will be returned. +func PrefixToIps(prefixCidr string, maxIPs int) ([]string, error) { var prefixIps []string _, ipNet, err := net.ParseCIDR(prefixCidr) if err != nil { return prefixIps, err } netWithRange := ipNetToRange(*ipNet) - for ip := *netWithRange.First; !ip.Equal(*netWithRange.Last); ip = GetNextIP(ip) { + // Ensure last IP in the prefix is included + for ip := *netWithRange.First; len(prefixIps) < maxIPs || maxIPs == 0; ip = getNextIP(ip) { prefixIps = append(prefixIps, ip.String()) + if ip.Equal(*netWithRange.Last) { + break + } } - // Add the last IP - prefixIps = append(prefixIps, netWithRange.Last.String()) return prefixIps, nil } @@ -361,9 +369,9 @@ func getPreviousIP(ip net.IP) net.IP { return previousIP } -// GetNextIP returns the next IP from the given IP address. If the given IP is +// getNextIP returns the next IP from the given IP address. If the given IP is // the last IP of a v4 or v6 range, the same IP is returned. -func GetNextIP(ip net.IP) net.IP { +func getNextIP(ip net.IP) net.IP { if ip.Equal(upperIPv4) || ip.Equal(upperIPv6) { return ip } @@ -588,7 +596,7 @@ func rangeToCIDRs(firstIP, lastIP net.IP) []*net.IPNet { if bytes.Compare(*lastIPSpanning, lastIP) > 0 { // Split on the next IP of the last IP so that the left list of IPs // of the partition include the lastIP. - nextFirstRangeIP := GetNextIP(lastIP) + nextFirstRangeIP := getNextIP(lastIP) var bitLen int if nextFirstRangeIP.To4() != nil { bitLen = ipv4BitLen @@ -739,36 +747,13 @@ func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, [ return left, excludeList, right } -// KeepUniqueIPs transforms the provided multiset of IPs into a single set, -// lexicographically sorted via a byte-wise comparison of the IP slices (i.e. -// IPv4 addresses show up before IPv6). 
-// The slice is manipulated in-place destructively. -func KeepUniqueIPs(ips []net.IP) []net.IP { - return slices.SortedUniqueFunc( - ips, - func(i, j int) bool { - return bytes.Compare(ips[i], ips[j]) == -1 - }, - func(a, b net.IP) bool { - return a.Equal(b) - }, - ) -} - // KeepUniqueAddrs transforms the provided multiset of IP addresses into a // single set, lexicographically sorted via comparison of the addresses using // netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6). // The slice is manipulated in-place destructively; it does not create a new slice. func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr { - return slices.SortedUniqueFunc( - addrs, - func(i, j int) bool { - return addrs[i].Compare(addrs[j]) < 0 - }, - func(a, b netip.Addr) bool { - return a == b - }, - ) + SortAddrList(addrs) + return slices.Compact(addrs) } var privateIPBlocks []*net.IPNet @@ -854,30 +839,34 @@ func ListContainsIP(ipList []net.IP, ip net.IP) bool { // SortIPList sorts the provided net.IP slice in place. func SortIPList(ipList []net.IP) { - sort.Slice(ipList, func(i, j int) bool { - return bytes.Compare(ipList[i], ipList[j]) < 0 - }) + slices.SortFunc(ipList, func(a, b net.IP) int { return bytes.Compare(a, b) }) +} + +func SortAddrList(ipList []netip.Addr) { + slices.SortFunc(ipList, netip.Addr.Compare) } // getSortedIPList returns a new net.IP slice in which the IPs are sorted. func getSortedIPList(ipList []net.IP) []net.IP { sortedIPList := make([]net.IP, len(ipList)) - for i := 0; i < len(ipList); i++ { - sortedIPList[i] = ipList[i] - } - + copy(sortedIPList, ipList) SortIPList(sortedIPList) + return sortedIPList } -// SortedIPListsAreEqual compares two lists of sorted IPs. If any differ it returns -// false. -func SortedIPListsAreEqual(a, b []net.IP) bool { +// UnsortedIPListsAreEqual returns true if the list of net.IP provided is same +// without considering the order of the IPs in the list. The function will first +// attempt to sort both the IP lists and then validate equality for sorted lists. +func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool { // The IP set is definitely different if the lengths are different. - if len(a) != len(b) { + if len(ipList1) != len(ipList2) { return false } + a := getSortedIPList(ipList1) + b := getSortedIPList(ipList2) + // Lengths are equal, so each member in one set must be in the other // If any IPs at the same index differ the sorted IP list are not equal. for i := range a { @@ -888,21 +877,6 @@ func SortedIPListsAreEqual(a, b []net.IP) bool { return true } -// UnsortedIPListsAreEqual returns true if the list of net.IP provided is same -// without considering the order of the IPs in the list. The function will first -// attempt to sort both the IP lists and then validate equality for sorted lists. -func UnsortedIPListsAreEqual(ipList1, ipList2 []net.IP) bool { - // The IP set is definitely different if the lengths are different. - if len(ipList1) != len(ipList2) { - return false - } - - sortedIPList1 := getSortedIPList(ipList1) - sortedIPList2 := getSortedIPList(ipList2) - - return SortedIPListsAreEqual(sortedIPList1, sortedIPList2) -} - // GetIPFromListByFamily returns a single IP address of the provided family from a list // of ip addresses. func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP { @@ -915,48 +889,13 @@ func GetIPFromListByFamily(ipList []net.IP, v4Family bool) net.IP { return nil } -// AddrFromIP converts a net.IP to netip.Addr using netip.AddrFromSlice, but preserves -// the original address family. 
It assumes given net.IP is not an IPv4 mapped IPv6 -// address. -// -// The problem behind this is that when we convert the IPv4 net.IP address with -// netip.AddrFromSlice, the address is interpreted as an IPv4 mapped IPv6 address in some -// cases. -// -// For example, when we do netip.AddrFromSlice(net.ParseIP("1.1.1.1")), it is interpreted -// as an IPv6 address "::ffff:1.1.1.1". This is because 1) net.IP created with -// net.ParseIP(IPv4 string) holds IPv4 address as an IPv4 mapped IPv6 address internally -// and 2) netip.AddrFromSlice recognizes address family with length of the slice (4-byte = -// IPv4 and 16-byte = IPv6). -// -// By using AddrFromIP, we can preserve the address family, but since we cannot distinguish -// IPv4 and IPv4 mapped IPv6 address only from net.IP value (see #37921 on golang/go) we -// need an assumption that given net.IP is not an IPv4 mapped IPv6 address. -func AddrFromIP(ip net.IP) (netip.Addr, bool) { - addr, ok := netip.AddrFromSlice(ip) - if !ok { - return addr, ok - } - return addr.Unmap(), ok -} - -// MustAddrFromIP is the same as AddrFromIP except that it assumes the input is -// a valid IP address and always returns a valid netip.Addr. -func MustAddrFromIP(ip net.IP) netip.Addr { - addr, ok := AddrFromIP(ip) - if !ok { - panic("addr is not a valid IP address") - } - return addr -} - // MustAddrsFromIPs converts a slice of net.IP to a slice of netip.Addr. It assumes // the input slice contains only valid IP addresses and always returns a slice // containing valid netip.Addr. func MustAddrsFromIPs(ips []net.IP) []netip.Addr { addrs := make([]netip.Addr, 0, len(ips)) for _, ip := range ips { - addrs = append(addrs, MustAddrFromIP(ip)) + addrs = append(addrs, netipx.MustFromStdIP(ip)) } return addrs } diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go index 2d82d82bce..90f2b9bb7c 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go @@ -23,9 +23,6 @@ const ( // option.IPAM IPAMClusterPool = "cluster-pool" - // IPAMClusterPoolV2 is the value to select cluster pool version 2 - IPAMClusterPoolV2 = "cluster-pool-v2beta" - // IPAMMultiPool is the value to select the multi pool IPAM mode IPAMMultiPool = "multi-pool" @@ -49,6 +46,3 @@ const ( // prefixes. Every /28 prefix contains 16 IP addresses. // See https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-prefix-eni.html#ec2-prefix-basics for more details const ENIPDBlockSizeIPv4 = 16 - -// PoolDefault is the default IP pool from which to allocate. 
-const PoolDefault = "default" diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go index e85b7fa0d5..5f938642ee 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/types.go @@ -5,6 +5,7 @@ package types import ( "fmt" + "net/netip" "github.com/cilium/cilium/pkg/cidr" "github.com/cilium/cilium/pkg/lock" @@ -56,6 +57,19 @@ type AllocationMap map[string]AllocationIP // +kubebuilder:validation:Format=cidr type IPAMPodCIDR string +func (c *IPAMPodCIDR) ToPrefix() (*netip.Prefix, error) { + if c == nil { + return nil, fmt.Errorf("nil ipam cidr") + } + + prefix, err := netip.ParsePrefix(string(*c)) + if err != nil { + return nil, fmt.Errorf("failed to parse ipam cidr %v: %w", c, err) + } + + return &prefix, nil +} + // IPAMPoolAllocation describes an allocation of an IPAM pool from the operator to the // node. It contains the assigned PodCIDRs allocated from this pool type IPAMPoolAllocation struct { @@ -105,13 +119,20 @@ type IPAMPoolSpec struct { // // This structure is embedded into v2.CiliumNode type IPAMSpec struct { - // Pool is the list of IPs available to the node for allocation. When - // an IP is used, the IP will remain on this list but will be added to + // Pool is the list of IPv4 addresses available to the node for allocation. + // When an IPv4 address is used, it will remain on this list but will be added to // Status.IPAM.Used // // +optional Pool AllocationMap `json:"pool,omitempty"` + // IPv6Pool is the list of IPv6 addresses available to the node for allocation. + // When an IPv6 address is used, it will remain on this list but will be added to + // Status.IPAM.IPv6Used + // + // +optional + IPv6Pool AllocationMap `json:"ipv6-pool,omitempty"` + // Pools contains the list of assigned IPAM pools for this node. // // +optional @@ -158,27 +179,15 @@ type IPAMSpec struct { // +kubebuilder:validation:Minimum=0 MaxAboveWatermark int `json:"max-above-watermark,omitempty"` - // PodCIDRAllocationThreshold defines the minimum number of free IPs which - // must be available to this node via its pod CIDR pool. If the total number - // of IP addresses in the pod CIDR pool is less than this value, the pod - // CIDRs currently in-use by this node will be marked as depleted and - // cilium-operator will allocate a new pod CIDR to this node. - // This value effectively defines the buffer of IP addresses available - // immediately without requiring cilium-operator to get involved. + // StaticIPTags are used to determine the pool of IPs from which to + // attribute a static IP to the node. For example in AWS this is used to + // filter Elastic IP Addresses. // - // +kubebuilder:validation:Minimum=0 - PodCIDRAllocationThreshold int `json:"pod-cidr-allocation-threshold,omitempty"` - - // PodCIDRReleaseThreshold defines the maximum number of free IPs which may - // be available to this node via its pod CIDR pool. While the total number - // of free IP addresses in the pod CIDR pool is larger than this value, - // cilium-agent will attempt to release currently unused pod CIDRs. 
- // - // +kubebuilder:validation:Minimum=0 - PodCIDRReleaseThreshold int `json:"pod-cidr-release-threshold,omitempty"` + // +optional + StaticIPTags map[string]string `json:"static-ip-tags,omitempty"` } -// IPReleaseStatus defines the valid states in IP release handshake +// IPReleaseStatus defines the valid states in IP release handshake // // +kubebuilder:validation:Enum=marked-for-release;ready-for-release;do-not-release;released type IPReleaseStatus string @@ -187,12 +196,18 @@ type IPReleaseStatus string // // This structure is embedded into v2.CiliumNode type IPAMStatus struct { - // Used lists all IPs out of Spec.IPAM.Pool which have been allocated + // Used lists all IPv4 addresses out of Spec.IPAM.Pool which have been allocated // and are in use. // // +optional Used AllocationMap `json:"used,omitempty"` + // IPv6Used lists all IPv6 addresses out of Spec.IPAM.IPv6Pool which have been + // allocated and are in use. + // + // +optional + IPv6Used AllocationMap `json:"ipv6-used,omitempty"` + // PodCIDRs lists the status of each pod CIDR allocated to this node. // // +optional @@ -203,8 +218,8 @@ type IPAMStatus struct { // +optional OperatorStatus OperatorStatus `json:"operator-status,omitempty"` - // ReleaseIPs tracks the state for every IP considered for release. - // value can be one of the following string : + // ReleaseIPs tracks the state for every IPv4 address considered for release. + // The value can be one of the following strings: // * marked-for-release : Set by operator as possible candidate for IP // * ready-for-release : Acknowledged as safe to release by agent // * do-not-release : IP already in use / not owned by the node. Set by agent @@ -212,6 +227,21 @@ type IPAMStatus struct { // // +optional ReleaseIPs map[string]IPReleaseStatus `json:"release-ips,omitempty"` + + // ReleaseIPv6s tracks the state for every IPv6 address considered for release. + // The value can be one of the following strings: + // * marked-for-release : Set by operator as possible candidate for IP + // * ready-for-release : Acknowledged as safe to release by agent + // * do-not-release : IP already in use / not owned by the node. Set by agent + // * released : IP successfully released. 
Set by operator + // + // +optional + ReleaseIPv6s map[string]IPReleaseStatus `json:"release-ipv6s,omitempty"` + + // AssignedStaticIP is the static IP assigned to the node (ex: public Elastic IP address in AWS) + // + // +optional + AssignedStaticIP string `json:"assigned-static-ip,omitempty"` } // IPAMPoolRequest is a request from the agent to the operator, indicating how @@ -279,19 +309,26 @@ type Subnet struct { // Name is the subnet name Name string - // CIDR is the CIDR associated with the subnet + // CIDR is the IPv4 CIDR associated with the subnet CIDR *cidr.CIDR + // IPv6CIDR is the IPv6 CIDR associated with the subnet + IPv6CIDR *cidr.CIDR + // AvailabilityZone is the availability zone of the subnet AvailabilityZone string // VirtualNetworkID is the virtual network the subnet is in VirtualNetworkID string - // AvailableAddresses is the number of addresses available for + // AvailableAddresses is the number of IPv4 addresses available for // allocation AvailableAddresses int + // AvailableIPv6Addresses is the number of IPv6 addresses available for + // allocation + AvailableIPv6Addresses int + // Tags is the tags of the subnet Tags Tags } @@ -330,6 +367,9 @@ type VirtualNetwork struct { // CIDRs is the list of secondary IPv4 CIDR ranges associated with the VPC CIDRs []string + + // IPv6CIDRs is the list of IPv6 CIDR ranges associated with the VPC + IPv6CIDRs []string } // VirtualNetworkMap indexes virtual networks by their ID @@ -351,6 +391,9 @@ type PoolQuota struct { // AvailableIPs is the number of available IPs in the pool AvailableIPs int + + // AvailableIPv6s is the number of available IPv6 addresses in the pool + AvailableIPv6s int } // PoolQuotaMap is a map of pool quotas indexes by pool identifier @@ -366,6 +409,9 @@ type Interface interface { // ForeachAddress must iterate over all addresses of the interface and // call fn for each address ForeachAddress(instanceID string, fn AddressIterator) error + + // DeepCopyInterface returns a deep copy of the underlying interface type. + DeepCopyInterface() Interface } // InterfaceRevision is the configurationr revision of a network interface. It @@ -410,6 +456,13 @@ func NewInstanceMap() *InstanceMap { return &InstanceMap{data: map[string]*Instance{}} } +// UpdateInstance updates the interfaces map for a particular instance. +func (m *InstanceMap) UpdateInstance(instanceID string, instance *Instance) { + m.mutex.Lock() + m.data[instanceID] = instance + m.mutex.Unlock() +} + // Update updates the definition of an interface for a particular instance. If // the interface is already known, the definition is updated, otherwise the // interface is added to the instance. 
@@ -540,6 +593,7 @@ func (m *InstanceMap) DeepCopy() *InstanceMap { c := NewInstanceMap() m.ForeachInterface("", func(instanceID, interfaceID string, rev InterfaceRevision) error { // c is not exposed yet, we can access it without locking it + rev.Resource = rev.Resource.DeepCopyInterface() c.updateLocked(instanceID, rev) return nil }) diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go index dc090499f1..b0af2cd053 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepcopy.go @@ -138,12 +138,26 @@ func (in *IPAMSpec) DeepCopyInto(out *IPAMSpec) { (*out)[key] = val } } + if in.IPv6Pool != nil { + in, out := &in.IPv6Pool, &out.IPv6Pool + *out = make(AllocationMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } in.Pools.DeepCopyInto(&out.Pools) if in.PodCIDRs != nil { in, out := &in.PodCIDRs, &out.PodCIDRs *out = make([]string, len(*in)) copy(*out, *in) } + if in.StaticIPTags != nil { + in, out := &in.StaticIPTags, &out.StaticIPTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -167,6 +181,13 @@ func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) { (*out)[key] = val } } + if in.IPv6Used != nil { + in, out := &in.IPv6Used, &out.IPv6Used + *out = make(AllocationMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } if in.PodCIDRs != nil { in, out := &in.PodCIDRs, &out.PodCIDRs *out = make(PodCIDRMap, len(*in)) @@ -182,6 +203,13 @@ func (in *IPAMStatus) DeepCopyInto(out *IPAMStatus) { (*out)[key] = val } } + if in.ReleaseIPv6s != nil { + in, out := &in.ReleaseIPv6s, &out.ReleaseIPv6s + *out = make(map[string]IPReleaseStatus, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -310,6 +338,10 @@ func (in *Subnet) DeepCopyInto(out *Subnet) { in, out := &in.CIDR, &out.CIDR *out = (*in).DeepCopy() } + if in.IPv6CIDR != nil { + in, out := &in.IPv6CIDR, &out.IPv6CIDR + *out = (*in).DeepCopy() + } if in.Tags != nil { in, out := &in.Tags, &out.Tags *out = make(Tags, len(*in)) @@ -390,6 +422,11 @@ func (in *VirtualNetwork) DeepCopyInto(out *VirtualNetwork) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.IPv6CIDRs != nil { + in, out := &in.IPv6CIDRs, &out.IPv6CIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go index b27fd1c459..d0065ee549 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/types/zz_generated.deepequal.go @@ -171,6 +171,13 @@ func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool { } } + if ((in.IPv6Pool != nil) && (other.IPv6Pool != nil)) || ((in.IPv6Pool == nil) != (other.IPv6Pool == nil)) { + in, other := &in.IPv6Pool, &other.IPv6Pool + if other == nil || !in.DeepEqual(other) { + return false + } + } + if !in.Pools.DeepEqual(&other.Pools) { return false } @@ -204,11 +211,25 @@ func (in *IPAMSpec) DeepEqual(other *IPAMSpec) bool { if in.MaxAboveWatermark != other.MaxAboveWatermark { return false } - if in.PodCIDRAllocationThreshold != other.PodCIDRAllocationThreshold { - return false - } - if in.PodCIDRReleaseThreshold != other.PodCIDRReleaseThreshold { - return false + if 
((in.StaticIPTags != nil) && (other.StaticIPTags != nil)) || ((in.StaticIPTags == nil) != (other.StaticIPTags == nil)) { + in, other := &in.StaticIPTags, &other.StaticIPTags + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } } return true @@ -228,6 +249,13 @@ func (in *IPAMStatus) DeepEqual(other *IPAMStatus) bool { } } + if ((in.IPv6Used != nil) && (other.IPv6Used != nil)) || ((in.IPv6Used == nil) != (other.IPv6Used == nil)) { + in, other := &in.IPv6Used, &other.IPv6Used + if other == nil || !in.DeepEqual(other) { + return false + } + } + if ((in.PodCIDRs != nil) && (other.PodCIDRs != nil)) || ((in.PodCIDRs == nil) != (other.PodCIDRs == nil)) { in, other := &in.PodCIDRs, &other.PodCIDRs if other == nil || !in.DeepEqual(other) { @@ -260,6 +288,31 @@ func (in *IPAMStatus) DeepEqual(other *IPAMStatus) bool { } } + if ((in.ReleaseIPv6s != nil) && (other.ReleaseIPv6s != nil)) || ((in.ReleaseIPv6s == nil) != (other.ReleaseIPv6s == nil)) { + in, other := &in.ReleaseIPv6s, &other.ReleaseIPv6s + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + } + + if in.AssignedStaticIP != other.AssignedStaticIP { + return false + } + return true } @@ -351,6 +404,9 @@ func (in *PoolQuota) DeepEqual(other *PoolQuota) bool { if in.AvailableIPs != other.AvailableIPs { return false } + if in.AvailableIPv6s != other.AvailableIPv6s { + return false + } return true } @@ -400,6 +456,14 @@ func (in *Subnet) DeepEqual(other *Subnet) bool { } } + if (in.IPv6CIDR == nil) != (other.IPv6CIDR == nil) { + return false + } else if in.IPv6CIDR != nil { + if !in.IPv6CIDR.DeepEqual(other.IPv6CIDR) { + return false + } + } + if in.AvailabilityZone != other.AvailabilityZone { return false } @@ -409,6 +473,9 @@ func (in *Subnet) DeepEqual(other *Subnet) bool { if in.AvailableAddresses != other.AvailableAddresses { return false } + if in.AvailableIPv6Addresses != other.AvailableIPv6Addresses { + return false + } if ((in.Tags != nil) && (other.Tags != nil)) || ((in.Tags == nil) != (other.Tags == nil)) { in, other := &in.Tags, &other.Tags if other == nil || !in.DeepEqual(other) { @@ -497,6 +564,23 @@ func (in *VirtualNetwork) DeepEqual(other *VirtualNetwork) bool { } } + if ((in.IPv6CIDRs != nil) && (other.IPv6CIDRs != nil)) || ((in.IPv6CIDRs == nil) != (other.IPv6CIDRs == nil)) { + in, other := &in.IPv6CIDRs, &other.IPv6CIDRs + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go index aa9c0318b1..98b457073f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/const.go @@ -30,19 +30,16 @@ const ( // running in PolicyLabelCluster = LabelPrefix + ".policy.cluster" - // PolicyLabelIstioSidecarProxy is the label key added to the identity of - // any pod that has been injected by Istio with a Cilium-compatible sidecar - // proxy. 
The value of this label is expected to be a boolean, i.e. "true" - // or "false". - PolicyLabelIstioSidecarProxy = LabelPrefix + ".policy.istiosidecarproxy" - // PodNamespaceMetaLabels is the label used to store the labels of the // kubernetes namespace's labels. PodNamespaceMetaLabels = LabelPrefix + ".namespace.labels" + // PodNamespaceMetaLabelsPrefix is the prefix used for kubernetes namespace's labels + PodNamespaceMetaLabelsPrefix = PodNamespaceMetaLabels + "." + // PodNamespaceMetaNameLabel is the label that Kubernetes automatically adds // to namespaces. - PodNamespaceMetaNameLabel = PodNamespaceMetaLabels + "." + LabelMetadataName + PodNamespaceMetaNameLabel = PodNamespaceMetaLabelsPrefix + LabelMetadataName // LabelMetadataName is the label name which, in-tree, is used to // automatically label namespaces, so they can be selected easily by tools @@ -61,9 +58,24 @@ const ( // documentation add the label for every resource object. AppKubernetes = "app.kubernetes.io" - // CtrlPrefixPolicyStatus is the prefix used for the controllers set up - // to sync the CNP with kube-apiserver. - CtrlPrefixPolicyStatus = "sync-cnp-policy-status" + // StatefulSetPodNameLabel is the label name which, in-tree, is used to + // automatically label Pods that are owned by StatefulSets with their name, + // so that one can attach a Service to a specific Pod in the StatefulSet. + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" + + // StatefulSetPodIndexLabel is the label name which, in-tree, is used to + // automatically label Pods that are owned by StatefulSets with their + // ordinal index. + StatefulSetPodIndexLabel = "apps.kubernetes.io/pod-index" + + // IndexedJobCompletionIndexLabel is the label name which, in-tree, is used + // to automatically label Pods that are owned by Indexed Jobs with their + // completion index. + IndexedJobCompletionIndexLabel = "batch.kubernetes.io/job-completion-index" + + // BatchJobControllerUID is one of the labels that is available on a Job + // https://kubernetes.io/docs/concepts/workloads/controllers/job/#job-labels + BatchJobControllerUID = "batch.kubernetes.io/controller-uid" // CiliumIdentityAnnotationDeprecated is the previous annotation key used to map to an endpoint's security identity. CiliumIdentityAnnotationDeprecated = "cilium-identity" diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go index 7585346d12..2e1d9f740b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go @@ -15,5 +15,5 @@ const ( // // Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release // Developers: Bump patch for each change in the CRD schema. - CustomResourceDefinitionSchemaVersion = "1.26.10" + CustomResourceDefinitionSchemaVersion = "1.30.8" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go index 08fb21a63a..b88ddedad5 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils/utils.go @@ -26,6 +26,12 @@ const ( // represent pods in the default namespace for any source type. podAnyPrefixLbl = labels.LabelSourceAnyKeyPrefix + k8sConst.PodNamespaceLabel + // podK8SNamespaceLabelsPrefix is the prefix use in the label selector for namespace labels. 
+ podK8SNamespaceLabelsPrefix = labels.LabelSourceK8sKeyPrefix + k8sConst.PodNamespaceMetaLabelsPrefix + // podAnyNamespaceLabelsPrefix is the prefix use in the label selector for namespace labels + // for any source type. + podAnyNamespaceLabelsPrefix = labels.LabelSourceAnyKeyPrefix + k8sConst.PodNamespaceMetaLabelsPrefix + // podInitLbl is the label used in a label selector to match on // initializing pods. podInitLbl = labels.LabelSourceReservedKeyPrefix + labels.IDNameInit @@ -82,15 +88,17 @@ func getEndpointSelector(namespace string, labelSelector *slim_metav1.LabelSelec // Those pods don't have any labels, so they don't have a namespace label either. // Don't add a namespace label to those endpoint selectors, or we wouldn't be // able to match on those pods. - if !matchesInit && !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) { + if !es.HasKey(podPrefixLbl) && !es.HasKey(podAnyPrefixLbl) { if namespace == "" { // For a clusterwide policy if a namespace is not specified in the labels we add // a selector to only match endpoints that contains a namespace label. // This is to make sure that we are only allowing traffic for cilium managed k8s endpoints // and even if a wildcard is provided in the selector we don't proceed with a truly // empty(allow all) endpoint selector for the policy. - es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{}) - } else { + if !matchesInit { + es.AddMatchExpression(podPrefixLbl, slim_metav1.LabelSelectorOpExists, []string{}) + } + } else if !es.HasKeyPrefix(podK8SNamespaceLabelsPrefix) && !es.HasKeyPrefix(podAnyNamespaceLabelsPrefix) { es.AddMatch(podPrefixLbl, namespace) } } @@ -109,6 +117,13 @@ func parseToCiliumIngressCommonRule(namespace string, es api.EndpointSelector, i } } + if ing.FromNodes != nil { + retRule.FromNodes = make([]api.EndpointSelector, len(ing.FromNodes)) + for j, node := range ing.FromNodes { + retRule.FromNodes[j] = api.NewESFromK8sLabelSelector("", node.LabelSelector) + } + } + if ing.FromCIDR != nil { retRule.FromCIDR = make([]api.CIDR, len(ing.FromCIDR)) copy(retRule.FromCIDR, ing.FromCIDR) @@ -131,6 +146,11 @@ func parseToCiliumIngressCommonRule(namespace string, es api.EndpointSelector, i copy(retRule.FromEntities, ing.FromEntities) } + if ing.FromGroups != nil { + retRule.FromGroups = make([]api.Groups, len(ing.FromGroups)) + copy(retRule.FromGroups, ing.FromGroups) + } + return retRule } @@ -183,7 +203,9 @@ func parseToCiliumEgressCommonRule(namespace string, es api.EndpointSelector, eg if egr.ToEndpoints != nil { retRule.ToEndpoints = make([]api.EndpointSelector, len(egr.ToEndpoints)) for j, ep := range egr.ToEndpoints { - retRule.ToEndpoints[j] = getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit) + endpointSelector := getEndpointSelector(namespace, ep.LabelSelector, true, matchesInit) + endpointSelector.Generated = ep.Generated + retRule.ToEndpoints[j] = endpointSelector } } @@ -214,8 +236,15 @@ func parseToCiliumEgressCommonRule(namespace string, es api.EndpointSelector, eg copy(retRule.ToEntities, egr.ToEntities) } + if egr.ToNodes != nil { + retRule.ToNodes = make([]api.EndpointSelector, len(egr.ToNodes)) + for j, node := range egr.ToNodes { + retRule.ToNodes[j] = api.NewESFromK8sLabelSelector("", node.LabelSelector) + } + } + if egr.ToGroups != nil { - retRule.ToGroups = make([]api.ToGroups, len(egr.ToGroups)) + retRule.ToGroups = make([]api.Groups, len(egr.ToGroups)) copy(retRule.ToGroups, egr.ToGroups) } @@ -299,13 +328,15 @@ func ParseToCiliumRule(namespace, name 
string, uid types.UID, r *api.Rule) *api. retRule.EndpointSelector = api.NewESFromK8sLabelSelector("", r.EndpointSelector.LabelSelector) // The PodSelector should only reflect to the same namespace // the policy is being stored, thus we add the namespace to - // the MatchLabels map. + // the MatchLabels map. Additionally, Policy repository relies + // on this fact to properly choose correct network policies for + // a given Security Identity. // - // Policies applying on initializing pods are a special case. - // Those pods don't have any labels, so they don't have a namespace label either. - // Don't add a namespace label to those endpoint selectors, or we wouldn't be - // able to match on those pods. - if !retRule.EndpointSelector.HasKey(podInitLbl) && namespace != "" { + // Policies applying to all namespaces are a special case. + // Such policies can match on any traffic from Pods or Nodes, + // so it wouldn't make sense to inject a namespace match for + // those policies. + if namespace != "" { userNamespace, present := r.EndpointSelector.GetMatch(podPrefixLbl) if present && !namespacesAreValid(namespace, userNamespace) { log.WithFields(logrus.Fields{ @@ -329,6 +360,7 @@ func ParseToCiliumRule(namespace, name string, uid types.UID, r *api.Rule) *api. retRule.Labels = ParseToCiliumLabels(namespace, name, uid, r.Labels) retRule.Description = r.Description + retRule.EnableDefaultDeny = r.EnableDefaultDeny return retRule } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go index 8628e7fdc1..48823bf8be 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccec_types.go @@ -23,7 +23,6 @@ type CiliumClusterwideEnvoyConfig struct { metav1.ObjectMeta `json:"metadata"` // +k8s:openapi-gen=false - // +kubebuilder:validation:Type=object Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go index 14ff2c20c5..36f8e62820 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/ccnp_types.go @@ -18,6 +18,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +deepequal-gen:private-method=true // +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumclusterwidenetworkpolicy",path="ciliumclusterwidenetworkpolicies",scope="Cluster",shortName={ccnp} +// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string // +kubebuilder:subresource:status // +kubebuilder:storageversion @@ -52,24 +53,6 @@ func (in *CiliumClusterwideNetworkPolicy) DeepEqual(other *CiliumClusterwideNetw return objectMetaDeepEqual(in.ObjectMeta, other.ObjectMeta) && in.deepEqual(other) } -// GetPolicyStatus returns the CiliumClusterwideNetworkPolicyNodeStatus corresponding to -// nodeName in the provided CiliumClusterwideNetworkPolicy. If Nodes within the rule's -// Status is nil, returns an empty CiliumClusterwideNetworkPolicyNodeStatus. 
-func (r *CiliumClusterwideNetworkPolicy) GetPolicyStatus(nodeName string) CiliumNetworkPolicyNodeStatus { - if r.Status.Nodes == nil { - return CiliumNetworkPolicyNodeStatus{} - } - return r.Status.Nodes[nodeName] -} - -// SetPolicyStatus sets the given policy status for the given nodes' map. -func (r *CiliumClusterwideNetworkPolicy) SetPolicyStatus(nodeName string, cnpns CiliumNetworkPolicyNodeStatus) { - if r.Status.Nodes == nil { - r.Status.Nodes = map[string]CiliumNetworkPolicyNodeStatus{} - } - r.Status.Nodes[nodeName] = cnpns -} - // SetDerivedPolicyStatus set the derivative policy status for the given // derivative policy name. func (r *CiliumClusterwideNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go index 12eb1312aa..e672fbb31b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cec_types.go @@ -14,6 +14,8 @@ import ( "google.golang.org/protobuf/types/known/anypb" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/loadbalancer" "github.com/cilium/cilium/pkg/option" ) @@ -32,7 +34,6 @@ type CiliumEnvoyConfig struct { metav1.ObjectMeta `json:"metadata"` // +k8s:openapi-gen=false - // +kubebuilder:validation:Type=object Spec CiliumEnvoyConfigSpec `json:"spec,omitempty"` } @@ -75,6 +76,13 @@ type CiliumEnvoyConfigSpec struct { // // +kubebuilder:validation:Required Resources []XDSResource `json:"resources,omitempty"` + + // NodeSelector is a label selector that determines to which nodes + // this configuration applies. + // If nil, then this config applies to all nodes. + // + // +kubebuilder:validation:Optional + NodeSelector *slim_metav1.LabelSelector `json:"nodeSelector,omitempty"` } type Service struct { @@ -90,13 +98,20 @@ type Service struct { // +kubebuilder:validation:Optional Namespace string `json:"namespace"` - // Port is the port number, which can be used for filtering in case of underlying + // Ports is a set of port numbers, which can be used for filtering in case of underlying // is exposing multiple port numbers. // // +kubebuilder:validation:Optional Ports []string `json:"number,omitempty"` } +func (l *Service) ServiceName() loadbalancer.ServiceName { + return loadbalancer.ServiceName{ + Namespace: l.Namespace, + Name: l.Name, + } +} + type ServiceListener struct { // Name is the name of a destination Kubernetes service that identifies traffic // to be redirected. @@ -110,6 +125,12 @@ type ServiceListener struct { // +kubebuilder:validation:Optional Namespace string `json:"namespace"` + // Ports is a set of service's frontend ports that should be redirected to the Envoy + // listener. By default all frontend ports of the service are redirected. + // + // +kubebuilder:validation:Optional + Ports []uint16 `json:"ports,omitempty"` + // Listener specifies the name of the Envoy listener the // service traffic is redirected to. 
The listener must be // specified in the Envoy 'resources' of the same @@ -122,6 +143,13 @@ type ServiceListener struct { Listener string `json:"listener"` } +func (l *ServiceListener) ServiceName() loadbalancer.ServiceName { + return loadbalancer.ServiceName{ + Namespace: l.Namespace, + Name: l.Name, + } +} + // +kubebuilder:pruning:PreserveUnknownFields type XDSResource struct { *anypb.Any `json:"-"` diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go index 9eb408a037..29e040820e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cegp_types.go @@ -103,7 +103,7 @@ type EgressGateway struct { // policy will use the first IPv4 assigned to the interface with the // default route. // - // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + // +kubebuilder:validation:Format=ipv4 EgressIP string `json:"egressIP,omitempty"` } @@ -115,4 +115,8 @@ type EgressRule struct { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. PodSelector *slimv1.LabelSelector `json:"podSelector,omitempty"` + + // This is a label selector which selects Pods by Node. This field follows standard label + // selector semantics; if present but empty, it selects all nodes. + NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"` } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go index 35f15d905f..8d6081182e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cew_types.go @@ -47,8 +47,6 @@ type CiliumExternalWorkload struct { // CiliumExternalWorkloadSpec specifies the configurations for redirecting traffic // within a workload. -// -// +kubebuilder:validation:Type=object type CiliumExternalWorkloadSpec struct { // IPv4AllocCIDR is the range of IPv4 addresses in the CIDR format that the external workload can // use to allocate IP addresses for the tunnel device and the health endpoint. 
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go index aafc5d40a4..d085df8f6a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/clrp_types.go @@ -151,21 +151,40 @@ type RedirectBackend struct { // CiliumLocalRedirectPolicySpec specifies the configurations for redirecting traffic // within a node. -// -// +kubebuilder:validation:Type=object type CiliumLocalRedirectPolicySpec struct { // RedirectFrontend specifies frontend configuration to redirect traffic from. // It can not be empty. // // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectFrontend is immutable" RedirectFrontend RedirectFrontend `json:"redirectFrontend"` // RedirectBackend specifies backend configuration to redirect traffic to. // It can not be empty. // // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="redirectBackend is immutable" RedirectBackend RedirectBackend `json:"redirectBackend"` + // SkipRedirectFromBackend indicates whether traffic matching RedirectFrontend + // from RedirectBackend should skip redirection, and hence the traffic will + // be forwarded as-is. + // + // The default is false which means traffic matching RedirectFrontend will + // get redirected from all pods, including the RedirectBackend(s). + // + // Example: If RedirectFrontend is configured to "169.254.169.254:80" as the traffic + // that needs to be redirected to backends selected by RedirectBackend, if + // SkipRedirectFromBackend is set to true, traffic going to "169.254.169.254:80" + // from such backends will not be redirected back to the backends. Instead, + // the matched traffic from the backends will be forwarded to the original + // destination "169.254.169.254:80". + // + // +kubebuilder:validation:Optional + // +kubebuilder:default=false + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="skipRedirectFromBackend is immutable" + SkipRedirectFromBackend bool `json:"skipRedirectFromBackend"` + // Description can be used by the creator of the policy to describe the // purpose of this policy. // @@ -176,8 +195,6 @@ type CiliumLocalRedirectPolicySpec struct { // CiliumLocalRedirectPolicyStatus is the status of a Local Redirect Policy. 
type CiliumLocalRedirectPolicyStatus struct { // TODO Define status(aditi) - // - // +kubebuilder:validation:Type=object OK bool `json:"ok,omitempty"` } @@ -208,7 +225,7 @@ func (pInfo *PortInfo) SanitizePortInfo(checkNamedPort bool) (uint16, string, lb } else { p, err := strconv.ParseUint(pInfo.Port, 0, 16) if err != nil { - return pInt, pName, protocol, fmt.Errorf("unable to parse port: %v", err) + return pInt, pName, protocol, fmt.Errorf("unable to parse port: %w", err) } if p == 0 { return pInt, pName, protocol, fmt.Errorf("port cannot be 0") diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnc_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnc_types.go new file mode 100644 index 0000000000..65b12fc838 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnc_types.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +//+genclient +//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +//+kubebuilder:resource:categories={cilium} +//+kubebuilder:object:root=true +//+deepequal-gen=false +//+kubebuilder:storageversion + +// CiliumNodeConfig is a list of configuration key-value pairs. It is applied to +// nodes indicated by a label selector. +// +// If multiple overrides apply to the same node, they will be ordered by name +// with later Overrides overwriting any conflicting keys. +type CiliumNodeConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec is the desired Cilium configuration overrides for a given node + Spec CiliumNodeConfigSpec `json:"spec"` +} + +// +deepequal-gen=false +type CiliumNodeConfigSpec struct { + // Defaults is treated the same as the cilium-config ConfigMap - a set + // of key-value pairs parsed by the agent and operator processes. + // Each key must be a valid config-map data field (i.e. a-z, A-Z, -, _, and .) + Defaults map[string]string `json:"defaults"` + + // NodeSelector is a label selector that determines to which nodes + // this configuration applies. + // If not supplied, then this config applies to no nodes. If + // empty, then it applies to all nodes. 
+ NodeSelector *metav1.LabelSelector `json:"nodeSelector"` +} + +//+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +//+deepequal-gen=false + +type CiliumNodeConfigList struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ListMeta `json:"metadata,omitempty"` + Items []CiliumNodeConfig `json:"items"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go index 1fb1fdf505..a3210aff37 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/cnp_types.go @@ -6,13 +6,11 @@ package v2 import ( "fmt" "reflect" - "strings" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cilium/cilium/pkg/comparator" - k8sConst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" k8sCiliumUtils "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/utils" slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" k8sUtils "github.com/cilium/cilium/pkg/k8s/utils" @@ -25,6 +23,7 @@ import ( // +deepequal-gen:private-method=true // +kubebuilder:resource:categories={cilium,ciliumpolicy},singular="ciliumnetworkpolicy",path="ciliumnetworkpolicies",scope="Namespaced",shortName={cnp,ciliumnp} // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:printcolumn:JSONPath=".status.conditions[?(@.type=='Valid')].status",name="Valid",type=string // +kubebuilder:subresource:status // +kubebuilder:storageversion @@ -73,12 +72,17 @@ func objectMetaDeepEqual(in, other metav1.ObjectMeta) bool { // CiliumNetworkPolicyStatus is the status of a Cilium policy rule. type CiliumNetworkPolicyStatus struct { - // Nodes is the Cilium policy status for each node - Nodes map[string]CiliumNetworkPolicyNodeStatus `json:"nodes,omitempty"` // DerivativePolicies is the status of all policies derived from the Cilium // policy DerivativePolicies map[string]CiliumNetworkPolicyNodeStatus `json:"derivativePolicies,omitempty"` + + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []NetworkPolicyCondition `json:"conditions,omitempty"` } // +deepequal-gen=true @@ -145,24 +149,6 @@ func (r *CiliumNetworkPolicy) String() string { return result } -// GetPolicyStatus returns the CiliumNetworkPolicyNodeStatus corresponding to -// nodeName in the provided CiliumNetworkPolicy. If Nodes within the rule's -// Status is nil, returns an empty CiliumNetworkPolicyNodeStatus. -func (r *CiliumNetworkPolicy) GetPolicyStatus(nodeName string) CiliumNetworkPolicyNodeStatus { - if r.Status.Nodes == nil { - return CiliumNetworkPolicyNodeStatus{} - } - return r.Status.Nodes[nodeName] -} - -// SetPolicyStatus sets the given policy status for the given nodes' map. -func (r *CiliumNetworkPolicy) SetPolicyStatus(nodeName string, cnpns CiliumNetworkPolicyNodeStatus) { - if r.Status.Nodes == nil { - r.Status.Nodes = map[string]CiliumNetworkPolicyNodeStatus{} - } - r.Status.Nodes[nodeName] = cnpns -} - // SetDerivedPolicyStatus set the derivative policy status for the given // derivative policy name. 
func (r *CiliumNetworkPolicy) SetDerivedPolicyStatus(derivativePolicyName string, status CiliumNetworkPolicyNodeStatus) { @@ -236,19 +222,6 @@ func (r *CiliumNetworkPolicy) Parse() (api.Rules, error) { return retRules, nil } -// GetControllerName returns the unique name for the controller manager. -func (r *CiliumNetworkPolicy) GetControllerName() string { - name := k8sUtils.GetObjNamespaceName(&r.ObjectMeta) - const staticLen = 6 - var str strings.Builder - str.Grow(staticLen + len(name) + len(k8sConst.CtrlPrefixPolicyStatus)) - str.WriteString(k8sConst.CtrlPrefixPolicyStatus) - str.WriteString(" (v2 ") - str.WriteString(name) - str.WriteString(")") - return str.String() -} - // GetIdentityLabels returns all rule labels in the CiliumNetworkPolicy. func (r *CiliumNetworkPolicy) GetIdentityLabels() labels.LabelArray { namespace := k8sUtils.ExtractNamespace(&r.ObjectMeta) @@ -295,3 +268,25 @@ type CiliumNetworkPolicyList struct { // Items is a list of CiliumNetworkPolicy Items []CiliumNetworkPolicy `json:"items"` } + +type PolicyConditionType string + +const ( + PolicyConditionValid PolicyConditionType = "Valid" +) + +type NetworkPolicyCondition struct { + // The type of the policy condition + Type PolicyConditionType `json:"type"` + // The status of the condition, one of True, False, or Unknown + Status v1.ConditionStatus `json:"status"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime slimv1.Time `json:"lastTransitionTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go index 3197b46130..bea3596cc5 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/register.go @@ -127,6 +127,17 @@ const ( // CECName is the full name of Cilium Envoy Config CECName = CECPluralName + "." + CustomResourceDefinitionGroup + + // CiliumNodeConfig (CNC) + + // CNCPluralName is the plural name of Cilium Node Config + CNCPluralName = "ciliumnodeconfigs" + + // CNCKindDefinition is the kind name of Cilium Node Config + CNCKindDefinition = "CiliumNodeConfig" + + // CNCName is the full name of Cilium Node Config + CNCName = CNCPluralName + "." 
+ CustomResourceDefinitionGroup ) // SchemeGroupVersion is group version used to register these objects @@ -180,6 +191,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CiliumEndpointList{}, &CiliumNode{}, &CiliumNodeList{}, + &CiliumNodeConfig{}, + &CiliumNodeConfigList{}, &CiliumExternalWorkload{}, &CiliumExternalWorkloadList{}, &CiliumIdentity{}, diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go index af821e8dfb..d07a0a8a61 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/types.go @@ -4,6 +4,7 @@ package v2 import ( + "net" "sort" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,11 +21,9 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:openapi-gen=false // +kubebuilder:resource:categories={cilium},singular="ciliumendpoint",path="ciliumendpoints",scope="Namespaced",shortName={cep,ciliumep} -// +kubebuilder:printcolumn:JSONPath=".status.id",description="Cilium endpoint id",name="Endpoint ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Cilium identity id",name="Identity ID",type=integer -// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string -// +kubebuilder:printcolumn:JSONPath=".status.visibility-policy-status",description="Status of visibility policy in the endpoint",name="Visibility Policy",type=string +// +kubebuilder:printcolumn:JSONPath=".status.identity.id",description="Security Identity",name="Security Identity",type=integer +// +kubebuilder:printcolumn:JSONPath=".status.policy.ingress.state",description="Ingress enforcement in the endpoint",name="Ingress Enforcement",type=string,priority=1 +// +kubebuilder:printcolumn:JSONPath=".status.policy.egress.state",description="Egress enforcement in the endpoint",name="Egress Enforcement",type=string,priority=1 // +kubebuilder:printcolumn:JSONPath=".status.state",description="Endpoint current state",name="Endpoint State",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv4",description="Endpoint IPv4 address",name="IPv4",type=string // +kubebuilder:printcolumn:JSONPath=".status.networking.addressing[0].ipv6",description="Endpoint IPv6 address",name="IPv6",type=string @@ -77,8 +76,6 @@ type EndpointStatus struct { Policy *EndpointPolicy `json:"policy,omitempty"` - VisibilityPolicyStatus *string `json:"visibility-policy-status,omitempty"` - // State is the state of the endpoint. // // +kubebuilder:validation:Enum=creating;waiting-for-identity;not-ready;waiting-to-regenerate;regenerating;restoring;ready;disconnecting;disconnected;invalid @@ -87,10 +84,6 @@ type EndpointStatus struct { NamedPorts models.NamedPorts `json:"named-ports,omitempty"` } -// EndpointStatusLogEntries is the maximum number of log entries in -// EndpointStatus.Log. -const EndpointStatusLogEntries = 5 - // +k8s:deepcopy-gen=false // ControllerList is a list of ControllerStatus. @@ -347,6 +340,11 @@ type NodeSpec struct { // some other means of identification. 
InstanceID string `json:"instance-id,omitempty"` + // BootID is a unique node identifier generated on boot + // + // +kubebuilder:validation:Optional + BootID string `json:"bootid,omitempty"` + // Addresses is the list of all node addresses. // // +kubebuilder:validation:Optional @@ -465,3 +463,22 @@ func (n *CiliumNode) InstanceID() (instanceID string) { } return } + +func (n NodeAddress) ToString() string { + return n.IP +} + +func (n NodeAddress) AddrType() addressing.AddressType { + return n.Type +} + +// GetIP returns one of the CiliumNode's IP addresses available with the +// following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if GetIP fails to extract an IP from the CiliumNode +// based on the provided address family. +func (n *CiliumNode) GetIP(ipv6 bool) net.IP { + return addressing.ExtractNodeIP[NodeAddress](n.Spec.Addresses, ipv6) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go index cf85aaf6d0..7e8f288578 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepcopy.go @@ -12,6 +12,7 @@ import ( models "github.com/cilium/cilium/api/v1/models" v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" api "github.com/cilium/cilium/pkg/policy/api" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -379,7 +380,7 @@ func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) { if (*in)[i] != nil { in, out := &(*in)[i], &(*out)[i] *out = new(ServiceListener) - **out = **in + (*in).DeepCopyInto(*out) } } } @@ -401,6 +402,11 @@ func (in *CiliumEnvoyConfigSpec) DeepCopyInto(out *CiliumEnvoyConfigSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } return } @@ -771,13 +777,6 @@ func (in *CiliumNetworkPolicyNodeStatus) DeepCopy() *CiliumNetworkPolicyNodeStat // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus) { *out = *in - if in.Nodes != nil { - in, out := &in.Nodes, &out.Nodes - *out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } if in.DerivativePolicies != nil { in, out := &in.DerivativePolicies, &out.DerivativePolicies *out = make(map[string]CiliumNetworkPolicyNodeStatus, len(*in)) @@ -785,6 +784,13 @@ func (in *CiliumNetworkPolicyStatus) DeepCopyInto(out *CiliumNetworkPolicyStatus (*out)[key] = *val.DeepCopy() } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NetworkPolicyCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -826,6 +832,94 @@ func (in *CiliumNode) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumNodeConfig) DeepCopyInto(out *CiliumNodeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfig. +func (in *CiliumNodeConfig) DeepCopy() *CiliumNodeConfig { + if in == nil { + return nil + } + out := new(CiliumNodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumNodeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumNodeConfigList) DeepCopyInto(out *CiliumNodeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumNodeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigList. +func (in *CiliumNodeConfigList) DeepCopy() *CiliumNodeConfigList { + if in == nil { + return nil + } + out := new(CiliumNodeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumNodeConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumNodeConfigSpec) DeepCopyInto(out *CiliumNodeConfigSpec) { + *out = *in + if in.Defaults != nil { + in, out := &in.Defaults, &out.Defaults + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumNodeConfigSpec. +func (in *CiliumNodeConfigSpec) DeepCopy() *CiliumNodeConfigSpec { + if in == nil { + return nil + } + out := new(CiliumNodeConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CiliumNodeList) DeepCopyInto(out *CiliumNodeList) { *out = *in @@ -915,6 +1009,11 @@ func (in *EgressRule) DeepCopyInto(out *EgressRule) { *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } return } @@ -1109,11 +1208,6 @@ func (in *EndpointStatus) DeepCopyInto(out *EndpointStatus) { *out = new(EndpointPolicy) (*in).DeepCopyInto(*out) } - if in.VisibilityPolicyStatus != nil { - in, out := &in.VisibilityPolicyStatus, &out.VisibilityPolicyStatus - *out = new(string) - **out = **in - } if in.NamedPorts != nil { in, out := &in.NamedPorts, &out.NamedPorts *out = make(models.NamedPorts, len(*in)) @@ -1198,6 +1292,23 @@ func (in *IdentityTuple) DeepCopy() *IdentityTuple { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyCondition) DeepCopyInto(out *NetworkPolicyCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyCondition. +func (in *NetworkPolicyCondition) DeepCopy() *NetworkPolicyCondition { + if in == nil { + return nil + } + out := new(NetworkPolicyCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { *out = *in @@ -1371,6 +1482,11 @@ func (in *ServiceInfo) DeepCopy() *ServiceInfo { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ServiceListener) DeepCopyInto(out *ServiceListener) { *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]uint16, len(*in)) + copy(*out, *in) + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go index 7c2a855220..cd75476234 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2/zz_generated.deepequal.go @@ -277,6 +277,14 @@ func (in *CiliumEnvoyConfigSpec) DeepEqual(other *CiliumEnvoyConfigSpec) bool { } } + if (in.NodeSelector == nil) != (other.NodeSelector == nil) { + return false + } else if in.NodeSelector != nil { + if !in.NodeSelector.DeepEqual(other.NodeSelector) { + return false + } + } + return true } @@ -388,6 +396,9 @@ func (in *CiliumLocalRedirectPolicySpec) DeepEqual(other *CiliumLocalRedirectPol return false } + if in.SkipRedirectFromBackend != other.SkipRedirectFromBackend { + return false + } if in.Description != other.Description { return false } @@ -488,8 +499,8 @@ func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) return false } - if ((in.Nodes != nil) && (other.Nodes != nil)) || ((in.Nodes == nil) != (other.Nodes == nil)) { - in, other := &in.Nodes, &other.Nodes + if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) { + in, other := &in.DerivativePolicies, &other.DerivativePolicies if other == nil { return false } @@ -509,8 +520,8 @@ func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) } } - if ((in.DerivativePolicies != nil) && (other.DerivativePolicies != nil)) || ((in.DerivativePolicies == nil) != (other.DerivativePolicies == nil)) { - in, other := &in.DerivativePolicies, &other.DerivativePolicies + if ((in.Conditions != nil) && (other.Conditions != nil)) || ((in.Conditions == nil) != (other.Conditions == nil)) { + in, other := &in.Conditions, &other.Conditions if other == nil { return false } @@ -518,13 +529,9 @@ func (in *CiliumNetworkPolicyStatus) DeepEqual(other *CiliumNetworkPolicyStatus) if len(*in) != len(*other) { return false } else { - for key, inValue := range *in { - if otherValue, present := (*other)[key]; !present { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { return false - } else { - if !inValue.DeepEqual(&otherValue) { - return false - } } } } @@ -697,6 +704,14 @@ func (in *EgressRule) DeepEqual(other *EgressRule) bool { } } + if (in.NodeSelector == nil) != (other.NodeSelector == nil) { + return false + } else if in.NodeSelector != nil { + if !in.NodeSelector.DeepEqual(other.NodeSelector) { + return false + } + } + return true } @@ -914,14 +929,6 @@ func (in *EndpointStatus) DeepEqual(other *EndpointStatus) bool { } } - if (in.VisibilityPolicyStatus == nil) != (other.VisibilityPolicyStatus == nil) { - return false - } else if in.VisibilityPolicyStatus != nil { - if *in.VisibilityPolicyStatus != *other.VisibilityPolicyStatus { - return false - } - } - if in.State != other.State { return false } @@ -1043,6 +1050,33 @@ func (in *IdentityTuple) DeepEqual(other *IdentityTuple) bool { return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *NetworkPolicyCondition) DeepEqual(other *NetworkPolicyCondition) bool { + if other == nil { + return false + } + + if in.Type != other.Type { + return false + } + if in.Status != other.Status { + return false + } + if !in.LastTransitionTime.DeepEqual(&other.LastTransitionTime) { + return false + } + + if in.Reason != other.Reason { + return false + } + if in.Message != other.Message { + return false + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *NodeAddress) DeepEqual(other *NodeAddress) bool { @@ -1070,6 +1104,9 @@ func (in *NodeSpec) DeepEqual(other *NodeSpec) bool { if in.InstanceID != other.InstanceID { return false } + if in.BootID != other.BootID { + return false + } if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) { in, other := &in.Addresses, &other.Addresses if other == nil { @@ -1304,6 +1341,23 @@ func (in *ServiceListener) DeepEqual(other *ServiceListener) bool { if in.Namespace != other.Namespace { return false } + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { + in, other := &in.Ports, &other.Ports + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + if in.Listener != other.Listener { return false } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go new file mode 100644 index 0000000000..f601819dae --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_advert_types.go @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// BGPAdvertisementType defines type of advertisement. +// +// Note list of supported advertisements is not exhaustive and can be extended in the future. +// Consumer of this API should be able to handle unknown values. +// +// +kubebuilder:validation:Enum=PodCIDR;CiliumPodIPPool;Service +type BGPAdvertisementType string + +const ( + // BGPPodCIDRAdvert when configured, Cilium will advertise pod CIDRs to BGP peers. + BGPPodCIDRAdvert BGPAdvertisementType = "PodCIDR" + + // BGPCiliumPodIPPoolAdvert when configured, Cilium will advertise prefixes from CiliumPodIPPools to BGP peers. + BGPCiliumPodIPPoolAdvert BGPAdvertisementType = "CiliumPodIPPool" + + // BGPServiceAdvert when configured, Cilium will advertise service related routes to BGP peers. + // + BGPServiceAdvert BGPAdvertisementType = "Service" +) + +// BGPServiceAddressType defines type of service address to be advertised. +// +// Note list of supported service addresses is not exhaustive and can be extended in the future. +// Consumer of this API should be able to handle unknown values. +// +// +kubebuilder:validation:Enum=LoadBalancerIP;ClusterIP;ExternalIP +type BGPServiceAddressType string + +const ( + // BGPLoadBalancerIPAddr when configured, Cilium will advertise load balancer services IPs to BGP peers. + // The loadBalancerClass for a service must be nil or specify a class supported by Cilium, + // e.g. "io.cilium/bgp-control-plane". 
+ // + // Refer to the following document for additional details regarding load balancer + // classes: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + BGPLoadBalancerIPAddr BGPServiceAddressType = "LoadBalancerIP" + + // BGPClusterIPAddr when configured, Cilium will advertise cluster IP prefix of a service to BGP peers. + // Cluster IP for a service is defined here + // https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip + BGPClusterIPAddr BGPServiceAddressType = "ClusterIP" + + // BGPExternalIPAddr when configured, Cilium will advertise external IP prefix of a service to BGP peers. + // External IP for a service is defined here + // https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + BGPExternalIPAddr BGPServiceAddressType = "ExternalIP" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpadvertisement",path="ciliumbgpadvertisements",scope="Cluster",shortName={cbgpadvert} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPAdvertisement is the Schema for the ciliumbgpadvertisements API +type CiliumBGPAdvertisement struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + Spec CiliumBGPAdvertisementSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPAdvertisementList contains a list of CiliumBGPAdvertisement +type CiliumBGPAdvertisementList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPAdvertisement. + Items []CiliumBGPAdvertisement `json:"items"` +} + +type CiliumBGPAdvertisementSpec struct { + // Advertisements is a list of BGP advertisements. + // + // +kubebuilder:validation:Required + Advertisements []BGPAdvertisement `json:"advertisements"` +} + +// BGPAdvertisement defines which routes Cilium should advertise to BGP peers. Optionally, additional attributes can be +// set to the advertised routes. +type BGPAdvertisement struct { + // AdvertisementType defines type of advertisement which has to be advertised. + // + // +kubebuilder:validation:Required + AdvertisementType BGPAdvertisementType `json:"advertisementType"` + + // Service defines configuration options for advertisementType service. + // + // +kubebuilder:validation:Optional + Service *BGPServiceOptions `json:"service,omitempty"` + + // Selector is a label selector to select objects of the type specified by AdvertisementType. + // If not specified, no objects of the type specified by AdvertisementType are selected for advertisement. + // + // +kubebuilder:validation:Optional + Selector *slimv1.LabelSelector `json:"selector,omitempty"` + + // Attributes defines additional attributes to set to the advertised routes. + // If not specified, no additional attributes are set. + // + // +kubebuilder:validation:Optional + Attributes *BGPAttributes `json:"attributes,omitempty"` +} + +// BGPServiceOptions defines the configuration for Service advertisement type. +type BGPServiceOptions struct { + // Addresses is a list of service address types which needs to be advertised via BGP. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + Addresses []BGPServiceAddressType `json:"addresses,omitempty"` +} + +// BGPAttributes defines additional attributes to set to the advertised NLRIs. +type BGPAttributes struct { + // Communities sets the community attributes in the route. + // If not specified, no community attribute is set. + // + // +kubebuilder:validation:Optional + Communities *BGPCommunities `json:"communities,omitempty"` + + // LocalPreference sets the local preference attribute in the route. + // If not specified, no local preference attribute is set. + // + // +kubebuilder:validation:Optional + LocalPreference *int64 `json:"localPreference,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go new file mode 100644 index 0000000000..781ce5198e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_cluster_types.go @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpclusterconfig",path="ciliumbgpclusterconfigs",scope="Cluster",shortName={cbgpcluster} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// CiliumBGPClusterConfig is the Schema for the CiliumBGPClusterConfig API +type CiliumBGPClusterConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec defines the desired cluster configuration of the BGP control plane. + Spec CiliumBGPClusterConfigSpec `json:"spec"` + + // Status is a running status of the cluster configuration + // + // +kubebuilder:validation:Optional + Status CiliumBGPClusterConfigStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPClusterConfigList is a list of CiliumBGPClusterConfig objects. +type CiliumBGPClusterConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPClusterConfig. + Items []CiliumBGPClusterConfig `json:"items"` +} + +type CiliumBGPClusterConfigSpec struct { + // NodeSelector selects a group of nodes where this BGP Cluster + // config applies. + // If empty / nil this config applies to all nodes. + // + // +kubebuilder:validation:Optional + NodeSelector *slimv1.LabelSelector `json:"nodeSelector,omitempty"` + + // A list of CiliumBGPInstance(s) which instructs + // the BGP control plane how to instantiate virtual BGP routers. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPInstance `json:"bgpInstances"` +} + +type CiliumBGPInstance struct { + // Name is the name of the BGP instance. It is a unique identifier for the BGP instance + // within the cluster configuration. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalASN is the ASN of this BGP instance. + // Supports extended 32bit ASNs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + LocalASN *int64 `json:"localASN,omitempty"` + + // Peers is a list of neighboring BGP peers for this virtual router + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPPeer `json:"peers,omitempty"` +} + +type CiliumBGPPeer struct { + // Name is the name of the BGP peer. It is a unique identifier for the peer within the BGP instance. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // Supports IPv4 and IPv6 addresses. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + PeerAddress *string `json:"peerAddress,omitempty"` + + // PeerASN is the ASN of the peer BGP router. + // Supports extended 32bit ASNs. + // + // If peerASN is 0, the BGP OPEN message validation of ASN will be disabled and + // ASN will be determined based on peer's OPEN message. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=4294967295 + // +kubebuilder:default=0 + PeerASN *int64 `json:"peerASN,omitempty"` + + // PeerConfigRef is a reference to a peer configuration resource. + // If not specified, the default BGP configuration is used for this peer. + // + // +kubebuilder:validation:Optional + PeerConfigRef *PeerConfigReference `json:"peerConfigRef,omitempty"` +} + +// PeerConfigReference is a reference to a peer configuration resource. +type PeerConfigReference struct { + // Group is the group of the peer config resource. + // If not specified, the default of "cilium.io" is used. + // + // +kubebuilder:validation:Optional + // +kubebuilder:default="cilium.io" + Group string `json:"group"` + + // Kind is the kind of the peer config resource. + // If not specified, the default of "CiliumBGPPeerConfig" is used. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:default="CiliumBGPPeerConfig" + Kind string `json:"kind"` + + // Name is the name of the peer config resource. + // Name refers to the name of a Kubernetes object (typically a CiliumBGPPeerConfig). + // + // +kubebuilder:validation:Required + Name string `json:"name"` +} + +type CiliumBGPClusterConfigStatus struct { + // The current conditions of the CiliumBGPClusterConfig + // + // +optional + // +listType=map + // +listMapKey=type + // +deepequal-gen=false + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// Conditions for CiliumBGPClusterConfig. When you add a new condition, don't +// forget to update the AllBGPClusterConfigConditions list as well. +const ( + // Node selector selects nothing + BGPClusterConfigConditionNoMatchingNode = "cilium.io/NoMatchingNode" + // Referenced peer configs are missing + BGPClusterConfigConditionMissingPeerConfigs = "cilium.io/MissingPeerConfigs" + // ClusterConfig with conflicting nodeSelector present + BGPClusterConfigConditionConflictingClusterConfigs = "cilium.io/ConflictingClusterConfig" +) + +var AllBGPClusterConfigConditions = []string{ + BGPClusterConfigConditionNoMatchingNode, + BGPClusterConfigConditionMissingPeerConfigs, + BGPClusterConfigConditionConflictingClusterConfigs, +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go new file mode 100644 index 0000000000..f14bdb419e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_override_types.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpnodeconfigoverride",path="ciliumbgpnodeconfigoverrides",scope="Cluster",shortName={cbgpnodeoverride} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:storageversion + +// CiliumBGPNodeConfigOverride specifies configuration overrides for a CiliumBGPNodeConfig. +// It allows fine-tuning of BGP behavior on a per-node basis. For the override to be effective, +// the names in CiliumBGPNodeConfigOverride and CiliumBGPNodeConfig must match exactly. This +// matching ensures that specific node configurations are applied correctly and only where intended. +type CiliumBGPNodeConfigOverride struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPNodeConfigOverride. + Spec CiliumBGPNodeConfigOverrideSpec `json:"spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPNodeConfigOverrideList is a list of CiliumBGPNodeConfigOverride objects. +type CiliumBGPNodeConfigOverrideList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPNodeConfigOverride. + Items []CiliumBGPNodeConfigOverride `json:"items"` +} + +type CiliumBGPNodeConfigOverrideSpec struct { + // BGPInstances is a list of BGP instances to override. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeConfigInstanceOverride `json:"bgpInstances"` +} + +// CiliumBGPNodeConfigInstanceOverride defines configuration options which can be overridden for a specific BGP instance. +type CiliumBGPNodeConfigInstanceOverride struct { + // Name is the name of the BGP instance for which the configuration is overridden. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // RouterID is BGP router id to use for this instance. It must be unique across all BGP instances. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Format=ipv4 + RouterID *string `json:"routerID,omitempty"` + + // LocalPort is port to use for this BGP instance. + // + // +kubebuilder:validation:Optional + LocalPort *int32 `json:"localPort,omitempty"` + + // Peers is a list of peer configurations to override. + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPNodeConfigPeerOverride `json:"peers,omitempty"` +} + +// CiliumBGPNodeConfigPeerOverride defines configuration options which can be overridden for a specific peer. +type CiliumBGPNodeConfigPeerOverride struct { + // Name is the name of the peer for which the configuration is overridden. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalAddress is the IP address to use for connecting to this peer. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + LocalAddress *string `json:"localAddress,omitempty"` + + // LocalPort is source port to use for connecting to this peer. 
+ // + // +kubebuilder:validation:Optional + LocalPort *int32 `json:"localPort,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go new file mode 100644 index 0000000000..06cf9071f8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_node_types.go @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgpnodeconfig",path="ciliumbgpnodeconfigs",scope="Cluster",shortName={cbgpnode} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// CiliumBGPNodeConfig is node local configuration for BGP agent. Name of the object should be node name. +// This resource will be created by Cilium operator and is read-only for the users. +type CiliumBGPNodeConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPNodeConfig. + Spec CiliumBGPNodeSpec `json:"spec"` + + // Status is the most recently observed status of the CiliumBGPNodeConfig. + // +kubebuilder:validation:Optional + Status CiliumBGPNodeStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPNodeConfigList is a list of CiliumBGPNodeConfig objects. +type CiliumBGPNodeConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPNodeConfig. + Items []CiliumBGPNodeConfig `json:"items"` +} + +type CiliumBGPNodeSpec struct { + // BGPInstances is a list of BGP router instances on the node. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=16 + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeInstance `json:"bgpInstances"` +} + +// CiliumBGPNodeInstance is a single BGP router instance configuration on the node. +type CiliumBGPNodeInstance struct { + // Name is the name of the BGP instance. This name is used to identify the BGP instance on the node. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + Name string `json:"name"` + + // LocalASN is the ASN of this virtual router. + // Supports extended 32bit ASNs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=4294967295 + LocalASN *int64 `json:"localASN,omitempty"` + + // RouterID is the BGP router ID of this virtual router. + // This configuration is derived from CiliumBGPNodeConfigOverride resource. + // + // If not specified, the router ID will be derived from the node local address. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Format=ipv4 + RouterID *string `json:"routerID,omitempty"` + + // LocalPort is the port on which the BGP daemon listens for incoming connections. 
+ // + // If not specified, BGP instance will not listen for incoming connections. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + LocalPort *int32 `json:"localPort,omitempty"` + + // Peers is a list of neighboring BGP peers for this virtual router + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + Peers []CiliumBGPNodePeer `json:"peers,omitempty"` +} + +type CiliumBGPNodePeer struct { + // Name is the name of the BGP peer. This name is used to identify the BGP peer for the BGP instance. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // Supports IPv4 and IPv6 addresses. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + PeerAddress *string `json:"peerAddress,omitempty"` + + // PeerASN is the ASN of the peer BGP router. + // Supports extended 32bit ASNs + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=4294967295 + PeerASN *int64 `json:"peerASN,omitempty"` + + // LocalAddress is the IP address of the local interface to use for the peering session. + // This configuration is derived from CiliumBGPNodeConfigOverride resource. If not specified, the local address will be used for setting up peering. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Pattern=`((^\s*((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))\s*$)|(^\s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?\s*$))` + LocalAddress *string `json:"localAddress,omitempty"` + + // PeerConfigRef is a reference to a peer configuration resource. + // If not specified, the default BGP configuration is used for this peer. + // + // +kubebuilder:validation:Optional + PeerConfigRef *PeerConfigReference `json:"peerConfigRef,omitempty"` +} + +// CiliumBGPNodeStatus is the status of the CiliumBGPNodeConfig. +type CiliumBGPNodeStatus struct { + // BGPInstances is the status of the BGP instances on the node. + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + BGPInstances []CiliumBGPNodeInstanceStatus `json:"bgpInstances,omitempty"` +} + +type CiliumBGPNodeInstanceStatus struct { + // Name is the name of the BGP instance. This name is used to identify the BGP instance on the node. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // LocalASN is the ASN of this BGP instance. + // + // +kubebuilder:validation:Optional + LocalASN *int64 `json:"localASN,omitempty"` + + // PeerStatuses is the state of the BGP peers for this BGP instance. + // + // +kubebuilder:validation:Optional + // +listType=map + // +listMapKey=name + PeerStatuses []CiliumBGPNodePeerStatus `json:"peers,omitempty"` +} + +// CiliumBGPNodePeerStatus is the status of a BGP peer. +type CiliumBGPNodePeerStatus struct { + // Name is the name of the BGP peer. + // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // PeerAddress is the IP address of the neighbor. + // + // +kubebuilder:validation:Required + PeerAddress string `json:"peerAddress"` + + // PeerASN is the ASN of the neighbor. + // + // +kubebuilder:validation:Optional + PeerASN *int64 `json:"peerASN,omitempty"` + + // PeeringState is last known state of the peering session. + // + // +kubebuilder:validation:Optional + PeeringState *string `json:"peeringState,omitempty"` + + // Timers is the state of the negotiated BGP timers for this peer. + // + // +kubebuilder:validation:Optional + Timers *CiliumBGPTimersState `json:"timers,omitempty"` + + // EstablishedTime is the time when the peering session was established. + // It is represented in RFC3339 form and is in UTC. 
+ // + // +kubebuilder:validation:Optional + EstablishedTime *string `json:"establishedTime,omitempty"` + + // RouteCount is the number of routes exchanged with this peer per AFI/SAFI. + // + // +kubebuilder:validation:Optional + RouteCount []BGPFamilyRouteCount `json:"routeCount,omitempty"` +} + +// CiliumBGPTimersState is the state of the negotiated BGP timers for a peer. +type CiliumBGPTimersState struct { + // AppliedHoldTimeSeconds is the negotiated hold time for this peer. + // + // +kubebuilder:validation:Optional + AppliedHoldTimeSeconds *int32 `json:"appliedHoldTimeSeconds,omitempty"` + + // AppliedKeepaliveSeconds is the negotiated keepalive time for this peer. + // + // +kubebuilder:validation:Optional + AppliedKeepaliveSeconds *int32 `json:"appliedKeepaliveSeconds,omitempty"` +} + +type BGPFamilyRouteCount struct { + // Afi is the Address Family Identifier (AFI) of the family. + // + // +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque + // +kubebuilder:validation:Required + Afi string `json:"afi"` + + // Safi is the Subsequent Address Family Identifier (SAFI) of the family. + // + // +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value + // +kubebuilder:validation:Required + Safi string `json:"safi"` + + // Received is the number of routes received from this peer. + // + // +kubebuilder:validation:Optional + Received *int32 `json:"received,omitempty"` + + // Advertised is the number of routes advertised to this peer. + // + // +kubebuilder:validation:Optional + Advertised *int32 `json:"advertised,omitempty"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go new file mode 100644 index 0000000000..752e7b38c6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgp_peer_types.go @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=false +// +deepequal-gen=false + +// CiliumBGPPeerConfigList is a list of CiliumBGPPeer objects. +type CiliumBGPPeerConfigList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items is a list of CiliumBGPPeer. + Items []CiliumBGPPeerConfig `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:categories={cilium,ciliumbgp},singular="ciliumbgppeerconfig",path="ciliumbgppeerconfigs",scope="Cluster",shortName={cbgppeer} +// +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +type CiliumBGPPeerConfig struct { + // +deepequal-gen=false + metav1.TypeMeta `json:",inline"` + // +deepequal-gen=false + metav1.ObjectMeta `json:"metadata"` + + // Spec is the specification of the desired behavior of the CiliumBGPPeerConfig. 
+ Spec CiliumBGPPeerConfigSpec `json:"spec"` + + // Status is the running status of the CiliumBGPPeerConfig + // + // +kubebuilder:validation:Optional + Status CiliumBGPPeerConfigStatus `json:"status"` +} + +type CiliumBGPPeerConfigSpec struct { + // Transport defines the BGP transport parameters for the peer. + // + // If not specified, the default transport parameters are used. + // + // +kubebuilder:validation:Optional + Transport *CiliumBGPTransport `json:"transport,omitempty"` + + // Timers defines the BGP timers for the peer. + // + // If not specified, the default timers are used. + // + // +kubebuilder:validation:Optional + Timers *CiliumBGPTimers `json:"timers,omitempty"` + + // AuthSecretRef is the name of the secret to use to fetch a TCP + // authentication password for this peer. + // + // If not specified, no authentication is used. + // + // +kubebuilder:validation:Optional + AuthSecretRef *string `json:"authSecretRef,omitempty"` + + // GracefulRestart defines graceful restart parameters which are negotiated + // with this peer. + // + // If not specified, the graceful restart capability is disabled. + // + // +kubebuilder:validation:Optional + GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"` + + // EBGPMultihopTTL controls the multi-hop feature for eBGP peers. + // Its value defines the Time To Live (TTL) value used in BGP + // packets sent to the peer. + // + // If not specified, EBGP multihop is disabled. This field is ignored for iBGP neighbors. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=255 + // +kubebuilder:default=1 + EBGPMultihop *int32 `json:"ebgpMultihop,omitempty"` + + // Families, if provided, defines a set of AFI/SAFIs the speaker will + // negotiate with its peer. + // + // If not specified, the default families of IPv6/unicast and IPv4/unicast will be created. + // + // +kubebuilder:validation:Optional + Families []CiliumBGPFamilyWithAdverts `json:"families,omitempty"` +} + +type CiliumBGPPeerConfigStatus struct { + // The current conditions of the CiliumBGPPeerConfig + // + // +optional + // +listType=map + // +listMapKey=type + // +deepequal-gen=false + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// Conditions for CiliumBGPPeerConfig. When you add a new condition, don't +// forget to update the AllBGPPeerConfigConditions list below as well. +const ( + // Referenced auth secret is missing + BGPPeerConfigConditionMissingAuthSecret = "cilium.io/MissingAuthSecret" +) + +var AllBGPPeerConfigConditions = []string{ + BGPPeerConfigConditionMissingAuthSecret, +} + +// CiliumBGPFamily represents an AFI/SAFI address family pair. +type CiliumBGPFamily struct { + // Afi is the Address Family Identifier (AFI) of the family. + // + // +kubebuilder:validation:Enum=ipv4;ipv6;l2vpn;ls;opaque + // +kubebuilder:validation:Required + Afi string `json:"afi"` + + // Safi is the Subsequent Address Family Identifier (SAFI) of the family. + // + // +kubebuilder:validation:Enum=unicast;multicast;mpls_label;encapsulation;vpls;evpn;ls;sr_policy;mup;mpls_vpn;mpls_vpn_multicast;route_target_constraints;flowspec_unicast;flowspec_vpn;key_value + // +kubebuilder:validation:Required + Safi string `json:"safi"` +} + +// CiliumBGPFamilyWithAdverts represents an AFI/SAFI address family pair along with reference to BGP Advertisements.
+type CiliumBGPFamilyWithAdverts struct { + CiliumBGPFamily `json:",inline"` + + // Advertisements selects group of BGP Advertisement(s) to advertise for this family. + // + // If not specified, no advertisements are sent for this family. + // + // This field is ignored in CiliumBGPNeighbor which is used in CiliumBGPPeeringPolicy. + // Use CiliumBGPPeeringPolicy advertisement options instead. + // + // +kubebuilder:validation:Optional + Advertisements *slimv1.LabelSelector `json:"advertisements,omitempty"` +} + +// CiliumBGPTransport defines the BGP transport parameters for the peer. +type CiliumBGPTransport struct { + // Deprecated + // LocalPort is the local port to be used for the BGP session. + // + // If not specified, ephemeral port will be picked to initiate a connection. + // + // This field is deprecated and will be removed in a future release. + // Local port configuration is unnecessary and is not recommended. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + LocalPort *int32 `json:"localPort,omitempty"` + + // PeerPort is the peer port to be used for the BGP session. + // + // If not specified, defaults to TCP port 179. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=179 + PeerPort *int32 `json:"peerPort,omitempty"` +} + +func (t *CiliumBGPTransport) SetDefaults() { + if t.LocalPort == nil || *t.LocalPort == 0 { + t.LocalPort = ptr.To[int32](DefaultBGPPeerLocalPort) + } + + if t.PeerPort == nil || *t.PeerPort == 0 { + t.PeerPort = ptr.To[int32](DefaultBGPPeerPort) + } +} + +type CiliumBGPTimers struct { + // ConnectRetryTimeSeconds defines the initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8). + // + // If not specified, defaults to 120 seconds. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=2147483647 + // +kubebuilder:default=120 + ConnectRetryTimeSeconds *int32 `json:"connectRetryTimeSeconds,omitempty"` + + // HoldTimeSeconds defines the initial value for the BGP HoldTimer (RFC 4271, Section 4.2). + // Updating this value will cause a session reset. + // + // If not specified, defaults to 90 seconds. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=3 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=90 + HoldTimeSeconds *int32 `json:"holdTimeSeconds,omitempty"` + + // KeepaliveTimeSeconds defines the initial value for the BGP KeepaliveTimer (RFC 4271, Section 8). + // It can not be larger than HoldTimeSeconds. Updating this value will cause a session reset. + // + // If not specified, defaults to 30 seconds. 
+ // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=30 + KeepAliveTimeSeconds *int32 `json:"keepAliveTimeSeconds,omitempty"` +} + +func (t *CiliumBGPTimers) SetDefaults() { + if t.ConnectRetryTimeSeconds == nil || *t.ConnectRetryTimeSeconds == 0 { + t.ConnectRetryTimeSeconds = ptr.To[int32](DefaultBGPConnectRetryTimeSeconds) + } + + if t.HoldTimeSeconds == nil || *t.HoldTimeSeconds == 0 { + t.HoldTimeSeconds = ptr.To[int32](DefaultBGPHoldTimeSeconds) + } + + if t.KeepAliveTimeSeconds == nil || *t.KeepAliveTimeSeconds == 0 { + t.KeepAliveTimeSeconds = ptr.To[int32](DefaultBGPKeepAliveTimeSeconds) + } +} + +func (p *CiliumBGPPeerConfigSpec) SetDefaults() { + if p == nil { + return + } + + if p.Transport == nil { + p.Transport = &CiliumBGPTransport{} + } + p.Transport.SetDefaults() + + if p.Timers == nil { + p.Timers = &CiliumBGPTimers{} + } + p.Timers.SetDefaults() + + if p.EBGPMultihop == nil { + p.EBGPMultihop = ptr.To[int32](DefaultBGPEBGPMultihopTTL) + } + + if p.GracefulRestart == nil { + p.GracefulRestart = &CiliumBGPNeighborGracefulRestart{} + } + p.GracefulRestart.SetDefaults() + + if len(p.Families) == 0 { + p.Families = []CiliumBGPFamilyWithAdverts{ + { + CiliumBGPFamily: CiliumBGPFamily{ + Afi: "ipv6", + Safi: "unicast", + }, + }, + { + CiliumBGPFamily: CiliumBGPFamily{ + Afi: "ipv4", + Safi: "unicast", + }, + }, + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go index b2fb7840fe..d6a75ed2f3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/bgpp_types.go @@ -7,7 +7,7 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" ) @@ -15,6 +15,9 @@ import ( const ( // DefaultBGPExportPodCIDR defines the default value for ExportPodCIDR determining whether to export the Node's private CIDR block. DefaultBGPExportPodCIDR = false + // DefaultBGPPeerLocalPort defines the default value for the local port over which to connect to the peer. + // By default, BGP control plane will not set this value, and the kernel will pick a random port source port. + DefaultBGPPeerLocalPort = 0 // DefaultBGPPeerPort defines the TCP port number of a CiliumBGPNeighbor when PeerPort is unspecified. DefaultBGPPeerPort = 179 // DefaultBGPEBGPMultihopTTL defines the default value for the TTL value used in BGP packets sent to the eBGP neighbors. @@ -27,6 +30,15 @@ const ( DefaultBGPKeepAliveTimeSeconds = 30 // DefaultBGPGRRestartTimeSeconds defines default Restart Time for graceful restart (RFC 4724, section 4.2) DefaultBGPGRRestartTimeSeconds = 120 + // BGPLoadBalancerClass defines the BGP Control Plane load balancer class for Services. + BGPLoadBalancerClass = "io.cilium/bgp-control-plane" + // PodCIDRSelectorName defines the name for a selector matching Pod CIDRs + // (standard cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs). + PodCIDRSelectorName = "PodCIDR" + // CiliumLoadBalancerIPPoolSelectorName defines the name for a selector matching CiliumLoadBalancerIPPool resources. + CiliumLoadBalancerIPPoolSelectorName = "CiliumLoadBalancerIPPool" + // CiliumPodIPPoolSelectorName defines the name for a selector matching CiliumPodIPPool resources. 
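A minimal sketch of the defaulting chain defined above, assuming only the SetDefaults methods and the package constants shown in these files: calling SetDefaults on an empty CiliumBGPPeerConfigSpec backfills transport, timers, multihop TTL, graceful restart, and the two default address families.

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
)

func main() {
	spec := &v2alpha1.CiliumBGPPeerConfigSpec{}
	spec.SetDefaults()

	// Transport and timers are now non-nil and carry the package defaults.
	fmt.Println(*spec.Transport.PeerPort)          // 179
	fmt.Println(*spec.Timers.HoldTimeSeconds)      // 90
	fmt.Println(*spec.Timers.KeepAliveTimeSeconds) // 30

	// Families defaults to ipv6/unicast followed by ipv4/unicast.
	for _, f := range spec.Families {
		fmt.Println(f.Afi, f.Safi)
	}
}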
+ CiliumPodIPPoolSelectorName = CPIPKindDefinition ) // +genclient @@ -99,6 +111,104 @@ type CiliumBGPNeighborGracefulRestart struct { RestartTimeSeconds *int32 `json:"restartTimeSeconds,omitempty"` } +func (gr *CiliumBGPNeighborGracefulRestart) SetDefaults() { + if gr.RestartTimeSeconds == nil || *gr.RestartTimeSeconds == 0 { + gr.RestartTimeSeconds = ptr.To[int32](DefaultBGPGRRestartTimeSeconds) + } +} + +// BGPStandardCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997) +// as a 4-byte decimal number or two 2-byte decimal numbers separated by a colon (<0-65535>:<0-65535>). +// For example, no-export community value is 65553:65281. +// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$|^([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5]):([0-9]|[1-9][0-9]{1,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` +type BGPStandardCommunity string + +// BGPWellKnownCommunity type represents a value of the "standard" 32-bit BGP Communities Attribute (RFC 1997) +// as a well-known string alias to its numeric value. Allowed values and their mapping to the numeric values: +// +// internet = 0x00000000 (0:0) +// planned-shut = 0xffff0000 (65535:0) +// accept-own = 0xffff0001 (65535:1) +// route-filter-translated-v4 = 0xffff0002 (65535:2) +// route-filter-v4 = 0xffff0003 (65535:3) +// route-filter-translated-v6 = 0xffff0004 (65535:4) +// route-filter-v6 = 0xffff0005 (65535:5) +// llgr-stale = 0xffff0006 (65535:6) +// no-llgr = 0xffff0007 (65535:7) +// blackhole = 0xffff029a (65535:666) +// no-export = 0xffffff01 (65535:65281) +// no-advertise = 0xffffff02 (65535:65282) +// no-export-subconfed = 0xffffff03 (65535:65283) +// no-peer = 0xffffff04 (65535:65284) +// +// +kubebuilder:validation:Enum=internet;planned-shut;accept-own;route-filter-translated-v4;route-filter-v4;route-filter-translated-v6;route-filter-v6;llgr-stale;no-llgr;blackhole;no-export;no-advertise;no-export-subconfed;no-peer +type BGPWellKnownCommunity string + +// BGPLargeCommunity type represents a value of the BGP Large Communities Attribute (RFC 8092), +// as three 4-byte decimal numbers separated by colons. +// +kubebuilder:validation:Pattern=`^([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5]):([0-9]|[1-9][0-9]{1,8}|[1-3][0-9]{9}|4[01][0-9]{8}|42[0-8][0-9]{7}|429[0-3][0-9]{6}|4294[0-8][0-9]{5}|42949[0-5][0-9]{4}|429496[0-6][0-9]{3}|4294967[01][0-9]{2}|42949672[0-8][0-9]|429496729[0-5])$` +type BGPLargeCommunity string + +// BGPCommunities holds community values of the supported BGP community path attributes. +type BGPCommunities struct { + // Standard holds a list of "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as numeric values. + // + // +kubebuilder:validation:Optional + Standard []BGPStandardCommunity `json:"standard,omitempty"` + + // WellKnown holds a list "standard" 32-bit BGP Communities Attribute (RFC 1997) values defined as + // well-known string aliases to their numeric values. 
+ // + // +kubebuilder:validation:Optional + WellKnown []BGPWellKnownCommunity `json:"wellKnown,omitempty"` + + // Large holds a list of the BGP Large Communities Attribute (RFC 8092) values. + // + // +kubebuilder:validation:Optional + Large []BGPLargeCommunity `json:"large,omitempty"` +} + +// CiliumBGPPathAttributes can be used to apply additional path attributes +// to matched routes when advertising them to a BGP peer. +type CiliumBGPPathAttributes struct { + // SelectorType defines the object type on which the Selector applies: + // - For "PodCIDR" the Selector matches k8s CiliumNode resources + // (path attributes apply to routes announced for PodCIDRs of selected CiliumNodes. + // Only affects routes of cluster scope / Kubernetes IPAM CIDRs, not Multi-Pool IPAM CIDRs. + // - For "CiliumLoadBalancerIPPool" the Selector matches CiliumLoadBalancerIPPool custom resources + // (path attributes apply to routes announced for selected CiliumLoadBalancerIPPools). + // - For "CiliumPodIPPool" the Selector matches CiliumPodIPPool custom resources + // (path attributes apply to routes announced for allocated CIDRs of selected CiliumPodIPPools). + // + // +kubebuilder:validation:Enum=PodCIDR;CiliumLoadBalancerIPPool;CiliumPodIPPool + // +kubebuilder:validation:Required + SelectorType string `json:"selectorType"` + + // Selector selects a group of objects of the SelectorType + // resulting into routes that will be announced with the configured Attributes. + // If nil / not set, all objects of the SelectorType are selected. + // + // +kubebuilder:validation:Optional + Selector *slimv1.LabelSelector `json:"selector,omitempty"` + + // Communities defines a set of community values advertised in the supported BGP Communities path attributes. + // If nil / not set, no BGP Communities path attribute will be advertised. + // + // +kubebuilder:validation:Optional + Communities *BGPCommunities `json:"communities,omitempty"` + + // LocalPreference defines the preference value advertised in the BGP Local Preference path attribute. + // As Local Preference is only valid for iBGP peers, this value will be ignored for eBGP peers + // (no Local Preference path attribute will be advertised). + // If nil / not set, the default Local Preference of 100 will be advertised in + // the Local Preference path attribute for iBGP peers. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=4294967295 + LocalPreference *int64 `json:"localPreference,omitempty"` +} + // CiliumBGPNeighbor is a neighboring peer for use in a // CiliumBGPVirtualRouter configuration. type CiliumBGPNeighbor struct { @@ -124,6 +234,10 @@ type CiliumBGPNeighbor struct { // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=4294967295 PeerASN int64 `json:"peerASN"` + // AuthSecretRef is the name of the secret to use to fetch a TCP + // authentication password for this peer. + // +kubebuilder:validation:Optional + AuthSecretRef *string `json:"authSecretRef,omitempty"` // EBGPMultihopTTL controls the multi-hop feature for eBGP peers. // Its value defines the Time To Live (TTL) value used in BGP packets sent to the neighbor. // The value 1 implies that eBGP multi-hop feature is disabled (only a single hop is allowed). 
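To make the shape of these community attributes concrete, here is an illustrative sketch (using only types and constants defined in this package) of a CiliumBGPPathAttributes entry that attaches standard, well-known, and large communities plus a local preference to routes announced for CiliumLoadBalancerIPPools:

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	"k8s.io/utils/ptr"
)

func main() {
	attrs := v2alpha1.CiliumBGPPathAttributes{
		// Apply to routes announced for CiliumLoadBalancerIPPools; a nil
		// Selector means every pool of this type is matched.
		SelectorType: v2alpha1.CiliumLoadBalancerIPPoolSelectorName,
		Communities: &v2alpha1.BGPCommunities{
			Standard:  []v2alpha1.BGPStandardCommunity{"64512:100"},
			WellKnown: []v2alpha1.BGPWellKnownCommunity{"no-export"},
			Large:     []v2alpha1.BGPLargeCommunity{"64512:100:1"},
		},
		// Only honoured for iBGP peers; eBGP peers ignore Local Preference.
		LocalPreference: ptr.To[int64](150),
	}
	fmt.Printf("%+v\n", attrs)
}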
@@ -162,6 +276,20 @@ type CiliumBGPNeighbor struct { // // +kubebuilder:validation:Optional GracefulRestart *CiliumBGPNeighborGracefulRestart `json:"gracefulRestart,omitempty"` + // Families, if provided, defines a set of AFI/SAFIs the speaker will + // negotiate with it's peer. + // + // If this slice is not provided the default families of IPv6 and IPv4 will + // be provided. + // + // +kubebuilder:validation:Optional + Families []CiliumBGPFamily `json:"families"` + // AdvertisedPathAttributes can be used to apply additional path attributes + // to selected routes when advertising them to the peer. + // If empty / nil, no additional path attributes are advertised. + // + // +kubebuilder:validation:Optional + AdvertisedPathAttributes []CiliumBGPPathAttributes `json:"advertisedPathAttributes,omitempty"` } // CiliumBGPVirtualRouter defines a discrete BGP virtual router configuration. @@ -179,13 +307,31 @@ type CiliumBGPVirtualRouter struct { // +kubebuilder:validation:Optional // +kubebuilder:default=false ExportPodCIDR *bool `json:"exportPodCIDR,omitempty"` + // PodIPPoolSelector selects CiliumPodIPPools based on labels. The virtual + // router will announce allocated CIDRs of matching CiliumPodIPPools. + // + // If empty / nil no CiliumPodIPPools will be announced. + // + // +kubebuilder:validation:Optional + PodIPPoolSelector *slimv1.LabelSelector `json:"podIPPoolSelector,omitempty"` // ServiceSelector selects a group of load balancer services which this - // virtual router will announce. + // virtual router will announce. The loadBalancerClass for a service must + // be nil or specify a class supported by Cilium, e.g. "io.cilium/bgp-control-plane". + // Refer to the following document for additional details regarding load balancer + // classes: + // + // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class // // If empty / nil no services will be announced. // // +kubebuilder:validation:Optional ServiceSelector *slimv1.LabelSelector `json:"serviceSelector,omitempty"` + // ServiceAdvertisements selects a group of BGP Advertisement(s) to advertise + // for the selected services. + // + // +kubebuilder:validation:Optional + // +kubebuilder:default={LoadBalancerIP} + ServiceAdvertisements []BGPServiceAddressType `json:"serviceAdvertisements,omitempty"` // Neighbors is a list of neighboring BGP peers for this virtual router // // +kubebuilder:validation:Required @@ -207,11 +353,15 @@ func (p *CiliumBGPPeeringPolicy) SetDefaults() { // the main use of this method is to avoid the need for nil-checks in the controller code. func (r *CiliumBGPVirtualRouter) SetDefaults() { if r.ExportPodCIDR == nil { - r.ExportPodCIDR = pointer.Bool(DefaultBGPExportPodCIDR) + r.ExportPodCIDR = ptr.To[bool](DefaultBGPExportPodCIDR) } for i := range r.Neighbors { r.Neighbors[i].SetDefaults() } + + if r.ServiceAdvertisements == nil { + r.ServiceAdvertisements = []BGPServiceAddressType{BGPLoadBalancerIPAddr} + } } // SetDefaults applies default values on the CiliumBGPNeighbor. @@ -219,31 +369,43 @@ func (r *CiliumBGPVirtualRouter) SetDefaults() { // the main use of this method is to avoid the need for nil-checks in the controller code. 
func (n *CiliumBGPNeighbor) SetDefaults() { if n.PeerPort == nil || *n.PeerPort == 0 { - n.PeerPort = pointer.Int32(DefaultBGPPeerPort) + n.PeerPort = ptr.To[int32](DefaultBGPPeerPort) } if n.EBGPMultihopTTL == nil { - n.EBGPMultihopTTL = pointer.Int32(DefaultBGPEBGPMultihopTTL) + n.EBGPMultihopTTL = ptr.To[int32](DefaultBGPEBGPMultihopTTL) } if n.ConnectRetryTimeSeconds == nil || *n.ConnectRetryTimeSeconds == 0 { - n.ConnectRetryTimeSeconds = pointer.Int32(DefaultBGPConnectRetryTimeSeconds) + n.ConnectRetryTimeSeconds = ptr.To[int32](DefaultBGPConnectRetryTimeSeconds) } if n.HoldTimeSeconds == nil || *n.HoldTimeSeconds == 0 { - n.HoldTimeSeconds = pointer.Int32(DefaultBGPHoldTimeSeconds) + n.HoldTimeSeconds = ptr.To[int32](DefaultBGPHoldTimeSeconds) } if n.KeepAliveTimeSeconds == nil || *n.KeepAliveTimeSeconds == 0 { - n.KeepAliveTimeSeconds = pointer.Int32(DefaultBGPKeepAliveTimeSeconds) + n.KeepAliveTimeSeconds = ptr.To[int32](DefaultBGPKeepAliveTimeSeconds) } if n.GracefulRestart != nil && n.GracefulRestart.Enabled && (n.GracefulRestart.RestartTimeSeconds == nil || *n.GracefulRestart.RestartTimeSeconds == 0) { - n.GracefulRestart.RestartTimeSeconds = pointer.Int32(DefaultBGPGRRestartTimeSeconds) + n.GracefulRestart.RestartTimeSeconds = ptr.To[int32](DefaultBGPGRRestartTimeSeconds) + } + if len(n.Families) == 0 { + n.Families = []CiliumBGPFamily{ + { + Afi: "ipv4", + Safi: "unicast", + }, + { + Afi: "ipv6", + Safi: "unicast", + }, + } } } // Validate validates CiliumBGPNeighbor's configuration constraints // that can not be expressed using the kubebuilder validation markers. func (n *CiliumBGPNeighbor) Validate() error { - keepAliveTime := pointer.Int32Deref(n.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds) - holdTime := pointer.Int32Deref(n.HoldTimeSeconds, DefaultBGPHoldTimeSeconds) + keepAliveTime := ptr.Deref[int32](n.KeepAliveTimeSeconds, DefaultBGPKeepAliveTimeSeconds) + holdTime := ptr.Deref[int32](n.HoldTimeSeconds, DefaultBGPHoldTimeSeconds) if keepAliveTime > holdTime { return fmt.Errorf("KeepAliveTimeSeconds larger than HoldTimeSeconds for peer ASN:%d IP:%s", n.PeerASN, n.PeerAddress) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go index 81a7e135e8..33f38480af 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/cnc_types.go @@ -11,8 +11,8 @@ import ( //+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object //+kubebuilder:resource:categories={cilium} //+kubebuilder:object:root=true +//+kubebuilder:deprecatedversion:warning="cilium.io/v2alpha1 CiliumNodeConfig will be deprecated in cilium v1.16; use cilium.io/v2 CiliumNodeConfig" //+deepequal-gen=false -//+kubebuilder:storageversion // CiliumNodeConfig is a list of configuration key-value pairs. It is applied to // nodes indicated by a label selector. 
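For reference, a minimal sketch (not part of the vendored patch) of how the updated SetDefaults and Validate behave for a neighbor: defaulting fills the IPv4/IPv6 unicast families, the peer port and the BGP timers, while Validate rejects a keepalive time larger than the hold time. The example peer address and ASN are placeholders, and the import paths are assumed to mirror those used elsewhere in this diff.

// sketch only, assuming the import paths shown in this diff
package main

import (
	"fmt"

	v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
	"k8s.io/utils/ptr"
)

func main() {
	peer := v2alpha1.CiliumBGPNeighbor{
		PeerAddress: "10.0.0.1/32", // placeholder peer in CIDR notation
		PeerASN:     64513,
	}
	peer.SetDefaults()
	// Families now defaults to ipv4/unicast and ipv6/unicast; PeerPort and
	// the timers are filled with their package defaults.
	fmt.Printf("families=%v port=%d\n", peer.Families, *peer.PeerPort)

	// A keepalive larger than the hold time is rejected by Validate.
	peer.HoldTimeSeconds = ptr.To[int32](9)
	peer.KeepAliveTimeSeconds = ptr.To[int32](30)
	if err := peer.Validate(); err != nil {
		fmt.Println("invalid peer:", err)
	}
}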
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go index 7a7851cb15..7170260459 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go @@ -9,6 +9,9 @@ import ( slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" ) +// L2AnnounceLoadBalancerClass defines the L2 Announcer load balancer class for Services. +const L2AnnounceLoadBalancerClass = "io.cilium/l2-announcer" + // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -64,7 +67,12 @@ type CiliumL2AnnouncementPolicySpec struct { // // +kubebuilder:validation:Optional NodeSelector *slimv1.LabelSelector `json:"nodeSelector"` - // ServiceSelector selects a set of services which will be announced over L2 networks + // ServiceSelector selects a set of services which will be announced over L2 networks. + // The loadBalancerClass for a service must be nil or specify a supported class, e.g. + // "io.cilium/l2-announcer". Refer to the following document for additional details + // regarding load balancer classes: + // + // https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class // // If nil this policy applies to all services. // diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go index 7fc7ad95bd..01efa35309 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go @@ -14,8 +14,8 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:resource:categories={cilium},singular="ciliumloadbalancerippool",path="ciliumloadbalancerippools",scope="Cluster",shortName={ippools,ippool,lbippool,lbippools} // +kubebuilder:printcolumn:JSONPath=".spec.disabled",name="Disabled",type=boolean -// +kubebuilder:printcolumn:name="Conflicting",type=string,JSONPath=`.status.conditions[?(@.type=="io.cilium/conflict")].status` -// +kubebuilder:printcolumn:name="IPs Available",type=string,JSONPath=`.status.conditions[?(@.type=="io.cilium/ips-available")].message` +// +kubebuilder:printcolumn:name="Conflicting",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/PoolConflict")].status` +// +kubebuilder:printcolumn:name="IPs Available",type=string,JSONPath=`.status.conditions[?(@.type=="cilium.io/IPsAvailable")].message` // +kubebuilder:printcolumn:JSONPath=".metadata.creationTimestamp",name="Age",type=date // +kubebuilder:subresource:status // +kubebuilder:storageversion @@ -69,11 +69,16 @@ type CiliumLoadBalancerIPPoolSpec struct { // // +kubebuilder:validation:Optional ServiceSelector *slimv1.LabelSelector `json:"serviceSelector"` - // CiliumLoadBalancerIPPoolCIDRBlock is a list of CIDRs comprising this IP Pool + // AllowFirstLastIPs, if set to `Yes` or undefined means that the first and last IPs of each CIDR will be allocatable. + // If `No`, these IPs will be reserved. This field is ignored for /{31,32} and /{127,128} CIDRs since + // reserving the first and last IPs would make the CIDRs unusable. 
// - // +kubebuilder:validation:Required - // +kubebuilder:validation:MinItems=1 - Cidrs []CiliumLoadBalancerIPPoolCIDRBlock `json:"cidrs"` + // +kubebuilder:validation:Optional + AllowFirstLastIPs AllowFirstLastIPType `json:"allowFirstLastIPs,omitempty"` + // Blocks is a list of CIDRs comprising this IP Pool + // + // +kubebuilder:validation:Optional + Blocks []CiliumLoadBalancerIPPoolIPBlock `json:"blocks,omitempty"` // Disabled, if set to true means that no new IPs will be allocated from this pool. // Existing allocations will not be removed from services. // @@ -82,11 +87,23 @@ type CiliumLoadBalancerIPPoolSpec struct { Disabled bool `json:"disabled"` } -// CiliumLoadBalancerIPPoolCIDRBlock describes a single CIDR block. -type CiliumLoadBalancerIPPoolCIDRBlock struct { +// +kubebuilder:validation:Enum=Yes;No +type AllowFirstLastIPType string + +const ( + AllowFirstLastIPNo AllowFirstLastIPType = "No" + AllowFirstLastIPYes AllowFirstLastIPType = "Yes" +) + +// CiliumLoadBalancerIPPoolIPBlock describes a single IP block. +type CiliumLoadBalancerIPPoolIPBlock struct { // +kubebuilder:validation:Format=cidr - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Cidr IPv4orIPv6CIDR `json:"cidr"` + // +kubebuilder:validation:Optional + Start string `json:"start,omitempty"` + // +kubebuilder:validation:Optional + Stop string `json:"stop,omitempty"` } // +deepequal-gen=false diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go index 32bb859573..8446c629e4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/register.go @@ -40,6 +40,31 @@ const ( // BGPPName is the full name of Cilium BGP Peering Policy BGPPName = BGPPPluralName + "." + CustomResourceDefinitionGroup + // BGPClusterConfig (BGPCC) + BGPCCPluralName = "ciliumbgpclusterconfigs" + BGPCCKindDefinition = "CiliumBGPClusterConfig" + BGPCCName = BGPCCPluralName + "." + CustomResourceDefinitionGroup + + // BGPPeerConfig (BGPPC) + BGPPCPluralName = "ciliumbgppeerconfigs" + BGPPCKindDefinition = "CiliumBGPPeerConfig" + BGPPCName = BGPPCPluralName + "." + CustomResourceDefinitionGroup + + // BGPAdvertisement (BGPA) + BGPAPluralName = "ciliumbgpadvertisements" + BGPAKindDefinition = "CiliumBGPAdvertisement" + BGPAName = BGPAPluralName + "." + CustomResourceDefinitionGroup + + // BGPNodeConfig (BGPNC) + BGPNCPluralName = "ciliumbgpnodeconfigs" + BGPNCKindDefinition = "CiliumBGPNodeConfig" + BGPNCName = BGPNCPluralName + "." + CustomResourceDefinitionGroup + + // BGPNodeConfigOverride (BGPNCO) + BGPNCOPluralName = "ciliumbgpnodeconfigoverrides" + BGPNCOKindDefinition = "CiliumBGPNodeConfigOverride" + BGPNCOName = BGPNCOPluralName + "." + CustomResourceDefinitionGroup + // Cilium Load Balancer IP Pool (IPPool) // PoolPluralName is the plural name of Cilium Load Balancer IP Pool @@ -52,9 +77,15 @@ const ( LBIPPoolName = PoolPluralName + "." + CustomResourceDefinitionGroup // CiliumNodeConfig (CNC) - CNCPluralName = "ciliumnodeconfigs" + + // CNCPluralName is the plural name of Cilium Node Config + CNCPluralName = "ciliumnodeconfigs" + + // CNCKindDefinition is the kind name of Cilium Node Config CNCKindDefinition = "CiliumNodeConfig" - CNCName = CNCPluralName + "." + CustomResourceDefinitionGroup + + // CNCName is the full name of Cilium Node Config + CNCName = CNCPluralName + "." 
+ CustomResourceDefinitionGroup // CiliumCIDRGroup (CCG) CCGPluralName = "ciliumcidrgroups" @@ -128,14 +159,26 @@ func addKnownTypes(scheme *runtime.Scheme) error { &CiliumBGPPeeringPolicyList{}, &CiliumLoadBalancerIPPool{}, &CiliumLoadBalancerIPPoolList{}, - &CiliumNodeConfig{}, - &CiliumNodeConfigList{}, &CiliumCIDRGroup{}, &CiliumCIDRGroupList{}, &CiliumL2AnnouncementPolicy{}, &CiliumL2AnnouncementPolicyList{}, &CiliumPodIPPool{}, &CiliumPodIPPoolList{}, + &CiliumNodeConfig{}, + &CiliumNodeConfigList{}, + + // new BGP types + &CiliumBGPClusterConfig{}, + &CiliumBGPClusterConfigList{}, + &CiliumBGPPeerConfig{}, + &CiliumBGPPeerConfigList{}, + &CiliumBGPAdvertisement{}, + &CiliumBGPAdvertisementList{}, + &CiliumBGPNodeConfig{}, + &CiliumBGPNodeConfigList{}, + &CiliumBGPNodeConfigOverride{}, + &CiliumBGPNodeConfigOverrideList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go index 813cf6a56d..ad712afe26 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepcopy.go @@ -17,32 +17,1019 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPAdvertisement) DeepCopyInto(out *BGPAdvertisement) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(BGPServiceOptions) + (*in).DeepCopyInto(*out) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Attributes != nil { + in, out := &in.Attributes, &out.Attributes + *out = new(BGPAttributes) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAdvertisement. +func (in *BGPAdvertisement) DeepCopy() *BGPAdvertisement { + if in == nil { + return nil + } + out := new(BGPAdvertisement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPAttributes) DeepCopyInto(out *BGPAttributes) { + *out = *in + if in.Communities != nil { + in, out := &in.Communities, &out.Communities + *out = new(BGPCommunities) + (*in).DeepCopyInto(*out) + } + if in.LocalPreference != nil { + in, out := &in.LocalPreference, &out.LocalPreference + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPAttributes. +func (in *BGPAttributes) DeepCopy() *BGPAttributes { + if in == nil { + return nil + } + out := new(BGPAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BGPCommunities) DeepCopyInto(out *BGPCommunities) { + *out = *in + if in.Standard != nil { + in, out := &in.Standard, &out.Standard + *out = make([]BGPStandardCommunity, len(*in)) + copy(*out, *in) + } + if in.WellKnown != nil { + in, out := &in.WellKnown, &out.WellKnown + *out = make([]BGPWellKnownCommunity, len(*in)) + copy(*out, *in) + } + if in.Large != nil { + in, out := &in.Large, &out.Large + *out = make([]BGPLargeCommunity, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPCommunities. +func (in *BGPCommunities) DeepCopy() *BGPCommunities { + if in == nil { + return nil + } + out := new(BGPCommunities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPFamilyRouteCount) DeepCopyInto(out *BGPFamilyRouteCount) { + *out = *in + if in.Received != nil { + in, out := &in.Received, &out.Received + *out = new(int32) + **out = **in + } + if in.Advertised != nil { + in, out := &in.Advertised, &out.Advertised + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFamilyRouteCount. +func (in *BGPFamilyRouteCount) DeepCopy() *BGPFamilyRouteCount { + if in == nil { + return nil + } + out := new(BGPFamilyRouteCount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPServiceOptions) DeepCopyInto(out *BGPServiceOptions) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]BGPServiceAddressType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPServiceOptions. +func (in *BGPServiceOptions) DeepCopy() *BGPServiceOptions { + if in == nil { + return nil + } + out := new(BGPServiceOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPAdvertisement) DeepCopyInto(out *CiliumBGPAdvertisement) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisement. +func (in *CiliumBGPAdvertisement) DeepCopy() *CiliumBGPAdvertisement { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPAdvertisement) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPAdvertisementList) DeepCopyInto(out *CiliumBGPAdvertisementList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPAdvertisement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementList. +func (in *CiliumBGPAdvertisementList) DeepCopy() *CiliumBGPAdvertisementList { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisementList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPAdvertisementList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPAdvertisementSpec) DeepCopyInto(out *CiliumBGPAdvertisementSpec) { + *out = *in + if in.Advertisements != nil { + in, out := &in.Advertisements, &out.Advertisements + *out = make([]BGPAdvertisement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPAdvertisementSpec. +func (in *CiliumBGPAdvertisementSpec) DeepCopy() *CiliumBGPAdvertisementSpec { + if in == nil { + return nil + } + out := new(CiliumBGPAdvertisementSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfig) DeepCopyInto(out *CiliumBGPClusterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfig. +func (in *CiliumBGPClusterConfig) DeepCopy() *CiliumBGPClusterConfig { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPClusterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfigList) DeepCopyInto(out *CiliumBGPClusterConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPClusterConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigList. +func (in *CiliumBGPClusterConfigList) DeepCopy() *CiliumBGPClusterConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CiliumBGPClusterConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfigSpec) DeepCopyInto(out *CiliumBGPClusterConfigSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigSpec. +func (in *CiliumBGPClusterConfigSpec) DeepCopy() *CiliumBGPClusterConfigSpec { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPClusterConfigStatus) DeepCopyInto(out *CiliumBGPClusterConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPClusterConfigStatus. +func (in *CiliumBGPClusterConfigStatus) DeepCopy() *CiliumBGPClusterConfigStatus { + if in == nil { + return nil + } + out := new(CiliumBGPClusterConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPFamily) DeepCopyInto(out *CiliumBGPFamily) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamily. +func (in *CiliumBGPFamily) DeepCopy() *CiliumBGPFamily { + if in == nil { + return nil + } + out := new(CiliumBGPFamily) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPFamilyWithAdverts) DeepCopyInto(out *CiliumBGPFamilyWithAdverts) { + *out = *in + out.CiliumBGPFamily = in.CiliumBGPFamily + if in.Advertisements != nil { + in, out := &in.Advertisements, &out.Advertisements + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPFamilyWithAdverts. +func (in *CiliumBGPFamilyWithAdverts) DeepCopy() *CiliumBGPFamilyWithAdverts { + if in == nil { + return nil + } + out := new(CiliumBGPFamilyWithAdverts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPInstance) DeepCopyInto(out *CiliumBGPInstance) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPInstance. +func (in *CiliumBGPInstance) DeepCopy() *CiliumBGPInstance { + if in == nil { + return nil + } + out := new(CiliumBGPInstance) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumBGPNeighbor) DeepCopyInto(out *CiliumBGPNeighbor) { *out = *in - if in.PeerPort != nil { - in, out := &in.PeerPort, &out.PeerPort - *out = new(int32) + if in.PeerPort != nil { + in, out := &in.PeerPort, &out.PeerPort + *out = new(int32) + **out = **in + } + if in.AuthSecretRef != nil { + in, out := &in.AuthSecretRef, &out.AuthSecretRef + *out = new(string) + **out = **in + } + if in.EBGPMultihopTTL != nil { + in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL + *out = new(int32) + **out = **in + } + if in.ConnectRetryTimeSeconds != nil { + in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds + *out = new(int32) + **out = **in + } + if in.HoldTimeSeconds != nil { + in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.KeepAliveTimeSeconds != nil { + in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds + *out = new(int32) + **out = **in + } + if in.GracefulRestart != nil { + in, out := &in.GracefulRestart, &out.GracefulRestart + *out = new(CiliumBGPNeighborGracefulRestart) + (*in).DeepCopyInto(*out) + } + if in.Families != nil { + in, out := &in.Families, &out.Families + *out = make([]CiliumBGPFamily, len(*in)) + copy(*out, *in) + } + if in.AdvertisedPathAttributes != nil { + in, out := &in.AdvertisedPathAttributes, &out.AdvertisedPathAttributes + *out = make([]CiliumBGPPathAttributes, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor. +func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor { + if in == nil { + return nil + } + out := new(CiliumBGPNeighbor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) { + *out = *in + if in.RestartTimeSeconds != nil { + in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart. +func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart { + if in == nil { + return nil + } + out := new(CiliumBGPNeighborGracefulRestart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodeConfig) DeepCopyInto(out *CiliumBGPNodeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfig. +func (in *CiliumBGPNodeConfig) DeepCopy() *CiliumBGPNodeConfig { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopyInto(out *CiliumBGPNodeConfigInstanceOverride) { + *out = *in + if in.RouterID != nil { + in, out := &in.RouterID, &out.RouterID + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPNodeConfigPeerOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigInstanceOverride. +func (in *CiliumBGPNodeConfigInstanceOverride) DeepCopy() *CiliumBGPNodeConfigInstanceOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigInstanceOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigList) DeepCopyInto(out *CiliumBGPNodeConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPNodeConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigList. +func (in *CiliumBGPNodeConfigList) DeepCopy() *CiliumBGPNodeConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigOverride) DeepCopyInto(out *CiliumBGPNodeConfigOverride) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverride. 
+func (in *CiliumBGPNodeConfigOverride) DeepCopy() *CiliumBGPNodeConfigOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigOverride) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopyInto(out *CiliumBGPNodeConfigOverrideList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPNodeConfigOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideList. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopy() *CiliumBGPNodeConfigOverrideList { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverrideList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPNodeConfigOverrideList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopyInto(out *CiliumBGPNodeConfigOverrideSpec) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeConfigInstanceOverride, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigOverrideSpec. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepCopy() *CiliumBGPNodeConfigOverrideSpec { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigOverrideSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeConfigPeerOverride) DeepCopyInto(out *CiliumBGPNodeConfigPeerOverride) { + *out = *in + if in.LocalAddress != nil { + in, out := &in.LocalAddress, &out.LocalAddress + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeConfigPeerOverride. +func (in *CiliumBGPNodeConfigPeerOverride) DeepCopy() *CiliumBGPNodeConfigPeerOverride { + if in == nil { + return nil + } + out := new(CiliumBGPNodeConfigPeerOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodeInstance) DeepCopyInto(out *CiliumBGPNodeInstance) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.RouterID != nil { + in, out := &in.RouterID, &out.RouterID + *out = new(string) + **out = **in + } + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]CiliumBGPNodePeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstance. +func (in *CiliumBGPNodeInstance) DeepCopy() *CiliumBGPNodeInstance { + if in == nil { + return nil + } + out := new(CiliumBGPNodeInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeInstanceStatus) DeepCopyInto(out *CiliumBGPNodeInstanceStatus) { + *out = *in + if in.LocalASN != nil { + in, out := &in.LocalASN, &out.LocalASN + *out = new(int64) + **out = **in + } + if in.PeerStatuses != nil { + in, out := &in.PeerStatuses, &out.PeerStatuses + *out = make([]CiliumBGPNodePeerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeInstanceStatus. +func (in *CiliumBGPNodeInstanceStatus) DeepCopy() *CiliumBGPNodeInstanceStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodeInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodePeer) DeepCopyInto(out *CiliumBGPNodePeer) { + *out = *in + if in.PeerAddress != nil { + in, out := &in.PeerAddress, &out.PeerAddress + *out = new(string) + **out = **in + } + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) + **out = **in + } + if in.LocalAddress != nil { + in, out := &in.LocalAddress, &out.LocalAddress + *out = new(string) + **out = **in + } + if in.PeerConfigRef != nil { + in, out := &in.PeerConfigRef, &out.PeerConfigRef + *out = new(PeerConfigReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeer. +func (in *CiliumBGPNodePeer) DeepCopy() *CiliumBGPNodePeer { + if in == nil { + return nil + } + out := new(CiliumBGPNodePeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPNodePeerStatus) DeepCopyInto(out *CiliumBGPNodePeerStatus) { + *out = *in + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) + **out = **in + } + if in.PeeringState != nil { + in, out := &in.PeeringState, &out.PeeringState + *out = new(string) + **out = **in + } + if in.Timers != nil { + in, out := &in.Timers, &out.Timers + *out = new(CiliumBGPTimersState) + (*in).DeepCopyInto(*out) + } + if in.EstablishedTime != nil { + in, out := &in.EstablishedTime, &out.EstablishedTime + *out = new(string) + **out = **in + } + if in.RouteCount != nil { + in, out := &in.RouteCount, &out.RouteCount + *out = make([]BGPFamilyRouteCount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodePeerStatus. +func (in *CiliumBGPNodePeerStatus) DeepCopy() *CiliumBGPNodePeerStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodePeerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeSpec) DeepCopyInto(out *CiliumBGPNodeSpec) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeSpec. +func (in *CiliumBGPNodeSpec) DeepCopy() *CiliumBGPNodeSpec { + if in == nil { + return nil + } + out := new(CiliumBGPNodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPNodeStatus) DeepCopyInto(out *CiliumBGPNodeStatus) { + *out = *in + if in.BGPInstances != nil { + in, out := &in.BGPInstances, &out.BGPInstances + *out = make([]CiliumBGPNodeInstanceStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNodeStatus. +func (in *CiliumBGPNodeStatus) DeepCopy() *CiliumBGPNodeStatus { + if in == nil { + return nil + } + out := new(CiliumBGPNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPathAttributes) DeepCopyInto(out *CiliumBGPPathAttributes) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Communities != nil { + in, out := &in.Communities, &out.Communities + *out = new(BGPCommunities) + (*in).DeepCopyInto(*out) + } + if in.LocalPreference != nil { + in, out := &in.LocalPreference, &out.LocalPreference + *out = new(int64) **out = **in } - if in.EBGPMultihopTTL != nil { - in, out := &in.EBGPMultihopTTL, &out.EBGPMultihopTTL - *out = new(int32) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPathAttributes. 
+func (in *CiliumBGPPathAttributes) DeepCopy() *CiliumBGPPathAttributes { + if in == nil { + return nil + } + out := new(CiliumBGPPathAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPeer) DeepCopyInto(out *CiliumBGPPeer) { + *out = *in + if in.PeerAddress != nil { + in, out := &in.PeerAddress, &out.PeerAddress + *out = new(string) **out = **in } - if in.ConnectRetryTimeSeconds != nil { - in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds - *out = new(int32) + if in.PeerASN != nil { + in, out := &in.PeerASN, &out.PeerASN + *out = new(int64) **out = **in } - if in.HoldTimeSeconds != nil { - in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds - *out = new(int32) + if in.PeerConfigRef != nil { + in, out := &in.PeerConfigRef, &out.PeerConfigRef + *out = new(PeerConfigReference) **out = **in } - if in.KeepAliveTimeSeconds != nil { - in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds - *out = new(int32) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeer. +func (in *CiliumBGPPeer) DeepCopy() *CiliumBGPPeer { + if in == nil { + return nil + } + out := new(CiliumBGPPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPeerConfig) DeepCopyInto(out *CiliumBGPPeerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfig. +func (in *CiliumBGPPeerConfig) DeepCopy() *CiliumBGPPeerConfig { + if in == nil { + return nil + } + out := new(CiliumBGPPeerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPPeerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPPeerConfigList) DeepCopyInto(out *CiliumBGPPeerConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CiliumBGPPeerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigList. +func (in *CiliumBGPPeerConfigList) DeepCopy() *CiliumBGPPeerConfigList { + if in == nil { + return nil + } + out := new(CiliumBGPPeerConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CiliumBGPPeerConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CiliumBGPPeerConfigSpec) DeepCopyInto(out *CiliumBGPPeerConfigSpec) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(CiliumBGPTransport) + (*in).DeepCopyInto(*out) + } + if in.Timers != nil { + in, out := &in.Timers, &out.Timers + *out = new(CiliumBGPTimers) + (*in).DeepCopyInto(*out) + } + if in.AuthSecretRef != nil { + in, out := &in.AuthSecretRef, &out.AuthSecretRef + *out = new(string) **out = **in } if in.GracefulRestart != nil { @@ -50,36 +1037,50 @@ func (in *CiliumBGPNeighbor) DeepCopyInto(out *CiliumBGPNeighbor) { *out = new(CiliumBGPNeighborGracefulRestart) (*in).DeepCopyInto(*out) } + if in.EBGPMultihop != nil { + in, out := &in.EBGPMultihop, &out.EBGPMultihop + *out = new(int32) + **out = **in + } + if in.Families != nil { + in, out := &in.Families, &out.Families + *out = make([]CiliumBGPFamilyWithAdverts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighbor. -func (in *CiliumBGPNeighbor) DeepCopy() *CiliumBGPNeighbor { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigSpec. +func (in *CiliumBGPPeerConfigSpec) DeepCopy() *CiliumBGPPeerConfigSpec { if in == nil { return nil } - out := new(CiliumBGPNeighbor) + out := new(CiliumBGPPeerConfigSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CiliumBGPNeighborGracefulRestart) DeepCopyInto(out *CiliumBGPNeighborGracefulRestart) { +func (in *CiliumBGPPeerConfigStatus) DeepCopyInto(out *CiliumBGPPeerConfigStatus) { *out = *in - if in.RestartTimeSeconds != nil { - in, out := &in.RestartTimeSeconds, &out.RestartTimeSeconds - *out = new(int32) - **out = **in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPNeighborGracefulRestart. -func (in *CiliumBGPNeighborGracefulRestart) DeepCopy() *CiliumBGPNeighborGracefulRestart { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPPeerConfigStatus. +func (in *CiliumBGPPeerConfigStatus) DeepCopy() *CiliumBGPPeerConfigStatus { if in == nil { return nil } - out := new(CiliumBGPNeighborGracefulRestart) + out := new(CiliumBGPPeerConfigStatus) in.DeepCopyInto(out) return out } @@ -172,6 +1173,89 @@ func (in *CiliumBGPPeeringPolicySpec) DeepCopy() *CiliumBGPPeeringPolicySpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPTimers) DeepCopyInto(out *CiliumBGPTimers) { + *out = *in + if in.ConnectRetryTimeSeconds != nil { + in, out := &in.ConnectRetryTimeSeconds, &out.ConnectRetryTimeSeconds + *out = new(int32) + **out = **in + } + if in.HoldTimeSeconds != nil { + in, out := &in.HoldTimeSeconds, &out.HoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.KeepAliveTimeSeconds != nil { + in, out := &in.KeepAliveTimeSeconds, &out.KeepAliveTimeSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimers. 
+func (in *CiliumBGPTimers) DeepCopy() *CiliumBGPTimers { + if in == nil { + return nil + } + out := new(CiliumBGPTimers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPTimersState) DeepCopyInto(out *CiliumBGPTimersState) { + *out = *in + if in.AppliedHoldTimeSeconds != nil { + in, out := &in.AppliedHoldTimeSeconds, &out.AppliedHoldTimeSeconds + *out = new(int32) + **out = **in + } + if in.AppliedKeepaliveSeconds != nil { + in, out := &in.AppliedKeepaliveSeconds, &out.AppliedKeepaliveSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTimersState. +func (in *CiliumBGPTimersState) DeepCopy() *CiliumBGPTimersState { + if in == nil { + return nil + } + out := new(CiliumBGPTimersState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CiliumBGPTransport) DeepCopyInto(out *CiliumBGPTransport) { + *out = *in + if in.LocalPort != nil { + in, out := &in.LocalPort, &out.LocalPort + *out = new(int32) + **out = **in + } + if in.PeerPort != nil { + in, out := &in.PeerPort, &out.PeerPort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumBGPTransport. +func (in *CiliumBGPTransport) DeepCopy() *CiliumBGPTransport { + if in == nil { + return nil + } + out := new(CiliumBGPTransport) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) { *out = *in @@ -180,11 +1264,21 @@ func (in *CiliumBGPVirtualRouter) DeepCopyInto(out *CiliumBGPVirtualRouter) { *out = new(bool) **out = **in } + if in.PodIPPoolSelector != nil { + in, out := &in.PodIPPoolSelector, &out.PodIPPoolSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.ServiceSelector != nil { in, out := &in.ServiceSelector, &out.ServiceSelector *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.ServiceAdvertisements != nil { + in, out := &in.ServiceAdvertisements, &out.ServiceAdvertisements + *out = make([]BGPServiceAddressType, len(*in)) + copy(*out, *in) + } if in.Neighbors != nil { in, out := &in.Neighbors, &out.Neighbors *out = make([]CiliumBGPNeighbor, len(*in)) @@ -496,17 +1590,17 @@ func (in *CiliumLoadBalancerIPPool) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolCIDRBlock) { +func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopyInto(out *CiliumLoadBalancerIPPoolIPBlock) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolCIDRBlock. -func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepCopy() *CiliumLoadBalancerIPPoolCIDRBlock { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CiliumLoadBalancerIPPoolIPBlock. 
+func (in *CiliumLoadBalancerIPPoolIPBlock) DeepCopy() *CiliumLoadBalancerIPPoolIPBlock { if in == nil { return nil } - out := new(CiliumLoadBalancerIPPoolCIDRBlock) + out := new(CiliumLoadBalancerIPPoolIPBlock) in.DeepCopyInto(out) return out } @@ -552,9 +1646,9 @@ func (in *CiliumLoadBalancerIPPoolSpec) DeepCopyInto(out *CiliumLoadBalancerIPPo *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } - if in.Cidrs != nil { - in, out := &in.Cidrs, &out.Cidrs - *out = make([]CiliumLoadBalancerIPPoolCIDRBlock, len(*in)) + if in.Blocks != nil { + in, out := &in.Blocks, &out.Blocks + *out = make([]CiliumLoadBalancerIPPoolIPBlock, len(*in)) copy(*out, *in) } return @@ -867,3 +1961,19 @@ func (in *IPv6PoolSpec) DeepCopy() *IPv6PoolSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerConfigReference) DeepCopyInto(out *PeerConfigReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerConfigReference. +func (in *PeerConfigReference) DeepCopy() *PeerConfigReference { + if in == nil { + return nil + } + out := new(PeerConfigReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go index 7908b87c12..cca99d9aea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1/zz_generated.deepequal.go @@ -8,6 +8,370 @@ package v2alpha1 +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BGPAdvertisement) DeepEqual(other *BGPAdvertisement) bool { + if other == nil { + return false + } + + if in.AdvertisementType != other.AdvertisementType { + return false + } + if (in.Service == nil) != (other.Service == nil) { + return false + } else if in.Service != nil { + if !in.Service.DeepEqual(other.Service) { + return false + } + } + + if (in.Selector == nil) != (other.Selector == nil) { + return false + } else if in.Selector != nil { + if !in.Selector.DeepEqual(other.Selector) { + return false + } + } + + if (in.Attributes == nil) != (other.Attributes == nil) { + return false + } else if in.Attributes != nil { + if !in.Attributes.DeepEqual(other.Attributes) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BGPAttributes) DeepEqual(other *BGPAttributes) bool { + if other == nil { + return false + } + + if (in.Communities == nil) != (other.Communities == nil) { + return false + } else if in.Communities != nil { + if !in.Communities.DeepEqual(other.Communities) { + return false + } + } + + if (in.LocalPreference == nil) != (other.LocalPreference == nil) { + return false + } else if in.LocalPreference != nil { + if *in.LocalPreference != *other.LocalPreference { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *BGPCommunities) DeepEqual(other *BGPCommunities) bool { + if other == nil { + return false + } + + if ((in.Standard != nil) && (other.Standard != nil)) || ((in.Standard == nil) != (other.Standard == nil)) { + in, other := &in.Standard, &other.Standard + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if ((in.WellKnown != nil) && (other.WellKnown != nil)) || ((in.WellKnown == nil) != (other.WellKnown == nil)) { + in, other := &in.WellKnown, &other.WellKnown + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + if ((in.Large != nil) && (other.Large != nil)) || ((in.Large == nil) != (other.Large == nil)) { + in, other := &in.Large, &other.Large + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BGPFamilyRouteCount) DeepEqual(other *BGPFamilyRouteCount) bool { + if other == nil { + return false + } + + if in.Afi != other.Afi { + return false + } + if in.Safi != other.Safi { + return false + } + if (in.Received == nil) != (other.Received == nil) { + return false + } else if in.Received != nil { + if *in.Received != *other.Received { + return false + } + } + + if (in.Advertised == nil) != (other.Advertised == nil) { + return false + } else if in.Advertised != nil { + if *in.Advertised != *other.Advertised { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *BGPServiceOptions) DeepEqual(other *BGPServiceOptions) bool { + if other == nil { + return false + } + + if ((in.Addresses != nil) && (other.Addresses != nil)) || ((in.Addresses == nil) != (other.Addresses == nil)) { + in, other := &in.Addresses, &other.Addresses + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPAdvertisement) DeepEqual(other *CiliumBGPAdvertisement) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPAdvertisementSpec) DeepEqual(other *CiliumBGPAdvertisementSpec) bool { + if other == nil { + return false + } + + if ((in.Advertisements != nil) && (other.Advertisements != nil)) || ((in.Advertisements == nil) != (other.Advertisements == nil)) { + in, other := &in.Advertisements, &other.Advertisements + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPClusterConfig) DeepEqual(other *CiliumBGPClusterConfig) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + if !in.Status.DeepEqual(&other.Status) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPClusterConfigSpec) DeepEqual(other *CiliumBGPClusterConfigSpec) bool { + if other == nil { + return false + } + + if (in.NodeSelector == nil) != (other.NodeSelector == nil) { + return false + } else if in.NodeSelector != nil { + if !in.NodeSelector.DeepEqual(other.NodeSelector) { + return false + } + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPClusterConfigStatus) DeepEqual(other *CiliumBGPClusterConfigStatus) bool { + if other == nil { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPFamily) DeepEqual(other *CiliumBGPFamily) bool { + if other == nil { + return false + } + + if in.Afi != other.Afi { + return false + } + if in.Safi != other.Safi { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPFamilyWithAdverts) DeepEqual(other *CiliumBGPFamilyWithAdverts) bool { + if other == nil { + return false + } + + if in.CiliumBGPFamily != other.CiliumBGPFamily { + return false + } + + if (in.Advertisements == nil) != (other.Advertisements == nil) { + return false + } else if in.Advertisements != nil { + if !in.Advertisements.DeepEqual(other.Advertisements) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPInstance) DeepEqual(other *CiliumBGPInstance) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { @@ -15,56 +379,583 @@ func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { return false } - if in.PeerAddress != other.PeerAddress { + if in.PeerAddress != other.PeerAddress { + return false + } + if (in.PeerPort == nil) != (other.PeerPort == nil) { + return false + } else if in.PeerPort != nil { + if *in.PeerPort != *other.PeerPort { + return false + } + } + + if in.PeerASN != other.PeerASN { + return false + } + if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) { + return false + } else if in.AuthSecretRef != nil { + if *in.AuthSecretRef != *other.AuthSecretRef { + return false + } + } + + if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) { + return false + } else if in.EBGPMultihopTTL != nil { + if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL { + return false + } + } + + if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + return false + } else if in.ConnectRetryTimeSeconds != nil { + if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + return false + } + } + + if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) { + return false + } else if in.HoldTimeSeconds != nil { + if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + return false + } + } + + if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + return false + } else if in.KeepAliveTimeSeconds != nil { + if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + return false + } + } + + if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + return false + } else if in.GracefulRestart != nil { + if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + return false + } + } + + if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) { + in, other := &in.Families, &other.Families + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.AdvertisedPathAttributes != nil) && (other.AdvertisedPathAttributes != nil)) || ((in.AdvertisedPathAttributes == nil) != (other.AdvertisedPathAttributes == nil)) { + in, other := &in.AdvertisedPathAttributes, &other.AdvertisedPathAttributes + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool { + if other == nil { + return false + } + + if in.Enabled != other.Enabled { + return false + } + if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) { + return false + } else if in.RestartTimeSeconds != nil { + if *in.RestartTimeSeconds != *other.RestartTimeSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfig) DeepEqual(other *CiliumBGPNodeConfig) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + if !in.Status.DeepEqual(&other.Status) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigInstanceOverride) DeepEqual(other *CiliumBGPNodeConfigInstanceOverride) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.RouterID == nil) != (other.RouterID == nil) { + return false + } else if in.RouterID != nil { + if *in.RouterID != *other.RouterID { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigOverride) DeepEqual(other *CiliumBGPNodeConfigOverride) bool { + if other == nil { + return false + } + + if !in.Spec.DeepEqual(&other.Spec) { + return false + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeConfigOverrideSpec) DeepEqual(other *CiliumBGPNodeConfigOverrideSpec) bool { + if other == nil { + return false + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeConfigPeerOverride) DeepEqual(other *CiliumBGPNodeConfigPeerOverride) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalAddress == nil) != (other.LocalAddress == nil) { + return false + } else if in.LocalAddress != nil { + if *in.LocalAddress != *other.LocalAddress { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeInstance) DeepEqual(other *CiliumBGPNodeInstance) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if (in.RouterID == nil) != (other.RouterID == nil) { + return false + } else if in.RouterID != nil { + if *in.RouterID != *other.RouterID { + return false + } + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if ((in.Peers != nil) && (other.Peers != nil)) || ((in.Peers == nil) != (other.Peers == nil)) { + in, other := &in.Peers, &other.Peers + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeInstanceStatus) DeepEqual(other *CiliumBGPNodeInstanceStatus) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.LocalASN == nil) != (other.LocalASN == nil) { + return false + } else if in.LocalASN != nil { + if *in.LocalASN != *other.LocalASN { + return false + } + } + + if ((in.PeerStatuses != nil) && (other.PeerStatuses != nil)) || ((in.PeerStatuses == nil) != (other.PeerStatuses == nil)) { + in, other := &in.PeerStatuses, &other.PeerStatuses + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodePeer) DeepEqual(other *CiliumBGPNodePeer) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if (in.PeerAddress == nil) != (other.PeerAddress == nil) { + return false + } else if in.PeerAddress != nil { + if *in.PeerAddress != *other.PeerAddress { + return false + } + } + + if (in.PeerASN == nil) != (other.PeerASN == nil) { + return false + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { + return false + } + } + + if (in.LocalAddress == nil) != (other.LocalAddress == nil) { + return false + } else if in.LocalAddress != nil { + if *in.LocalAddress != *other.LocalAddress { + return false + } + } + + if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) { + return false + } else if in.PeerConfigRef != nil { + if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodePeerStatus) DeepEqual(other *CiliumBGPNodePeerStatus) bool { + if other == nil { + return false + } + + if in.Name != other.Name { + return false + } + if in.PeerAddress != other.PeerAddress { + return false + } + if (in.PeerASN == nil) != (other.PeerASN == nil) { + return false + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { + return false + } + } + + if (in.PeeringState == nil) != (other.PeeringState == nil) { + return false + } else if in.PeeringState != nil { + if *in.PeeringState != *other.PeeringState { + return false + } + } + + if (in.Timers == nil) != (other.Timers == nil) { + return false + } else if in.Timers != nil { + if !in.Timers.DeepEqual(other.Timers) { + return false + } + } + + if (in.EstablishedTime == nil) != (other.EstablishedTime == nil) { + return false + } else if in.EstablishedTime != nil { + if *in.EstablishedTime != *other.EstablishedTime { + return false + } + } + + if ((in.RouteCount != nil) && (other.RouteCount != nil)) || ((in.RouteCount == nil) != (other.RouteCount == nil)) { + in, other := &in.RouteCount, &other.RouteCount + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPNodeSpec) DeepEqual(other *CiliumBGPNodeSpec) bool { + if other == nil { + return false + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPNodeStatus) DeepEqual(other *CiliumBGPNodeStatus) bool { + if other == nil { + return false + } + + if ((in.BGPInstances != nil) && (other.BGPInstances != nil)) || ((in.BGPInstances == nil) != (other.BGPInstances == nil)) { + in, other := &in.BGPInstances, &other.BGPInstances + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPPathAttributes) DeepEqual(other *CiliumBGPPathAttributes) bool { + if other == nil { return false } - if (in.PeerPort == nil) != (other.PeerPort == nil) { + + if in.SelectorType != other.SelectorType { return false - } else if in.PeerPort != nil { - if *in.PeerPort != *other.PeerPort { + } + if (in.Selector == nil) != (other.Selector == nil) { + return false + } else if in.Selector != nil { + if !in.Selector.DeepEqual(other.Selector) { return false } } - if in.PeerASN != other.PeerASN { + if (in.Communities == nil) != (other.Communities == nil) { return false - } - if (in.EBGPMultihopTTL == nil) != (other.EBGPMultihopTTL == nil) { - return false - } else if in.EBGPMultihopTTL != nil { - if *in.EBGPMultihopTTL != *other.EBGPMultihopTTL { + } else if in.Communities != nil { + if !in.Communities.DeepEqual(other.Communities) { return false } } - if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + if (in.LocalPreference == nil) != (other.LocalPreference == nil) { return false - } else if in.ConnectRetryTimeSeconds != nil { - if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + } else if in.LocalPreference != nil { + if *in.LocalPreference != *other.LocalPreference { return false } } - if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) { + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPPeer) DeepEqual(other *CiliumBGPPeer) bool { + if other == nil { return false - } else if in.HoldTimeSeconds != nil { - if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + } + + if in.Name != other.Name { + return false + } + if (in.PeerAddress == nil) != (other.PeerAddress == nil) { + return false + } else if in.PeerAddress != nil { + if *in.PeerAddress != *other.PeerAddress { return false } } - if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + if (in.PeerASN == nil) != (other.PeerASN == nil) { return false - } else if in.KeepAliveTimeSeconds != nil { - if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + } else if in.PeerASN != nil { + if *in.PeerASN != *other.PeerASN { return false } } - if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + if (in.PeerConfigRef == nil) != (other.PeerConfigRef == nil) { return false - } else if in.GracefulRestart != nil { - if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + } else if in.PeerConfigRef != nil { + if !in.PeerConfigRef.DeepEqual(other.PeerConfigRef) { return false } } @@ -74,20 +965,94 @@ func (in *CiliumBGPNeighbor) DeepEqual(other *CiliumBGPNeighbor) bool { // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
-func (in *CiliumBGPNeighborGracefulRestart) DeepEqual(other *CiliumBGPNeighborGracefulRestart) bool { +func (in *CiliumBGPPeerConfig) DeepEqual(other *CiliumBGPPeerConfig) bool { if other == nil { return false } - if in.Enabled != other.Enabled { + if !in.Spec.DeepEqual(&other.Spec) { return false } - if (in.RestartTimeSeconds == nil) != (other.RestartTimeSeconds == nil) { + + if !in.Status.DeepEqual(&other.Status) { return false - } else if in.RestartTimeSeconds != nil { - if *in.RestartTimeSeconds != *other.RestartTimeSeconds { + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPPeerConfigSpec) DeepEqual(other *CiliumBGPPeerConfigSpec) bool { + if other == nil { + return false + } + + if (in.Transport == nil) != (other.Transport == nil) { + return false + } else if in.Transport != nil { + if !in.Transport.DeepEqual(other.Transport) { + return false + } + } + + if (in.Timers == nil) != (other.Timers == nil) { + return false + } else if in.Timers != nil { + if !in.Timers.DeepEqual(other.Timers) { + return false + } + } + + if (in.AuthSecretRef == nil) != (other.AuthSecretRef == nil) { + return false + } else if in.AuthSecretRef != nil { + if *in.AuthSecretRef != *other.AuthSecretRef { + return false + } + } + + if (in.GracefulRestart == nil) != (other.GracefulRestart == nil) { + return false + } else if in.GracefulRestart != nil { + if !in.GracefulRestart.DeepEqual(other.GracefulRestart) { + return false + } + } + + if (in.EBGPMultihop == nil) != (other.EBGPMultihop == nil) { + return false + } else if in.EBGPMultihop != nil { + if *in.EBGPMultihop != *other.EBGPMultihop { + return false + } + } + + if ((in.Families != nil) && (other.Families != nil)) || ((in.Families == nil) != (other.Families == nil)) { + in, other := &in.Families, &other.Families + if other == nil { return false } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPPeerConfigStatus) DeepEqual(other *CiliumBGPPeerConfigStatus) bool { + if other == nil { + return false } return true @@ -142,6 +1107,92 @@ func (in *CiliumBGPPeeringPolicySpec) DeepEqual(other *CiliumBGPPeeringPolicySpe return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPTimers) DeepEqual(other *CiliumBGPTimers) bool { + if other == nil { + return false + } + + if (in.ConnectRetryTimeSeconds == nil) != (other.ConnectRetryTimeSeconds == nil) { + return false + } else if in.ConnectRetryTimeSeconds != nil { + if *in.ConnectRetryTimeSeconds != *other.ConnectRetryTimeSeconds { + return false + } + } + + if (in.HoldTimeSeconds == nil) != (other.HoldTimeSeconds == nil) { + return false + } else if in.HoldTimeSeconds != nil { + if *in.HoldTimeSeconds != *other.HoldTimeSeconds { + return false + } + } + + if (in.KeepAliveTimeSeconds == nil) != (other.KeepAliveTimeSeconds == nil) { + return false + } else if in.KeepAliveTimeSeconds != nil { + if *in.KeepAliveTimeSeconds != *other.KeepAliveTimeSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
+func (in *CiliumBGPTimersState) DeepEqual(other *CiliumBGPTimersState) bool { + if other == nil { + return false + } + + if (in.AppliedHoldTimeSeconds == nil) != (other.AppliedHoldTimeSeconds == nil) { + return false + } else if in.AppliedHoldTimeSeconds != nil { + if *in.AppliedHoldTimeSeconds != *other.AppliedHoldTimeSeconds { + return false + } + } + + if (in.AppliedKeepaliveSeconds == nil) != (other.AppliedKeepaliveSeconds == nil) { + return false + } else if in.AppliedKeepaliveSeconds != nil { + if *in.AppliedKeepaliveSeconds != *other.AppliedKeepaliveSeconds { + return false + } + } + + return true +} + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *CiliumBGPTransport) DeepEqual(other *CiliumBGPTransport) bool { + if other == nil { + return false + } + + if (in.LocalPort == nil) != (other.LocalPort == nil) { + return false + } else if in.LocalPort != nil { + if *in.LocalPort != *other.LocalPort { + return false + } + } + + if (in.PeerPort == nil) != (other.PeerPort == nil) { + return false + } else if in.PeerPort != nil { + if *in.PeerPort != *other.PeerPort { + return false + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool { @@ -160,6 +1211,14 @@ func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool } } + if (in.PodIPPoolSelector == nil) != (other.PodIPPoolSelector == nil) { + return false + } else if in.PodIPPoolSelector != nil { + if !in.PodIPPoolSelector.DeepEqual(other.PodIPPoolSelector) { + return false + } + } + if (in.ServiceSelector == nil) != (other.ServiceSelector == nil) { return false } else if in.ServiceSelector != nil { @@ -168,6 +1227,23 @@ func (in *CiliumBGPVirtualRouter) DeepEqual(other *CiliumBGPVirtualRouter) bool } } + if ((in.ServiceAdvertisements != nil) && (other.ServiceAdvertisements != nil)) || ((in.ServiceAdvertisements == nil) != (other.ServiceAdvertisements == nil)) { + in, other := &in.ServiceAdvertisements, &other.ServiceAdvertisements + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if inElement != (*other)[i] { + return false + } + } + } + } + if ((in.Neighbors != nil) && (other.Neighbors != nil)) || ((in.Neighbors == nil) != (other.Neighbors == nil)) { in, other := &in.Neighbors, &other.Neighbors if other == nil { @@ -324,7 +1400,7 @@ func (in *CiliumLoadBalancerIPPool) DeepEqual(other *CiliumLoadBalancerIPPool) b // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
-func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepEqual(other *CiliumLoadBalancerIPPoolCIDRBlock) bool { +func (in *CiliumLoadBalancerIPPoolIPBlock) DeepEqual(other *CiliumLoadBalancerIPPoolIPBlock) bool { if other == nil { return false } @@ -332,6 +1408,12 @@ func (in *CiliumLoadBalancerIPPoolCIDRBlock) DeepEqual(other *CiliumLoadBalancer if in.Cidr != other.Cidr { return false } + if in.Start != other.Start { + return false + } + if in.Stop != other.Stop { + return false + } return true } @@ -351,8 +1433,11 @@ func (in *CiliumLoadBalancerIPPoolSpec) DeepEqual(other *CiliumLoadBalancerIPPoo } } - if ((in.Cidrs != nil) && (other.Cidrs != nil)) || ((in.Cidrs == nil) != (other.Cidrs == nil)) { - in, other := &in.Cidrs, &other.Cidrs + if in.AllowFirstLastIPs != other.AllowFirstLastIPs { + return false + } + if ((in.Blocks != nil) && (other.Blocks != nil)) || ((in.Blocks == nil) != (other.Blocks == nil)) { + in, other := &in.Blocks, &other.Blocks if other == nil { return false } @@ -537,3 +1622,23 @@ func (in *IPv6PoolSpec) DeepEqual(other *IPv6PoolSpec) bool { return true } + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *PeerConfigReference) DeepEqual(other *PeerConfigReference) bool { + if other == nil { + return false + } + + if in.Group != other.Group { + return false + } + if in.Kind != other.Kind { + return false + } + if in.Name != other.Name { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go index 720686010a..bd6b644e13 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/clientset.go @@ -6,8 +6,8 @@ package versioned import ( - "fmt" - "net/http" + fmt "fmt" + http "net/http" ciliumv2 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2" ciliumv2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go index 39002ac0d7..c079fb85df 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/cilium.io_client.go @@ -6,10 +6,10 @@ package v2 import ( - "net/http" + http "net/http" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) @@ -25,6 +25,7 @@ type CiliumV2Interface interface { CiliumLocalRedirectPoliciesGetter CiliumNetworkPoliciesGetter CiliumNodesGetter + CiliumNodeConfigsGetter } // CiliumV2Client is used to interact with features provided by the cilium.io group. 
@@ -72,6 +73,10 @@ func (c *CiliumV2Client) CiliumNodes() CiliumNodeInterface { return newCiliumNodes(c) } +func (c *CiliumV2Client) CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface { + return newCiliumNodeConfigs(c, namespace) +} + // NewForConfig creates a new CiliumV2Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). @@ -117,10 +122,10 @@ func New(c rest.Interface) *CiliumV2Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2.SchemeGroupVersion + gv := ciliumiov2.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go index cf8eecec3b..4e5007ef7e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwideenvoyconfig.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumClusterwideEnvoyConfigsGetter has a method to return a CiliumClusterwideEnvoyConfigInterface. @@ -25,131 +24,34 @@ type CiliumClusterwideEnvoyConfigsGetter interface { // CiliumClusterwideEnvoyConfigInterface has methods to work with CiliumClusterwideEnvoyConfig resources. 
type CiliumClusterwideEnvoyConfigInterface interface { - Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (*v2.CiliumClusterwideEnvoyConfig, error) - Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (*v2.CiliumClusterwideEnvoyConfig, error) + Create(ctx context.Context, ciliumClusterwideEnvoyConfig *ciliumiov2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error) + Update(ctx context.Context, ciliumClusterwideEnvoyConfig *ciliumiov2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumClusterwideEnvoyConfig, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumClusterwideEnvoyConfigList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumClusterwideEnvoyConfigList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumClusterwideEnvoyConfig, err error) CiliumClusterwideEnvoyConfigExpansion } // ciliumClusterwideEnvoyConfigs implements CiliumClusterwideEnvoyConfigInterface type ciliumClusterwideEnvoyConfigs struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumClusterwideEnvoyConfig, *ciliumiov2.CiliumClusterwideEnvoyConfigList] } // newCiliumClusterwideEnvoyConfigs returns a CiliumClusterwideEnvoyConfigs func newCiliumClusterwideEnvoyConfigs(c *CiliumV2Client) *ciliumClusterwideEnvoyConfigs { return &ciliumClusterwideEnvoyConfigs{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumClusterwideEnvoyConfig, *ciliumiov2.CiliumClusterwideEnvoyConfigList]( + "ciliumclusterwideenvoyconfigs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumClusterwideEnvoyConfig { return &ciliumiov2.CiliumClusterwideEnvoyConfig{} }, + func() *ciliumiov2.CiliumClusterwideEnvoyConfigList { + return &ciliumiov2.CiliumClusterwideEnvoyConfigList{} + }, + ), } } - -// Get takes name of the ciliumClusterwideEnvoyConfig, and returns the corresponding ciliumClusterwideEnvoyConfig object, and an error if there is any. -func (c *ciliumClusterwideEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { - result = &v2.CiliumClusterwideEnvoyConfig{} - err = c.client.Get(). - Resource("ciliumclusterwideenvoyconfigs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumClusterwideEnvoyConfigs that match those selectors. 
-func (c *ciliumClusterwideEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideEnvoyConfigList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumClusterwideEnvoyConfigList{} - err = c.client.Get(). - Resource("ciliumclusterwideenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumClusterwideEnvoyConfigs. -func (c *ciliumClusterwideEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumclusterwideenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumClusterwideEnvoyConfig and creates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any. -func (c *ciliumClusterwideEnvoyConfigs) Create(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { - result = &v2.CiliumClusterwideEnvoyConfig{} - err = c.client.Post(). - Resource("ciliumclusterwideenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumClusterwideEnvoyConfig). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumClusterwideEnvoyConfig and updates it. Returns the server's representation of the ciliumClusterwideEnvoyConfig, and an error, if there is any. -func (c *ciliumClusterwideEnvoyConfigs) Update(ctx context.Context, ciliumClusterwideEnvoyConfig *v2.CiliumClusterwideEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumClusterwideEnvoyConfig, err error) { - result = &v2.CiliumClusterwideEnvoyConfig{} - err = c.client.Put(). - Resource("ciliumclusterwideenvoyconfigs"). - Name(ciliumClusterwideEnvoyConfig.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumClusterwideEnvoyConfig). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumClusterwideEnvoyConfig and deletes it. Returns an error if one occurs. -func (c *ciliumClusterwideEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumclusterwideenvoyconfigs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumClusterwideEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumclusterwideenvoyconfigs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumClusterwideEnvoyConfig. 
-func (c *ciliumClusterwideEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideEnvoyConfig, err error) { - result = &v2.CiliumClusterwideEnvoyConfig{} - err = c.client.Patch(pt). - Resource("ciliumclusterwideenvoyconfigs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go index 56cde26263..98cd24896f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumclusterwidenetworkpolicy.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumClusterwideNetworkPoliciesGetter has a method to return a CiliumClusterwideNetworkPolicyInterface. @@ -25,147 +24,36 @@ type CiliumClusterwideNetworkPoliciesGetter interface { // CiliumClusterwideNetworkPolicyInterface has methods to work with CiliumClusterwideNetworkPolicy resources. type CiliumClusterwideNetworkPolicyInterface interface { - Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (*v2.CiliumClusterwideNetworkPolicy, error) - Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error) - UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumClusterwideNetworkPolicy, error) + Create(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error) + Update(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *ciliumiov2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumClusterwideNetworkPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumClusterwideNetworkPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumClusterwideNetworkPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumClusterwideNetworkPolicy, err error) CiliumClusterwideNetworkPolicyExpansion } // ciliumClusterwideNetworkPolicies implements CiliumClusterwideNetworkPolicyInterface type ciliumClusterwideNetworkPolicies struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumClusterwideNetworkPolicy, *ciliumiov2.CiliumClusterwideNetworkPolicyList] } // newCiliumClusterwideNetworkPolicies returns a CiliumClusterwideNetworkPolicies func newCiliumClusterwideNetworkPolicies(c *CiliumV2Client) *ciliumClusterwideNetworkPolicies { return &ciliumClusterwideNetworkPolicies{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumClusterwideNetworkPolicy, *ciliumiov2.CiliumClusterwideNetworkPolicyList]( + "ciliumclusterwidenetworkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumClusterwideNetworkPolicy { return &ciliumiov2.CiliumClusterwideNetworkPolicy{} }, + func() *ciliumiov2.CiliumClusterwideNetworkPolicyList { + return &ciliumiov2.CiliumClusterwideNetworkPolicyList{} + }, + ), } } - -// Get takes name of the ciliumClusterwideNetworkPolicy, and returns the corresponding ciliumClusterwideNetworkPolicy object, and an error if there is any. -func (c *ciliumClusterwideNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { - result = &v2.CiliumClusterwideNetworkPolicy{} - err = c.client.Get(). - Resource("ciliumclusterwidenetworkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumClusterwideNetworkPolicies that match those selectors. -func (c *ciliumClusterwideNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumClusterwideNetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumClusterwideNetworkPolicyList{} - err = c.client.Get(). - Resource("ciliumclusterwidenetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumClusterwideNetworkPolicies. 
-func (c *ciliumClusterwideNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumclusterwidenetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumClusterwideNetworkPolicy and creates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any. -func (c *ciliumClusterwideNetworkPolicies) Create(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { - result = &v2.CiliumClusterwideNetworkPolicy{} - err = c.client.Post(). - Resource("ciliumclusterwidenetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumClusterwideNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumClusterwideNetworkPolicy and updates it. Returns the server's representation of the ciliumClusterwideNetworkPolicy, and an error, if there is any. -func (c *ciliumClusterwideNetworkPolicies) Update(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { - result = &v2.CiliumClusterwideNetworkPolicy{} - err = c.client.Put(). - Resource("ciliumclusterwidenetworkpolicies"). - Name(ciliumClusterwideNetworkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumClusterwideNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumClusterwideNetworkPolicies) UpdateStatus(ctx context.Context, ciliumClusterwideNetworkPolicy *v2.CiliumClusterwideNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumClusterwideNetworkPolicy, err error) { - result = &v2.CiliumClusterwideNetworkPolicy{} - err = c.client.Put(). - Resource("ciliumclusterwidenetworkpolicies"). - Name(ciliumClusterwideNetworkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumClusterwideNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumClusterwideNetworkPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumClusterwideNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumclusterwidenetworkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumClusterwideNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumclusterwidenetworkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumClusterwideNetworkPolicy. 
-func (c *ciliumClusterwideNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumClusterwideNetworkPolicy, err error) { - result = &v2.CiliumClusterwideNetworkPolicy{} - err = c.client.Patch(pt). - Resource("ciliumclusterwidenetworkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go index 625a89b74b..c00e9d9661 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumegressgatewaypolicy.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumEgressGatewayPoliciesGetter has a method to return a CiliumEgressGatewayPolicyInterface. @@ -25,131 +24,32 @@ type CiliumEgressGatewayPoliciesGetter interface { // CiliumEgressGatewayPolicyInterface has methods to work with CiliumEgressGatewayPolicy resources. type CiliumEgressGatewayPolicyInterface interface { - Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (*v2.CiliumEgressGatewayPolicy, error) - Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (*v2.CiliumEgressGatewayPolicy, error) + Create(ctx context.Context, ciliumEgressGatewayPolicy *ciliumiov2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error) + Update(ctx context.Context, ciliumEgressGatewayPolicy *ciliumiov2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEgressGatewayPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEgressGatewayPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEgressGatewayPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEgressGatewayPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEgressGatewayPolicy, err error) CiliumEgressGatewayPolicyExpansion } // ciliumEgressGatewayPolicies implements CiliumEgressGatewayPolicyInterface type 
ciliumEgressGatewayPolicies struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumEgressGatewayPolicy, *ciliumiov2.CiliumEgressGatewayPolicyList] } // newCiliumEgressGatewayPolicies returns a CiliumEgressGatewayPolicies func newCiliumEgressGatewayPolicies(c *CiliumV2Client) *ciliumEgressGatewayPolicies { return &ciliumEgressGatewayPolicies{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumEgressGatewayPolicy, *ciliumiov2.CiliumEgressGatewayPolicyList]( + "ciliumegressgatewaypolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumEgressGatewayPolicy { return &ciliumiov2.CiliumEgressGatewayPolicy{} }, + func() *ciliumiov2.CiliumEgressGatewayPolicyList { return &ciliumiov2.CiliumEgressGatewayPolicyList{} }, + ), } } - -// Get takes name of the ciliumEgressGatewayPolicy, and returns the corresponding ciliumEgressGatewayPolicy object, and an error if there is any. -func (c *ciliumEgressGatewayPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { - result = &v2.CiliumEgressGatewayPolicy{} - err = c.client.Get(). - Resource("ciliumegressgatewaypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumEgressGatewayPolicies that match those selectors. -func (c *ciliumEgressGatewayPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEgressGatewayPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumEgressGatewayPolicyList{} - err = c.client.Get(). - Resource("ciliumegressgatewaypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumEgressGatewayPolicies. -func (c *ciliumEgressGatewayPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumegressgatewaypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumEgressGatewayPolicy and creates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any. -func (c *ciliumEgressGatewayPolicies) Create(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.CreateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { - result = &v2.CiliumEgressGatewayPolicy{} - err = c.client.Post(). - Resource("ciliumegressgatewaypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEgressGatewayPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumEgressGatewayPolicy and updates it. Returns the server's representation of the ciliumEgressGatewayPolicy, and an error, if there is any. -func (c *ciliumEgressGatewayPolicies) Update(ctx context.Context, ciliumEgressGatewayPolicy *v2.CiliumEgressGatewayPolicy, opts v1.UpdateOptions) (result *v2.CiliumEgressGatewayPolicy, err error) { - result = &v2.CiliumEgressGatewayPolicy{} - err = c.client.Put(). - Resource("ciliumegressgatewaypolicies"). 
- Name(ciliumEgressGatewayPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEgressGatewayPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumEgressGatewayPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumEgressGatewayPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumegressgatewaypolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumEgressGatewayPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumegressgatewaypolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumEgressGatewayPolicy. -func (c *ciliumEgressGatewayPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEgressGatewayPolicy, err error) { - result = &v2.CiliumEgressGatewayPolicy{} - err = c.client.Patch(pt). - Resource("ciliumegressgatewaypolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go index 3cd66d46ae..2765364930 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumendpoint.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumEndpointsGetter has a method to return a CiliumEndpointInterface. @@ -25,158 +24,34 @@ type CiliumEndpointsGetter interface { // CiliumEndpointInterface has methods to work with CiliumEndpoint resources. type CiliumEndpointInterface interface { - Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (*v2.CiliumEndpoint, error) - Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error) - UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (*v2.CiliumEndpoint, error) + Create(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.CreateOptions) (*ciliumiov2.CiliumEndpoint, error) + Update(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.UpdateOptions) (*ciliumiov2.CiliumEndpoint, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumEndpoint *ciliumiov2.CiliumEndpoint, opts v1.UpdateOptions) (*ciliumiov2.CiliumEndpoint, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEndpoint, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEndpointList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEndpoint, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEndpointList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEndpoint, err error) CiliumEndpointExpansion } // ciliumEndpoints implements CiliumEndpointInterface type ciliumEndpoints struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ciliumiov2.CiliumEndpoint, *ciliumiov2.CiliumEndpointList] } // newCiliumEndpoints returns a CiliumEndpoints func newCiliumEndpoints(c *CiliumV2Client, namespace string) *ciliumEndpoints { return &ciliumEndpoints{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ciliumiov2.CiliumEndpoint, *ciliumiov2.CiliumEndpointList]( + "ciliumendpoints", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2.CiliumEndpoint { return &ciliumiov2.CiliumEndpoint{} }, + func() *ciliumiov2.CiliumEndpointList { return &ciliumiov2.CiliumEndpointList{} }, + ), } } - -// Get takes name of the ciliumEndpoint, and returns the corresponding ciliumEndpoint object, and an error if there is any. -func (c *ciliumEndpoints) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEndpoint, err error) { - result = &v2.CiliumEndpoint{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumendpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumEndpoints that match those selectors. -func (c *ciliumEndpoints) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEndpointList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumEndpointList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumendpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumEndpoints. -func (c *ciliumEndpoints) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ciliumendpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumEndpoint and creates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any. 
-func (c *ciliumEndpoints) Create(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.CreateOptions) (result *v2.CiliumEndpoint, err error) { - result = &v2.CiliumEndpoint{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ciliumendpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEndpoint). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumEndpoint and updates it. Returns the server's representation of the ciliumEndpoint, and an error, if there is any. -func (c *ciliumEndpoints) Update(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) { - result = &v2.CiliumEndpoint{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumendpoints"). - Name(ciliumEndpoint.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEndpoint). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumEndpoints) UpdateStatus(ctx context.Context, ciliumEndpoint *v2.CiliumEndpoint, opts v1.UpdateOptions) (result *v2.CiliumEndpoint, err error) { - result = &v2.CiliumEndpoint{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumendpoints"). - Name(ciliumEndpoint.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEndpoint). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumEndpoint and deletes it. Returns an error if one occurs. -func (c *ciliumEndpoints) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumendpoints"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumEndpoints) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumendpoints"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumEndpoint. -func (c *ciliumEndpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEndpoint, err error) { - result = &v2.CiliumEndpoint{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ciliumendpoints"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go index 9f5acd456d..5cbdb63aea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumenvoyconfig.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumEnvoyConfigsGetter has a method to return a CiliumEnvoyConfigInterface. @@ -25,141 +24,32 @@ type CiliumEnvoyConfigsGetter interface { // CiliumEnvoyConfigInterface has methods to work with CiliumEnvoyConfig resources. type CiliumEnvoyConfigInterface interface { - Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (*v2.CiliumEnvoyConfig, error) - Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (*v2.CiliumEnvoyConfig, error) + Create(ctx context.Context, ciliumEnvoyConfig *ciliumiov2.CiliumEnvoyConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumEnvoyConfig, error) + Update(ctx context.Context, ciliumEnvoyConfig *ciliumiov2.CiliumEnvoyConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumEnvoyConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumEnvoyConfig, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumEnvoyConfigList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumEnvoyConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumEnvoyConfigList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumEnvoyConfig, err error) CiliumEnvoyConfigExpansion } // ciliumEnvoyConfigs implements CiliumEnvoyConfigInterface type ciliumEnvoyConfigs struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ciliumiov2.CiliumEnvoyConfig, *ciliumiov2.CiliumEnvoyConfigList] } // newCiliumEnvoyConfigs returns a CiliumEnvoyConfigs func newCiliumEnvoyConfigs(c *CiliumV2Client, namespace string) *ciliumEnvoyConfigs { return &ciliumEnvoyConfigs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ciliumiov2.CiliumEnvoyConfig, *ciliumiov2.CiliumEnvoyConfigList]( + "ciliumenvoyconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2.CiliumEnvoyConfig { return &ciliumiov2.CiliumEnvoyConfig{} }, + func() *ciliumiov2.CiliumEnvoyConfigList { return &ciliumiov2.CiliumEnvoyConfigList{} }, + ), } } - -// Get 
takes name of the ciliumEnvoyConfig, and returns the corresponding ciliumEnvoyConfig object, and an error if there is any. -func (c *ciliumEnvoyConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumEnvoyConfig, err error) { - result = &v2.CiliumEnvoyConfig{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumEnvoyConfigs that match those selectors. -func (c *ciliumEnvoyConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumEnvoyConfigList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumEnvoyConfigList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumEnvoyConfigs. -func (c *ciliumEnvoyConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumEnvoyConfig and creates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any. -func (c *ciliumEnvoyConfigs) Create(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.CreateOptions) (result *v2.CiliumEnvoyConfig, err error) { - result = &v2.CiliumEnvoyConfig{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEnvoyConfig). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumEnvoyConfig and updates it. Returns the server's representation of the ciliumEnvoyConfig, and an error, if there is any. -func (c *ciliumEnvoyConfigs) Update(ctx context.Context, ciliumEnvoyConfig *v2.CiliumEnvoyConfig, opts v1.UpdateOptions) (result *v2.CiliumEnvoyConfig, err error) { - result = &v2.CiliumEnvoyConfig{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - Name(ciliumEnvoyConfig.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEnvoyConfig). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumEnvoyConfig and deletes it. Returns an error if one occurs. -func (c *ciliumEnvoyConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumEnvoyConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched ciliumEnvoyConfig. -func (c *ciliumEnvoyConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumEnvoyConfig, err error) { - result = &v2.CiliumEnvoyConfig{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ciliumenvoyconfigs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go index 261cbfeed1..57ef50d699 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumexternalworkload.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumExternalWorkloadsGetter has a method to return a CiliumExternalWorkloadInterface. @@ -25,147 +24,34 @@ type CiliumExternalWorkloadsGetter interface { // CiliumExternalWorkloadInterface has methods to work with CiliumExternalWorkload resources. type CiliumExternalWorkloadInterface interface { - Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (*v2.CiliumExternalWorkload, error) - Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error) - UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (*v2.CiliumExternalWorkload, error) + Create(ctx context.Context, ciliumExternalWorkload *ciliumiov2.CiliumExternalWorkload, opts v1.CreateOptions) (*ciliumiov2.CiliumExternalWorkload, error) + Update(ctx context.Context, ciliumExternalWorkload *ciliumiov2.CiliumExternalWorkload, opts v1.UpdateOptions) (*ciliumiov2.CiliumExternalWorkload, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumExternalWorkload *ciliumiov2.CiliumExternalWorkload, opts v1.UpdateOptions) (*ciliumiov2.CiliumExternalWorkload, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumExternalWorkload, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumExternalWorkloadList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumExternalWorkload, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumExternalWorkloadList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumExternalWorkload, err error) CiliumExternalWorkloadExpansion } // ciliumExternalWorkloads implements CiliumExternalWorkloadInterface type ciliumExternalWorkloads struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumExternalWorkload, *ciliumiov2.CiliumExternalWorkloadList] } // newCiliumExternalWorkloads returns a CiliumExternalWorkloads func newCiliumExternalWorkloads(c *CiliumV2Client) *ciliumExternalWorkloads { return &ciliumExternalWorkloads{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumExternalWorkload, *ciliumiov2.CiliumExternalWorkloadList]( + "ciliumexternalworkloads", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumExternalWorkload { return &ciliumiov2.CiliumExternalWorkload{} }, + func() *ciliumiov2.CiliumExternalWorkloadList { return &ciliumiov2.CiliumExternalWorkloadList{} }, + ), } } - -// Get takes name of the ciliumExternalWorkload, and returns the corresponding ciliumExternalWorkload object, and an error if there is any. -func (c *ciliumExternalWorkloads) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumExternalWorkload, err error) { - result = &v2.CiliumExternalWorkload{} - err = c.client.Get(). - Resource("ciliumexternalworkloads"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumExternalWorkloads that match those selectors. -func (c *ciliumExternalWorkloads) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumExternalWorkloadList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumExternalWorkloadList{} - err = c.client.Get(). - Resource("ciliumexternalworkloads"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumExternalWorkloads. -func (c *ciliumExternalWorkloads) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumexternalworkloads"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumExternalWorkload and creates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any. -func (c *ciliumExternalWorkloads) Create(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.CreateOptions) (result *v2.CiliumExternalWorkload, err error) { - result = &v2.CiliumExternalWorkload{} - err = c.client.Post(). - Resource("ciliumexternalworkloads"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumExternalWorkload). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumExternalWorkload and updates it. Returns the server's representation of the ciliumExternalWorkload, and an error, if there is any. -func (c *ciliumExternalWorkloads) Update(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) { - result = &v2.CiliumExternalWorkload{} - err = c.client.Put(). - Resource("ciliumexternalworkloads"). - Name(ciliumExternalWorkload.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumExternalWorkload). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumExternalWorkloads) UpdateStatus(ctx context.Context, ciliumExternalWorkload *v2.CiliumExternalWorkload, opts v1.UpdateOptions) (result *v2.CiliumExternalWorkload, err error) { - result = &v2.CiliumExternalWorkload{} - err = c.client.Put(). - Resource("ciliumexternalworkloads"). - Name(ciliumExternalWorkload.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumExternalWorkload). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumExternalWorkload and deletes it. Returns an error if one occurs. -func (c *ciliumExternalWorkloads) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumexternalworkloads"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumExternalWorkloads) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumexternalworkloads"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumExternalWorkload. -func (c *ciliumExternalWorkloads) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumExternalWorkload, err error) { - result = &v2.CiliumExternalWorkload{} - err = c.client.Patch(pt). - Resource("ciliumexternalworkloads"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go index c23f017263..6e4def6e83 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumidentity.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumIdentitiesGetter has a method to return a CiliumIdentityInterface. @@ -25,131 +24,32 @@ type CiliumIdentitiesGetter interface { // CiliumIdentityInterface has methods to work with CiliumIdentity resources. type CiliumIdentityInterface interface { - Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (*v2.CiliumIdentity, error) - Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (*v2.CiliumIdentity, error) + Create(ctx context.Context, ciliumIdentity *ciliumiov2.CiliumIdentity, opts v1.CreateOptions) (*ciliumiov2.CiliumIdentity, error) + Update(ctx context.Context, ciliumIdentity *ciliumiov2.CiliumIdentity, opts v1.UpdateOptions) (*ciliumiov2.CiliumIdentity, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumIdentity, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumIdentityList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumIdentity, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumIdentityList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumIdentity, err error) CiliumIdentityExpansion } // ciliumIdentities implements CiliumIdentityInterface type ciliumIdentities struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumIdentity, *ciliumiov2.CiliumIdentityList] } // newCiliumIdentities returns a CiliumIdentities func newCiliumIdentities(c *CiliumV2Client) *ciliumIdentities { return &ciliumIdentities{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumIdentity, *ciliumiov2.CiliumIdentityList]( + "ciliumidentities", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumIdentity { return &ciliumiov2.CiliumIdentity{} }, + func() *ciliumiov2.CiliumIdentityList { return &ciliumiov2.CiliumIdentityList{} }, + ), } } - -// Get takes name of the ciliumIdentity, and returns the corresponding ciliumIdentity object, and an error if there is any. 
-func (c *ciliumIdentities) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumIdentity, err error) { - result = &v2.CiliumIdentity{} - err = c.client.Get(). - Resource("ciliumidentities"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumIdentities that match those selectors. -func (c *ciliumIdentities) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumIdentityList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumIdentityList{} - err = c.client.Get(). - Resource("ciliumidentities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumIdentities. -func (c *ciliumIdentities) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumidentities"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumIdentity and creates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any. -func (c *ciliumIdentities) Create(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.CreateOptions) (result *v2.CiliumIdentity, err error) { - result = &v2.CiliumIdentity{} - err = c.client.Post(). - Resource("ciliumidentities"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumIdentity). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumIdentity and updates it. Returns the server's representation of the ciliumIdentity, and an error, if there is any. -func (c *ciliumIdentities) Update(ctx context.Context, ciliumIdentity *v2.CiliumIdentity, opts v1.UpdateOptions) (result *v2.CiliumIdentity, err error) { - result = &v2.CiliumIdentity{} - err = c.client.Put(). - Resource("ciliumidentities"). - Name(ciliumIdentity.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumIdentity). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumIdentity and deletes it. Returns an error if one occurs. -func (c *ciliumIdentities) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumidentities"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumIdentities) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumidentities"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumIdentity. -func (c *ciliumIdentities) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumIdentity, err error) { - result = &v2.CiliumIdentity{} - err = c.client.Patch(pt). 
- Resource("ciliumidentities"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go index 9c839eb942..14a744cf04 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumlocalredirectpolicy.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumLocalRedirectPoliciesGetter has a method to return a CiliumLocalRedirectPolicyInterface. @@ -25,158 +24,34 @@ type CiliumLocalRedirectPoliciesGetter interface { // CiliumLocalRedirectPolicyInterface has methods to work with CiliumLocalRedirectPolicy resources. type CiliumLocalRedirectPolicyInterface interface { - Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (*v2.CiliumLocalRedirectPolicy, error) - Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error) - UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*v2.CiliumLocalRedirectPolicy, error) + Create(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error) + Update(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *ciliumiov2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumLocalRedirectPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumLocalRedirectPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumLocalRedirectPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumLocalRedirectPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumLocalRedirectPolicy, err error) CiliumLocalRedirectPolicyExpansion } // ciliumLocalRedirectPolicies implements CiliumLocalRedirectPolicyInterface type ciliumLocalRedirectPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ciliumiov2.CiliumLocalRedirectPolicy, *ciliumiov2.CiliumLocalRedirectPolicyList] } // newCiliumLocalRedirectPolicies returns a CiliumLocalRedirectPolicies func newCiliumLocalRedirectPolicies(c *CiliumV2Client, namespace string) *ciliumLocalRedirectPolicies { return &ciliumLocalRedirectPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ciliumiov2.CiliumLocalRedirectPolicy, *ciliumiov2.CiliumLocalRedirectPolicyList]( + "ciliumlocalredirectpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2.CiliumLocalRedirectPolicy { return &ciliumiov2.CiliumLocalRedirectPolicy{} }, + func() *ciliumiov2.CiliumLocalRedirectPolicyList { return &ciliumiov2.CiliumLocalRedirectPolicyList{} }, + ), } } - -// Get takes name of the ciliumLocalRedirectPolicy, and returns the corresponding ciliumLocalRedirectPolicy object, and an error if there is any. -func (c *ciliumLocalRedirectPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { - result = &v2.CiliumLocalRedirectPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumLocalRedirectPolicies that match those selectors. -func (c *ciliumLocalRedirectPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumLocalRedirectPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumLocalRedirectPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumLocalRedirectPolicies. 
-func (c *ciliumLocalRedirectPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumLocalRedirectPolicy and creates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any. -func (c *ciliumLocalRedirectPolicies) Create(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.CreateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { - result = &v2.CiliumLocalRedirectPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLocalRedirectPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumLocalRedirectPolicy and updates it. Returns the server's representation of the ciliumLocalRedirectPolicy, and an error, if there is any. -func (c *ciliumLocalRedirectPolicies) Update(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { - result = &v2.CiliumLocalRedirectPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - Name(ciliumLocalRedirectPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLocalRedirectPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumLocalRedirectPolicies) UpdateStatus(ctx context.Context, ciliumLocalRedirectPolicy *v2.CiliumLocalRedirectPolicy, opts v1.UpdateOptions) (result *v2.CiliumLocalRedirectPolicy, err error) { - result = &v2.CiliumLocalRedirectPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - Name(ciliumLocalRedirectPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLocalRedirectPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumLocalRedirectPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumLocalRedirectPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumLocalRedirectPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumLocalRedirectPolicy. 
-func (c *ciliumLocalRedirectPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumLocalRedirectPolicy, err error) { - result = &v2.CiliumLocalRedirectPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ciliumlocalredirectpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go index f5d5ae60a8..bed778cc00 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnetworkpolicy.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumNetworkPoliciesGetter has a method to return a CiliumNetworkPolicyInterface. @@ -25,158 +24,34 @@ type CiliumNetworkPoliciesGetter interface { // CiliumNetworkPolicyInterface has methods to work with CiliumNetworkPolicy resources. type CiliumNetworkPolicyInterface interface { - Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (*v2.CiliumNetworkPolicy, error) - Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error) - UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*v2.CiliumNetworkPolicy, error) + Create(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.CreateOptions) (*ciliumiov2.CiliumNetworkPolicy, error) + Update(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumNetworkPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumNetworkPolicy *ciliumiov2.CiliumNetworkPolicy, opts v1.UpdateOptions) (*ciliumiov2.CiliumNetworkPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumNetworkPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumNetworkPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNetworkPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNetworkPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNetworkPolicy, err error) CiliumNetworkPolicyExpansion } // ciliumNetworkPolicies implements CiliumNetworkPolicyInterface type ciliumNetworkPolicies struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ciliumiov2.CiliumNetworkPolicy, *ciliumiov2.CiliumNetworkPolicyList] } // newCiliumNetworkPolicies returns a CiliumNetworkPolicies func newCiliumNetworkPolicies(c *CiliumV2Client, namespace string) *ciliumNetworkPolicies { return &ciliumNetworkPolicies{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ciliumiov2.CiliumNetworkPolicy, *ciliumiov2.CiliumNetworkPolicyList]( + "ciliumnetworkpolicies", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2.CiliumNetworkPolicy { return &ciliumiov2.CiliumNetworkPolicy{} }, + func() *ciliumiov2.CiliumNetworkPolicyList { return &ciliumiov2.CiliumNetworkPolicyList{} }, + ), } } - -// Get takes name of the ciliumNetworkPolicy, and returns the corresponding ciliumNetworkPolicy object, and an error if there is any. -func (c *ciliumNetworkPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNetworkPolicy, err error) { - result = &v2.CiliumNetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumNetworkPolicies that match those selectors. -func (c *ciliumNetworkPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumNetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumNetworkPolicies. -func (c *ciliumNetworkPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumNetworkPolicy and creates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any. -func (c *ciliumNetworkPolicies) Create(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.CreateOptions) (result *v2.CiliumNetworkPolicy, err error) { - result = &v2.CiliumNetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumNetworkPolicy and updates it. Returns the server's representation of the ciliumNetworkPolicy, and an error, if there is any. -func (c *ciliumNetworkPolicies) Update(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) { - result = &v2.CiliumNetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - Name(ciliumNetworkPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumNetworkPolicies) UpdateStatus(ctx context.Context, ciliumNetworkPolicy *v2.CiliumNetworkPolicy, opts v1.UpdateOptions) (result *v2.CiliumNetworkPolicy, err error) { - result = &v2.CiliumNetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - Name(ciliumNetworkPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNetworkPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumNetworkPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumNetworkPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumNetworkPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumNetworkPolicy. -func (c *ciliumNetworkPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNetworkPolicy, err error) { - result = &v2.CiliumNetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ciliumnetworkpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go index 9cc2e55550..fafb887c29 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnode.go @@ -6,15 +6,14 @@ package v2 import ( - "context" - "time" + context "context" - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumNodesGetter has a method to return a CiliumNodeInterface. @@ -25,147 +24,34 @@ type CiliumNodesGetter interface { // CiliumNodeInterface has methods to work with CiliumNode resources. type CiliumNodeInterface interface { - Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (*v2.CiliumNode, error) - Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error) - UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (*v2.CiliumNode, error) + Create(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.CreateOptions) (*ciliumiov2.CiliumNode, error) + Update(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.UpdateOptions) (*ciliumiov2.CiliumNode, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumNode *ciliumiov2.CiliumNode, opts v1.UpdateOptions) (*ciliumiov2.CiliumNode, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2.CiliumNode, error) - List(ctx context.Context, opts v1.ListOptions) (*v2.CiliumNodeList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNode, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNodeList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNode, err error) CiliumNodeExpansion } // ciliumNodes implements CiliumNodeInterface type ciliumNodes struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2.CiliumNode, *ciliumiov2.CiliumNodeList] } // newCiliumNodes returns a CiliumNodes func newCiliumNodes(c *CiliumV2Client) *ciliumNodes { return &ciliumNodes{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2.CiliumNode, *ciliumiov2.CiliumNodeList]( + "ciliumnodes", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2.CiliumNode { return &ciliumiov2.CiliumNode{} }, + func() *ciliumiov2.CiliumNodeList { return &ciliumiov2.CiliumNodeList{} }, + ), } } - -// Get takes name of the ciliumNode, and returns the corresponding ciliumNode object, and an error if there is any. -func (c *ciliumNodes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2.CiliumNode, err error) { - result = &v2.CiliumNode{} - err = c.client.Get(). - Resource("ciliumnodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumNodes that match those selectors. -func (c *ciliumNodes) List(ctx context.Context, opts v1.ListOptions) (result *v2.CiliumNodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2.CiliumNodeList{} - err = c.client.Get(). - Resource("ciliumnodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumNodes. -func (c *ciliumNodes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumnodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumNode and creates it. Returns the server's representation of the ciliumNode, and an error, if there is any. -func (c *ciliumNodes) Create(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.CreateOptions) (result *v2.CiliumNode, err error) { - result = &v2.CiliumNode{} - err = c.client.Post(). - Resource("ciliumnodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNode). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a ciliumNode and updates it. Returns the server's representation of the ciliumNode, and an error, if there is any. -func (c *ciliumNodes) Update(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) { - result = &v2.CiliumNode{} - err = c.client.Put(). - Resource("ciliumnodes"). - Name(ciliumNode.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNode). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumNodes) UpdateStatus(ctx context.Context, ciliumNode *v2.CiliumNode, opts v1.UpdateOptions) (result *v2.CiliumNode, err error) { - result = &v2.CiliumNode{} - err = c.client.Put(). - Resource("ciliumnodes"). - Name(ciliumNode.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNode). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumNode and deletes it. Returns an error if one occurs. -func (c *ciliumNodes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumnodes"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumNodes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumnodes"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumNode. -func (c *ciliumNodes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2.CiliumNode, err error) { - result = &v2.CiliumNode{} - err = c.client.Patch(pt). - Resource("ciliumnodes"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnodeconfig.go new file mode 100644 index 0000000000..f776b2c92c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/ciliumnodeconfig.go @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2 + +import ( + context "context" + + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumNodeConfigsGetter has a method to return a CiliumNodeConfigInterface. +// A group's client should implement this interface. +type CiliumNodeConfigsGetter interface { + CiliumNodeConfigs(namespace string) CiliumNodeConfigInterface +} + +// CiliumNodeConfigInterface has methods to work with CiliumNodeConfig resources. 
+type CiliumNodeConfigInterface interface { + Create(ctx context.Context, ciliumNodeConfig *ciliumiov2.CiliumNodeConfig, opts v1.CreateOptions) (*ciliumiov2.CiliumNodeConfig, error) + Update(ctx context.Context, ciliumNodeConfig *ciliumiov2.CiliumNodeConfig, opts v1.UpdateOptions) (*ciliumiov2.CiliumNodeConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2.CiliumNodeConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2.CiliumNodeConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2.CiliumNodeConfig, err error) + CiliumNodeConfigExpansion +} + +// ciliumNodeConfigs implements CiliumNodeConfigInterface +type ciliumNodeConfigs struct { + *gentype.ClientWithList[*ciliumiov2.CiliumNodeConfig, *ciliumiov2.CiliumNodeConfigList] +} + +// newCiliumNodeConfigs returns a CiliumNodeConfigs +func newCiliumNodeConfigs(c *CiliumV2Client, namespace string) *ciliumNodeConfigs { + return &ciliumNodeConfigs{ + gentype.NewClientWithList[*ciliumiov2.CiliumNodeConfig, *ciliumiov2.CiliumNodeConfigList]( + "ciliumnodeconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2.CiliumNodeConfig { return &ciliumiov2.CiliumNodeConfig{} }, + func() *ciliumiov2.CiliumNodeConfigList { return &ciliumiov2.CiliumNodeConfigList{} }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go index 15b434bace..fe5700373b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2/generated_expansion.go @@ -24,3 +24,5 @@ type CiliumLocalRedirectPolicyExpansion interface{} type CiliumNetworkPolicyExpansion interface{} type CiliumNodeExpansion interface{} + +type CiliumNodeConfigExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go index 0e56f45914..8e49ea7ff1 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/cilium.io_client.go @@ -6,15 +6,20 @@ package v2alpha1 import ( - "net/http" + http "net/http" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) type CiliumV2alpha1Interface interface { RESTClient() rest.Interface + CiliumBGPAdvertisementsGetter + CiliumBGPClusterConfigsGetter + CiliumBGPNodeConfigsGetter + CiliumBGPNodeConfigOverridesGetter + CiliumBGPPeerConfigsGetter CiliumBGPPeeringPoliciesGetter CiliumCIDRGroupsGetter 
CiliumEndpointSlicesGetter @@ -29,6 +34,26 @@ type CiliumV2alpha1Client struct { restClient rest.Interface } +func (c *CiliumV2alpha1Client) CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface { + return newCiliumBGPAdvertisements(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface { + return newCiliumBGPClusterConfigs(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface { + return newCiliumBGPNodeConfigs(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface { + return newCiliumBGPNodeConfigOverrides(c) +} + +func (c *CiliumV2alpha1Client) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface { + return newCiliumBGPPeerConfigs(c) +} + func (c *CiliumV2alpha1Client) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInterface { return newCiliumBGPPeeringPolicies(c) } @@ -102,10 +127,10 @@ func New(c rest.Interface) *CiliumV2alpha1Client { } func setConfigDefaults(config *rest.Config) error { - gv := v2alpha1.SchemeGroupVersion + gv := ciliumiov2alpha1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + config.NegotiatedSerializer = rest.CodecFactoryForGeneratedClient(scheme.Scheme, scheme.Codecs).WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..66aef037ef --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumBGPAdvertisementsGetter has a method to return a CiliumBGPAdvertisementInterface. +// A group's client should implement this interface. +type CiliumBGPAdvertisementsGetter interface { + CiliumBGPAdvertisements() CiliumBGPAdvertisementInterface +} + +// CiliumBGPAdvertisementInterface has methods to work with CiliumBGPAdvertisement resources. 
+type CiliumBGPAdvertisementInterface interface { + Create(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2alpha1.CiliumBGPAdvertisement, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error) + Update(ctx context.Context, ciliumBGPAdvertisement *ciliumiov2alpha1.CiliumBGPAdvertisement, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPAdvertisementList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPAdvertisement, err error) + CiliumBGPAdvertisementExpansion +} + +// ciliumBGPAdvertisements implements CiliumBGPAdvertisementInterface +type ciliumBGPAdvertisements struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPAdvertisement, *ciliumiov2alpha1.CiliumBGPAdvertisementList] +} + +// newCiliumBGPAdvertisements returns a CiliumBGPAdvertisements +func newCiliumBGPAdvertisements(c *CiliumV2alpha1Client) *ciliumBGPAdvertisements { + return &ciliumBGPAdvertisements{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPAdvertisement, *ciliumiov2alpha1.CiliumBGPAdvertisementList]( + "ciliumbgpadvertisements", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPAdvertisement { return &ciliumiov2alpha1.CiliumBGPAdvertisement{} }, + func() *ciliumiov2alpha1.CiliumBGPAdvertisementList { + return &ciliumiov2alpha1.CiliumBGPAdvertisementList{} + }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..20a7b98dc0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumBGPClusterConfigsGetter has a method to return a CiliumBGPClusterConfigInterface. +// A group's client should implement this interface. +type CiliumBGPClusterConfigsGetter interface { + CiliumBGPClusterConfigs() CiliumBGPClusterConfigInterface +} + +// CiliumBGPClusterConfigInterface has methods to work with CiliumBGPClusterConfig resources. 
+type CiliumBGPClusterConfigInterface interface { + Create(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error) + Update(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, ciliumBGPClusterConfig *ciliumiov2alpha1.CiliumBGPClusterConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPClusterConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPClusterConfig, err error) + CiliumBGPClusterConfigExpansion +} + +// ciliumBGPClusterConfigs implements CiliumBGPClusterConfigInterface +type ciliumBGPClusterConfigs struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPClusterConfig, *ciliumiov2alpha1.CiliumBGPClusterConfigList] +} + +// newCiliumBGPClusterConfigs returns a CiliumBGPClusterConfigs +func newCiliumBGPClusterConfigs(c *CiliumV2alpha1Client) *ciliumBGPClusterConfigs { + return &ciliumBGPClusterConfigs{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPClusterConfig, *ciliumiov2alpha1.CiliumBGPClusterConfigList]( + "ciliumbgpclusterconfigs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPClusterConfig { return &ciliumiov2alpha1.CiliumBGPClusterConfig{} }, + func() *ciliumiov2alpha1.CiliumBGPClusterConfigList { + return &ciliumiov2alpha1.CiliumBGPClusterConfigList{} + }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..396ceb78d1 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumBGPNodeConfigsGetter has a method to return a CiliumBGPNodeConfigInterface. +// A group's client should implement this interface. +type CiliumBGPNodeConfigsGetter interface { + CiliumBGPNodeConfigs() CiliumBGPNodeConfigInterface +} + +// CiliumBGPNodeConfigInterface has methods to work with CiliumBGPNodeConfig resources. 
+type CiliumBGPNodeConfigInterface interface { + Create(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error) + Update(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, ciliumBGPNodeConfig *ciliumiov2alpha1.CiliumBGPNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPNodeConfig, err error) + CiliumBGPNodeConfigExpansion +} + +// ciliumBGPNodeConfigs implements CiliumBGPNodeConfigInterface +type ciliumBGPNodeConfigs struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfig, *ciliumiov2alpha1.CiliumBGPNodeConfigList] +} + +// newCiliumBGPNodeConfigs returns a CiliumBGPNodeConfigs +func newCiliumBGPNodeConfigs(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigs { + return &ciliumBGPNodeConfigs{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfig, *ciliumiov2alpha1.CiliumBGPNodeConfigList]( + "ciliumbgpnodeconfigs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPNodeConfig { return &ciliumiov2alpha1.CiliumBGPNodeConfig{} }, + func() *ciliumiov2alpha1.CiliumBGPNodeConfigList { return &ciliumiov2alpha1.CiliumBGPNodeConfigList{} }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..5ec33c39f6 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumBGPNodeConfigOverridesGetter has a method to return a CiliumBGPNodeConfigOverrideInterface. +// A group's client should implement this interface. +type CiliumBGPNodeConfigOverridesGetter interface { + CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInterface +} + +// CiliumBGPNodeConfigOverrideInterface has methods to work with CiliumBGPNodeConfigOverride resources. 
+type CiliumBGPNodeConfigOverrideInterface interface { + Create(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error) + Update(ctx context.Context, ciliumBGPNodeConfigOverride *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPNodeConfigOverride, err error) + CiliumBGPNodeConfigOverrideExpansion +} + +// ciliumBGPNodeConfigOverrides implements CiliumBGPNodeConfigOverrideInterface +type ciliumBGPNodeConfigOverrides struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList] +} + +// newCiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrides +func newCiliumBGPNodeConfigOverrides(c *CiliumV2alpha1Client) *ciliumBGPNodeConfigOverrides { + return &ciliumBGPNodeConfigOverrides{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList]( + "ciliumbgpnodeconfigoverrides", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPNodeConfigOverride { + return &ciliumiov2alpha1.CiliumBGPNodeConfigOverride{} + }, + func() *ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList { + return &ciliumiov2alpha1.CiliumBGPNodeConfigOverrideList{} + }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..b9350d4f65 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + gentype "k8s.io/client-go/gentype" +) + +// CiliumBGPPeerConfigsGetter has a method to return a CiliumBGPPeerConfigInterface. +// A group's client should implement this interface. +type CiliumBGPPeerConfigsGetter interface { + CiliumBGPPeerConfigs() CiliumBGPPeerConfigInterface +} + +// CiliumBGPPeerConfigInterface has methods to work with CiliumBGPPeerConfig resources. 
+type CiliumBGPPeerConfigInterface interface { + Create(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error) + Update(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + UpdateStatus(ctx context.Context, ciliumBGPPeerConfig *ciliumiov2alpha1.CiliumBGPPeerConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPPeerConfigList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPPeerConfig, err error) + CiliumBGPPeerConfigExpansion +} + +// ciliumBGPPeerConfigs implements CiliumBGPPeerConfigInterface +type ciliumBGPPeerConfigs struct { + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPPeerConfig, *ciliumiov2alpha1.CiliumBGPPeerConfigList] +} + +// newCiliumBGPPeerConfigs returns a CiliumBGPPeerConfigs +func newCiliumBGPPeerConfigs(c *CiliumV2alpha1Client) *ciliumBGPPeerConfigs { + return &ciliumBGPPeerConfigs{ + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPPeerConfig, *ciliumiov2alpha1.CiliumBGPPeerConfigList]( + "ciliumbgppeerconfigs", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPPeerConfig { return &ciliumiov2alpha1.CiliumBGPPeerConfig{} }, + func() *ciliumiov2alpha1.CiliumBGPPeerConfigList { return &ciliumiov2alpha1.CiliumBGPPeerConfigList{} }, + ), + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go index e19727c07a..b770595ab5 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumBGPPeeringPoliciesGetter has a method to return a CiliumBGPPeeringPolicyInterface. @@ -25,131 +24,34 @@ type CiliumBGPPeeringPoliciesGetter interface { // CiliumBGPPeeringPolicyInterface has methods to work with CiliumBGPPeeringPolicy resources. 
type CiliumBGPPeeringPolicyInterface interface { - Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error) - Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error) + Create(ctx context.Context, ciliumBGPPeeringPolicy *ciliumiov2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error) + Update(ctx context.Context, ciliumBGPPeeringPolicy *ciliumiov2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumBGPPeeringPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumBGPPeeringPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumBGPPeeringPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumBGPPeeringPolicy, err error) CiliumBGPPeeringPolicyExpansion } // ciliumBGPPeeringPolicies implements CiliumBGPPeeringPolicyInterface type ciliumBGPPeeringPolicies struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumBGPPeeringPolicy, *ciliumiov2alpha1.CiliumBGPPeeringPolicyList] } // newCiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicies func newCiliumBGPPeeringPolicies(c *CiliumV2alpha1Client) *ciliumBGPPeeringPolicies { return &ciliumBGPPeeringPolicies{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumBGPPeeringPolicy, *ciliumiov2alpha1.CiliumBGPPeeringPolicyList]( + "ciliumbgppeeringpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumBGPPeeringPolicy { return &ciliumiov2alpha1.CiliumBGPPeeringPolicy{} }, + func() *ciliumiov2alpha1.CiliumBGPPeeringPolicyList { + return &ciliumiov2alpha1.CiliumBGPPeeringPolicyList{} + }, + ), } } - -// Get takes name of the ciliumBGPPeeringPolicy, and returns the corresponding ciliumBGPPeeringPolicy object, and an error if there is any. -func (c *ciliumBGPPeeringPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { - result = &v2alpha1.CiliumBGPPeeringPolicy{} - err = c.client.Get(). - Resource("ciliumbgppeeringpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumBGPPeeringPolicies that match those selectors. 
-func (c *ciliumBGPPeeringPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumBGPPeeringPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumBGPPeeringPolicyList{} - err = c.client.Get(). - Resource("ciliumbgppeeringpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumBGPPeeringPolicies. -func (c *ciliumBGPPeeringPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumbgppeeringpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumBGPPeeringPolicy and creates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any. -func (c *ciliumBGPPeeringPolicies) Create(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { - result = &v2alpha1.CiliumBGPPeeringPolicy{} - err = c.client.Post(). - Resource("ciliumbgppeeringpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumBGPPeeringPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumBGPPeeringPolicy and updates it. Returns the server's representation of the ciliumBGPPeeringPolicy, and an error, if there is any. -func (c *ciliumBGPPeeringPolicies) Update(ctx context.Context, ciliumBGPPeeringPolicy *v2alpha1.CiliumBGPPeeringPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { - result = &v2alpha1.CiliumBGPPeeringPolicy{} - err = c.client.Put(). - Resource("ciliumbgppeeringpolicies"). - Name(ciliumBGPPeeringPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumBGPPeeringPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumBGPPeeringPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumBGPPeeringPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumbgppeeringpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumBGPPeeringPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumbgppeeringpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumBGPPeeringPolicy. -func (c *ciliumBGPPeeringPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumBGPPeeringPolicy, err error) { - result = &v2alpha1.CiliumBGPPeeringPolicy{} - err = c.client.Patch(pt). - Resource("ciliumbgppeeringpolicies"). - Name(name). - SubResource(subresources...). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go index 42f93365bb..21c9312a9c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumcidrgroup.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumCIDRGroupsGetter has a method to return a CiliumCIDRGroupInterface. @@ -25,131 +24,32 @@ type CiliumCIDRGroupsGetter interface { // CiliumCIDRGroupInterface has methods to work with CiliumCIDRGroup resources. type CiliumCIDRGroupInterface interface { - Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (*v2alpha1.CiliumCIDRGroup, error) - Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (*v2alpha1.CiliumCIDRGroup, error) + Create(ctx context.Context, ciliumCIDRGroup *ciliumiov2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error) + Update(ctx context.Context, ciliumCIDRGroup *ciliumiov2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumCIDRGroup, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumCIDRGroupList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumCIDRGroup, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumCIDRGroupList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumCIDRGroup, err error) CiliumCIDRGroupExpansion } // ciliumCIDRGroups implements CiliumCIDRGroupInterface type ciliumCIDRGroups struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumCIDRGroup, *ciliumiov2alpha1.CiliumCIDRGroupList] } // newCiliumCIDRGroups returns a CiliumCIDRGroups func newCiliumCIDRGroups(c *CiliumV2alpha1Client) *ciliumCIDRGroups { return &ciliumCIDRGroups{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumCIDRGroup, *ciliumiov2alpha1.CiliumCIDRGroupList]( + "ciliumcidrgroups", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumCIDRGroup { return &ciliumiov2alpha1.CiliumCIDRGroup{} 
}, + func() *ciliumiov2alpha1.CiliumCIDRGroupList { return &ciliumiov2alpha1.CiliumCIDRGroupList{} }, + ), } } - -// Get takes name of the ciliumCIDRGroup, and returns the corresponding ciliumCIDRGroup object, and an error if there is any. -func (c *ciliumCIDRGroups) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { - result = &v2alpha1.CiliumCIDRGroup{} - err = c.client.Get(). - Resource("ciliumcidrgroups"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumCIDRGroups that match those selectors. -func (c *ciliumCIDRGroups) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumCIDRGroupList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumCIDRGroupList{} - err = c.client.Get(). - Resource("ciliumcidrgroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumCIDRGroups. -func (c *ciliumCIDRGroups) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumcidrgroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumCIDRGroup and creates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any. -func (c *ciliumCIDRGroups) Create(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.CreateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { - result = &v2alpha1.CiliumCIDRGroup{} - err = c.client.Post(). - Resource("ciliumcidrgroups"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumCIDRGroup). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumCIDRGroup and updates it. Returns the server's representation of the ciliumCIDRGroup, and an error, if there is any. -func (c *ciliumCIDRGroups) Update(ctx context.Context, ciliumCIDRGroup *v2alpha1.CiliumCIDRGroup, opts v1.UpdateOptions) (result *v2alpha1.CiliumCIDRGroup, err error) { - result = &v2alpha1.CiliumCIDRGroup{} - err = c.client.Put(). - Resource("ciliumcidrgroups"). - Name(ciliumCIDRGroup.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumCIDRGroup). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumCIDRGroup and deletes it. Returns an error if one occurs. -func (c *ciliumCIDRGroups) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumcidrgroups"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumCIDRGroups) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumcidrgroups"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). 
- Error() -} - -// Patch applies the patch and returns the patched ciliumCIDRGroup. -func (c *ciliumCIDRGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumCIDRGroup, err error) { - result = &v2alpha1.CiliumCIDRGroup{} - err = c.client.Patch(pt). - Resource("ciliumcidrgroups"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go index 55cdaf11b6..04829640a3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumendpointslice.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumEndpointSlicesGetter has a method to return a CiliumEndpointSliceInterface. @@ -25,131 +24,32 @@ type CiliumEndpointSlicesGetter interface { // CiliumEndpointSliceInterface has methods to work with CiliumEndpointSlice resources. 
type CiliumEndpointSliceInterface interface { - Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (*v2alpha1.CiliumEndpointSlice, error) - Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (*v2alpha1.CiliumEndpointSlice, error) + Create(ctx context.Context, ciliumEndpointSlice *ciliumiov2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error) + Update(ctx context.Context, ciliumEndpointSlice *ciliumiov2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumEndpointSlice, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumEndpointSliceList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumEndpointSlice, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumEndpointSliceList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumEndpointSlice, err error) CiliumEndpointSliceExpansion } // ciliumEndpointSlices implements CiliumEndpointSliceInterface type ciliumEndpointSlices struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumEndpointSlice, *ciliumiov2alpha1.CiliumEndpointSliceList] } // newCiliumEndpointSlices returns a CiliumEndpointSlices func newCiliumEndpointSlices(c *CiliumV2alpha1Client) *ciliumEndpointSlices { return &ciliumEndpointSlices{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumEndpointSlice, *ciliumiov2alpha1.CiliumEndpointSliceList]( + "ciliumendpointslices", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumEndpointSlice { return &ciliumiov2alpha1.CiliumEndpointSlice{} }, + func() *ciliumiov2alpha1.CiliumEndpointSliceList { return &ciliumiov2alpha1.CiliumEndpointSliceList{} }, + ), } } - -// Get takes name of the ciliumEndpointSlice, and returns the corresponding ciliumEndpointSlice object, and an error if there is any. -func (c *ciliumEndpointSlices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { - result = &v2alpha1.CiliumEndpointSlice{} - err = c.client.Get(). - Resource("ciliumendpointslices"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumEndpointSlices that match those selectors. -func (c *ciliumEndpointSlices) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumEndpointSliceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumEndpointSliceList{} - err = c.client.Get(). - Resource("ciliumendpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumEndpointSlices. -func (c *ciliumEndpointSlices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumendpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumEndpointSlice and creates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any. -func (c *ciliumEndpointSlices) Create(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.CreateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { - result = &v2alpha1.CiliumEndpointSlice{} - err = c.client.Post(). - Resource("ciliumendpointslices"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEndpointSlice). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumEndpointSlice and updates it. Returns the server's representation of the ciliumEndpointSlice, and an error, if there is any. -func (c *ciliumEndpointSlices) Update(ctx context.Context, ciliumEndpointSlice *v2alpha1.CiliumEndpointSlice, opts v1.UpdateOptions) (result *v2alpha1.CiliumEndpointSlice, err error) { - result = &v2alpha1.CiliumEndpointSlice{} - err = c.client.Put(). - Resource("ciliumendpointslices"). - Name(ciliumEndpointSlice.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumEndpointSlice). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumEndpointSlice and deletes it. Returns an error if one occurs. -func (c *ciliumEndpointSlices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumendpointslices"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumEndpointSlices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumendpointslices"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumEndpointSlice. -func (c *ciliumEndpointSlices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumEndpointSlice, err error) { - result = &v2alpha1.CiliumEndpointSlice{} - err = c.client.Patch(pt). - Resource("ciliumendpointslices"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go index bed50df467..31651ab612 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliuml2announcementpolicy.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumL2AnnouncementPoliciesGetter has a method to return a CiliumL2AnnouncementPolicyInterface. @@ -25,147 +24,38 @@ type CiliumL2AnnouncementPoliciesGetter interface { // CiliumL2AnnouncementPolicyInterface has methods to work with CiliumL2AnnouncementPolicy resources. type CiliumL2AnnouncementPolicyInterface interface { - Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) - Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) - UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) + Create(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error) + Update(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumL2AnnouncementPolicy, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumL2AnnouncementPolicyList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicyList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumL2AnnouncementPolicy, err error) CiliumL2AnnouncementPolicyExpansion } // ciliumL2AnnouncementPolicies implements CiliumL2AnnouncementPolicyInterface type ciliumL2AnnouncementPolicies struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList] } // newCiliumL2AnnouncementPolicies returns a CiliumL2AnnouncementPolicies func newCiliumL2AnnouncementPolicies(c *CiliumV2alpha1Client) *ciliumL2AnnouncementPolicies { return &ciliumL2AnnouncementPolicies{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList]( + "ciliuml2announcementpolicies", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumL2AnnouncementPolicy { + return &ciliumiov2alpha1.CiliumL2AnnouncementPolicy{} + }, + func() *ciliumiov2alpha1.CiliumL2AnnouncementPolicyList { + return &ciliumiov2alpha1.CiliumL2AnnouncementPolicyList{} + }, + ), } } - -// Get takes name of the ciliumL2AnnouncementPolicy, and returns the corresponding ciliumL2AnnouncementPolicy object, and an error if there is any. -func (c *ciliumL2AnnouncementPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { - result = &v2alpha1.CiliumL2AnnouncementPolicy{} - err = c.client.Get(). - Resource("ciliuml2announcementpolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumL2AnnouncementPolicies that match those selectors. -func (c *ciliumL2AnnouncementPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumL2AnnouncementPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumL2AnnouncementPolicyList{} - err = c.client.Get(). - Resource("ciliuml2announcementpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumL2AnnouncementPolicies. 
-func (c *ciliumL2AnnouncementPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliuml2announcementpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumL2AnnouncementPolicy and creates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any. -func (c *ciliumL2AnnouncementPolicies) Create(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.CreateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { - result = &v2alpha1.CiliumL2AnnouncementPolicy{} - err = c.client.Post(). - Resource("ciliuml2announcementpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumL2AnnouncementPolicy). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumL2AnnouncementPolicy and updates it. Returns the server's representation of the ciliumL2AnnouncementPolicy, and an error, if there is any. -func (c *ciliumL2AnnouncementPolicies) Update(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { - result = &v2alpha1.CiliumL2AnnouncementPolicy{} - err = c.client.Put(). - Resource("ciliuml2announcementpolicies"). - Name(ciliumL2AnnouncementPolicy.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumL2AnnouncementPolicy). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumL2AnnouncementPolicies) UpdateStatus(ctx context.Context, ciliumL2AnnouncementPolicy *v2alpha1.CiliumL2AnnouncementPolicy, opts v1.UpdateOptions) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { - result = &v2alpha1.CiliumL2AnnouncementPolicy{} - err = c.client.Put(). - Resource("ciliuml2announcementpolicies"). - Name(ciliumL2AnnouncementPolicy.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumL2AnnouncementPolicy). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumL2AnnouncementPolicy and deletes it. Returns an error if one occurs. -func (c *ciliumL2AnnouncementPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliuml2announcementpolicies"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumL2AnnouncementPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliuml2announcementpolicies"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumL2AnnouncementPolicy. 
-func (c *ciliumL2AnnouncementPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumL2AnnouncementPolicy, err error) { - result = &v2alpha1.CiliumL2AnnouncementPolicy{} - err = c.client.Patch(pt). - Resource("ciliuml2announcementpolicies"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go index bdc76f4a5a..21f307b0c6 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumloadbalancerippool.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumLoadBalancerIPPoolsGetter has a method to return a CiliumLoadBalancerIPPoolInterface. @@ -25,147 +24,36 @@ type CiliumLoadBalancerIPPoolsGetter interface { // CiliumLoadBalancerIPPoolInterface has methods to work with CiliumLoadBalancerIPPool resources. type CiliumLoadBalancerIPPoolInterface interface { - Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) - Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) - UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) + Create(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error) + Update(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error) + // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *ciliumiov2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumLoadBalancerIPPool, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumLoadBalancerIPPoolList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumLoadBalancerIPPoolList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumLoadBalancerIPPool, err error) CiliumLoadBalancerIPPoolExpansion } // ciliumLoadBalancerIPPools implements CiliumLoadBalancerIPPoolInterface type ciliumLoadBalancerIPPools struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumLoadBalancerIPPool, *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList] } // newCiliumLoadBalancerIPPools returns a CiliumLoadBalancerIPPools func newCiliumLoadBalancerIPPools(c *CiliumV2alpha1Client) *ciliumLoadBalancerIPPools { return &ciliumLoadBalancerIPPools{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumLoadBalancerIPPool, *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList]( + "ciliumloadbalancerippools", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumLoadBalancerIPPool { return &ciliumiov2alpha1.CiliumLoadBalancerIPPool{} }, + func() *ciliumiov2alpha1.CiliumLoadBalancerIPPoolList { + return &ciliumiov2alpha1.CiliumLoadBalancerIPPoolList{} + }, + ), } } - -// Get takes name of the ciliumLoadBalancerIPPool, and returns the corresponding ciliumLoadBalancerIPPool object, and an error if there is any. -func (c *ciliumLoadBalancerIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { - result = &v2alpha1.CiliumLoadBalancerIPPool{} - err = c.client.Get(). - Resource("ciliumloadbalancerippools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumLoadBalancerIPPools that match those selectors. -func (c *ciliumLoadBalancerIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumLoadBalancerIPPoolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumLoadBalancerIPPoolList{} - err = c.client.Get(). - Resource("ciliumloadbalancerippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumLoadBalancerIPPools. 
-func (c *ciliumLoadBalancerIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumloadbalancerippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumLoadBalancerIPPool and creates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any. -func (c *ciliumLoadBalancerIPPools) Create(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { - result = &v2alpha1.CiliumLoadBalancerIPPool{} - err = c.client.Post(). - Resource("ciliumloadbalancerippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLoadBalancerIPPool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumLoadBalancerIPPool and updates it. Returns the server's representation of the ciliumLoadBalancerIPPool, and an error, if there is any. -func (c *ciliumLoadBalancerIPPools) Update(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { - result = &v2alpha1.CiliumLoadBalancerIPPool{} - err = c.client.Put(). - Resource("ciliumloadbalancerippools"). - Name(ciliumLoadBalancerIPPool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLoadBalancerIPPool). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *ciliumLoadBalancerIPPools) UpdateStatus(ctx context.Context, ciliumLoadBalancerIPPool *v2alpha1.CiliumLoadBalancerIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { - result = &v2alpha1.CiliumLoadBalancerIPPool{} - err = c.client.Put(). - Resource("ciliumloadbalancerippools"). - Name(ciliumLoadBalancerIPPool.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumLoadBalancerIPPool). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumLoadBalancerIPPool and deletes it. Returns an error if one occurs. -func (c *ciliumLoadBalancerIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumloadbalancerippools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumLoadBalancerIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumloadbalancerippools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumLoadBalancerIPPool. 
-func (c *ciliumLoadBalancerIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumLoadBalancerIPPool, err error) { - result = &v2alpha1.CiliumLoadBalancerIPPool{} - err = c.client.Patch(pt). - Resource("ciliumloadbalancerippools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go index bff4996a03..4ace6fc57a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumnodeconfig.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumNodeConfigsGetter has a method to return a CiliumNodeConfigInterface. @@ -25,141 +24,32 @@ type CiliumNodeConfigsGetter interface { // CiliumNodeConfigInterface has methods to work with CiliumNodeConfig resources. type CiliumNodeConfigInterface interface { - Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (*v2alpha1.CiliumNodeConfig, error) - Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (*v2alpha1.CiliumNodeConfig, error) + Create(ctx context.Context, ciliumNodeConfig *ciliumiov2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error) + Update(ctx context.Context, ciliumNodeConfig *ciliumiov2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumNodeConfig, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumNodeConfigList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumNodeConfig, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumNodeConfigList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumNodeConfig, err error) CiliumNodeConfigExpansion } // ciliumNodeConfigs implements CiliumNodeConfigInterface type ciliumNodeConfigs struct { - client rest.Interface - ns string + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumNodeConfig, *ciliumiov2alpha1.CiliumNodeConfigList] } // 
newCiliumNodeConfigs returns a CiliumNodeConfigs func newCiliumNodeConfigs(c *CiliumV2alpha1Client, namespace string) *ciliumNodeConfigs { return &ciliumNodeConfigs{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumNodeConfig, *ciliumiov2alpha1.CiliumNodeConfigList]( + "ciliumnodeconfigs", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *ciliumiov2alpha1.CiliumNodeConfig { return &ciliumiov2alpha1.CiliumNodeConfig{} }, + func() *ciliumiov2alpha1.CiliumNodeConfigList { return &ciliumiov2alpha1.CiliumNodeConfigList{} }, + ), } } - -// Get takes name of the ciliumNodeConfig, and returns the corresponding ciliumNodeConfig object, and an error if there is any. -func (c *ciliumNodeConfigs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumNodeConfig, err error) { - result = &v2alpha1.CiliumNodeConfig{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumNodeConfigs that match those selectors. -func (c *ciliumNodeConfigs) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumNodeConfigList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumNodeConfigList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumNodeConfigs. -func (c *ciliumNodeConfigs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumNodeConfig and creates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any. -func (c *ciliumNodeConfigs) Create(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.CreateOptions) (result *v2alpha1.CiliumNodeConfig, err error) { - result = &v2alpha1.CiliumNodeConfig{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNodeConfig). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumNodeConfig and updates it. Returns the server's representation of the ciliumNodeConfig, and an error, if there is any. -func (c *ciliumNodeConfigs) Update(ctx context.Context, ciliumNodeConfig *v2alpha1.CiliumNodeConfig, opts v1.UpdateOptions) (result *v2alpha1.CiliumNodeConfig, err error) { - result = &v2alpha1.CiliumNodeConfig{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - Name(ciliumNodeConfig.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumNodeConfig). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumNodeConfig and deletes it. Returns an error if one occurs. 
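
Note on the pattern above (illustrative commentary, not part of the vendored patch): in this bump every typed client drops its hand-written Get/List/Watch/Create/Update/Delete/Patch plumbing over rest.Interface and instead embeds k8s.io/client-go/gentype.ClientWithList. Cluster-scoped resources (CiliumLoadBalancerIPPool, CiliumPodIPPool) pass an empty namespace to the gentype constructor, while namespaced resources such as CiliumNodeConfig forward the namespace argument. Consumers are unaffected because the interface methods keep the same shapes, only the import alias of the API package changes. A minimal consumer-side sketch follows; the kubeconfig path is an assumption:

package main

import (
    "context"
    "fmt"

    ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Hypothetical kubeconfig path; adjust for your environment.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
    if err != nil {
        panic(err)
    }
    cs, err := ciliumclient.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    // Cluster-scoped resource: the gentype-backed client was constructed with an
    // empty namespace, so the request path carries no namespace segment.
    pools, err := cs.CiliumV2alpha1().CiliumLoadBalancerIPPools().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, p := range pools.Items {
        fmt.Println(p.Name)
    }
}
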
-func (c *ciliumNodeConfigs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumNodeConfigs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumNodeConfig. -func (c *ciliumNodeConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumNodeConfig, err error) { - result = &v2alpha1.CiliumNodeConfig{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ciliumnodeconfigs"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go index 024442952b..2fe4d07d9c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/ciliumpodippool.go @@ -6,15 +6,14 @@ package v2alpha1 import ( - "context" - "time" + context "context" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" scheme "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // CiliumPodIPPoolsGetter has a method to return a CiliumPodIPPoolInterface. @@ -25,131 +24,32 @@ type CiliumPodIPPoolsGetter interface { // CiliumPodIPPoolInterface has methods to work with CiliumPodIPPool resources. 
type CiliumPodIPPoolInterface interface { - Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (*v2alpha1.CiliumPodIPPool, error) - Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (*v2alpha1.CiliumPodIPPool, error) + Create(ctx context.Context, ciliumPodIPPool *ciliumiov2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error) + Update(ctx context.Context, ciliumPodIPPool *ciliumiov2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v2alpha1.CiliumPodIPPool, error) - List(ctx context.Context, opts v1.ListOptions) (*v2alpha1.CiliumPodIPPoolList, error) + Get(ctx context.Context, name string, opts v1.GetOptions) (*ciliumiov2alpha1.CiliumPodIPPool, error) + List(ctx context.Context, opts v1.ListOptions) (*ciliumiov2alpha1.CiliumPodIPPoolList, error) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *ciliumiov2alpha1.CiliumPodIPPool, err error) CiliumPodIPPoolExpansion } // ciliumPodIPPools implements CiliumPodIPPoolInterface type ciliumPodIPPools struct { - client rest.Interface + *gentype.ClientWithList[*ciliumiov2alpha1.CiliumPodIPPool, *ciliumiov2alpha1.CiliumPodIPPoolList] } // newCiliumPodIPPools returns a CiliumPodIPPools func newCiliumPodIPPools(c *CiliumV2alpha1Client) *ciliumPodIPPools { return &ciliumPodIPPools{ - client: c.RESTClient(), + gentype.NewClientWithList[*ciliumiov2alpha1.CiliumPodIPPool, *ciliumiov2alpha1.CiliumPodIPPoolList]( + "ciliumpodippools", + c.RESTClient(), + scheme.ParameterCodec, + "", + func() *ciliumiov2alpha1.CiliumPodIPPool { return &ciliumiov2alpha1.CiliumPodIPPool{} }, + func() *ciliumiov2alpha1.CiliumPodIPPoolList { return &ciliumiov2alpha1.CiliumPodIPPoolList{} }, + ), } } - -// Get takes name of the ciliumPodIPPool, and returns the corresponding ciliumPodIPPool object, and an error if there is any. -func (c *ciliumPodIPPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v2alpha1.CiliumPodIPPool, err error) { - result = &v2alpha1.CiliumPodIPPool{} - err = c.client.Get(). - Resource("ciliumpodippools"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CiliumPodIPPools that match those selectors. -func (c *ciliumPodIPPools) List(ctx context.Context, opts v1.ListOptions) (result *v2alpha1.CiliumPodIPPoolList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v2alpha1.CiliumPodIPPoolList{} - err = c.client.Get(). - Resource("ciliumpodippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ciliumPodIPPools. 
-func (c *ciliumPodIPPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("ciliumpodippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a ciliumPodIPPool and creates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any. -func (c *ciliumPodIPPools) Create(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.CreateOptions) (result *v2alpha1.CiliumPodIPPool, err error) { - result = &v2alpha1.CiliumPodIPPool{} - err = c.client.Post(). - Resource("ciliumpodippools"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumPodIPPool). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a ciliumPodIPPool and updates it. Returns the server's representation of the ciliumPodIPPool, and an error, if there is any. -func (c *ciliumPodIPPools) Update(ctx context.Context, ciliumPodIPPool *v2alpha1.CiliumPodIPPool, opts v1.UpdateOptions) (result *v2alpha1.CiliumPodIPPool, err error) { - result = &v2alpha1.CiliumPodIPPool{} - err = c.client.Put(). - Resource("ciliumpodippools"). - Name(ciliumPodIPPool.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(ciliumPodIPPool). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the ciliumPodIPPool and deletes it. Returns an error if one occurs. -func (c *ciliumPodIPPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("ciliumpodippools"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ciliumPodIPPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("ciliumpodippools"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched ciliumPodIPPool. -func (c *ciliumPodIPPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v2alpha1.CiliumPodIPPool, err error) { - result = &v2alpha1.CiliumPodIPPool{} - err = c.client.Patch(pt). - Resource("ciliumpodippools"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go index 7dc4b1076c..4b25b32bb8 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/generated_expansion.go @@ -5,6 +5,16 @@ package v2alpha1 +type CiliumBGPAdvertisementExpansion interface{} + +type CiliumBGPClusterConfigExpansion interface{} + +type CiliumBGPNodeConfigExpansion interface{} + +type CiliumBGPNodeConfigOverrideExpansion interface{} + +type CiliumBGPPeerConfigExpansion interface{} + type CiliumBGPPeeringPolicyExpansion interface{} type CiliumCIDRGroupExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwideenvoyconfig.go index ab11f76feb..a0a66bbca1 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwideenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwideenvoyconfig.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumClusterwideEnvoyConfigs. type CiliumClusterwideEnvoyConfigInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumClusterwideEnvoyConfigLister + Lister() ciliumiov2.CiliumClusterwideEnvoyConfigLister } type ciliumClusterwideEnvoyConfigInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumClusterwideEnvoyConfigInformer(client versioned.Interface, return client.CiliumV2().CiliumClusterwideEnvoyConfigs().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumClusterwideEnvoyConfig{}, + &apisciliumiov2.CiliumClusterwideEnvoyConfig{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumClusterwideEnvoyConfigInformer) defaultInformer(client versioned. 
} func (f *ciliumClusterwideEnvoyConfigInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumClusterwideEnvoyConfig{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumClusterwideEnvoyConfig{}, f.defaultInformer) } -func (f *ciliumClusterwideEnvoyConfigInformer) Lister() v2.CiliumClusterwideEnvoyConfigLister { - return v2.NewCiliumClusterwideEnvoyConfigLister(f.Informer().GetIndexer()) +func (f *ciliumClusterwideEnvoyConfigInformer) Lister() ciliumiov2.CiliumClusterwideEnvoyConfigLister { + return ciliumiov2.NewCiliumClusterwideEnvoyConfigLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwidenetworkpolicy.go index 68db1b7413..60a32547ea 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwidenetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumclusterwidenetworkpolicy.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumClusterwideNetworkPolicies. 
type CiliumClusterwideNetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumClusterwideNetworkPolicyLister + Lister() ciliumiov2.CiliumClusterwideNetworkPolicyLister } type ciliumClusterwideNetworkPolicyInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumClusterwideNetworkPolicyInformer(client versioned.Interfac return client.CiliumV2().CiliumClusterwideNetworkPolicies().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumClusterwideNetworkPolicy{}, + &apisciliumiov2.CiliumClusterwideNetworkPolicy{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumClusterwideNetworkPolicyInformer) defaultInformer(client versione } func (f *ciliumClusterwideNetworkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumClusterwideNetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumClusterwideNetworkPolicy{}, f.defaultInformer) } -func (f *ciliumClusterwideNetworkPolicyInformer) Lister() v2.CiliumClusterwideNetworkPolicyLister { - return v2.NewCiliumClusterwideNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumClusterwideNetworkPolicyInformer) Lister() ciliumiov2.CiliumClusterwideNetworkPolicyLister { + return ciliumiov2.NewCiliumClusterwideNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumegressgatewaypolicy.go index 0fc551c08c..0a60eda9d8 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumegressgatewaypolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumegressgatewaypolicy.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumEgressGatewayPolicies. 
type CiliumEgressGatewayPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumEgressGatewayPolicyLister + Lister() ciliumiov2.CiliumEgressGatewayPolicyLister } type ciliumEgressGatewayPolicyInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumEgressGatewayPolicyInformer(client versioned.Interface, re return client.CiliumV2().CiliumEgressGatewayPolicies().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumEgressGatewayPolicy{}, + &apisciliumiov2.CiliumEgressGatewayPolicy{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumEgressGatewayPolicyInformer) defaultInformer(client versioned.Int } func (f *ciliumEgressGatewayPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumEgressGatewayPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumEgressGatewayPolicy{}, f.defaultInformer) } -func (f *ciliumEgressGatewayPolicyInformer) Lister() v2.CiliumEgressGatewayPolicyLister { - return v2.NewCiliumEgressGatewayPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumEgressGatewayPolicyInformer) Lister() ciliumiov2.CiliumEgressGatewayPolicyLister { + return ciliumiov2.NewCiliumEgressGatewayPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumendpoint.go index 6c4aae1657..e754ad71cb 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumendpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumendpoint.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumEndpoints. 
type CiliumEndpointInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumEndpointLister + Lister() ciliumiov2.CiliumEndpointLister } type ciliumEndpointInformer struct { @@ -58,7 +58,7 @@ func NewFilteredCiliumEndpointInformer(client versioned.Interface, namespace str return client.CiliumV2().CiliumEndpoints(namespace).Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumEndpoint{}, + &apisciliumiov2.CiliumEndpoint{}, resyncPeriod, indexers, ) @@ -69,9 +69,9 @@ func (f *ciliumEndpointInformer) defaultInformer(client versioned.Interface, res } func (f *ciliumEndpointInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumEndpoint{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumEndpoint{}, f.defaultInformer) } -func (f *ciliumEndpointInformer) Lister() v2.CiliumEndpointLister { - return v2.NewCiliumEndpointLister(f.Informer().GetIndexer()) +func (f *ciliumEndpointInformer) Lister() ciliumiov2.CiliumEndpointLister { + return ciliumiov2.NewCiliumEndpointLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumenvoyconfig.go index 9d2b06ddc9..03883a2148 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumenvoyconfig.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumEnvoyConfigs. 
type CiliumEnvoyConfigInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumEnvoyConfigLister + Lister() ciliumiov2.CiliumEnvoyConfigLister } type ciliumEnvoyConfigInformer struct { @@ -58,7 +58,7 @@ func NewFilteredCiliumEnvoyConfigInformer(client versioned.Interface, namespace return client.CiliumV2().CiliumEnvoyConfigs(namespace).Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumEnvoyConfig{}, + &apisciliumiov2.CiliumEnvoyConfig{}, resyncPeriod, indexers, ) @@ -69,9 +69,9 @@ func (f *ciliumEnvoyConfigInformer) defaultInformer(client versioned.Interface, } func (f *ciliumEnvoyConfigInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumEnvoyConfig{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumEnvoyConfig{}, f.defaultInformer) } -func (f *ciliumEnvoyConfigInformer) Lister() v2.CiliumEnvoyConfigLister { - return v2.NewCiliumEnvoyConfigLister(f.Informer().GetIndexer()) +func (f *ciliumEnvoyConfigInformer) Lister() ciliumiov2.CiliumEnvoyConfigLister { + return ciliumiov2.NewCiliumEnvoyConfigLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumexternalworkload.go index 39755c67d2..06a682179a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumexternalworkload.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumexternalworkload.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumExternalWorkloads. 
type CiliumExternalWorkloadInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumExternalWorkloadLister + Lister() ciliumiov2.CiliumExternalWorkloadLister } type ciliumExternalWorkloadInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumExternalWorkloadInformer(client versioned.Interface, resyn return client.CiliumV2().CiliumExternalWorkloads().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumExternalWorkload{}, + &apisciliumiov2.CiliumExternalWorkload{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumExternalWorkloadInformer) defaultInformer(client versioned.Interf } func (f *ciliumExternalWorkloadInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumExternalWorkload{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumExternalWorkload{}, f.defaultInformer) } -func (f *ciliumExternalWorkloadInformer) Lister() v2.CiliumExternalWorkloadLister { - return v2.NewCiliumExternalWorkloadLister(f.Informer().GetIndexer()) +func (f *ciliumExternalWorkloadInformer) Lister() ciliumiov2.CiliumExternalWorkloadLister { + return ciliumiov2.NewCiliumExternalWorkloadLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumidentity.go index 4d09241937..e15c251adb 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumidentity.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumidentity.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumIdentities. 
type CiliumIdentityInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumIdentityLister + Lister() ciliumiov2.CiliumIdentityLister } type ciliumIdentityInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumIdentityInformer(client versioned.Interface, resyncPeriod return client.CiliumV2().CiliumIdentities().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumIdentity{}, + &apisciliumiov2.CiliumIdentity{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumIdentityInformer) defaultInformer(client versioned.Interface, res } func (f *ciliumIdentityInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumIdentity{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumIdentity{}, f.defaultInformer) } -func (f *ciliumIdentityInformer) Lister() v2.CiliumIdentityLister { - return v2.NewCiliumIdentityLister(f.Informer().GetIndexer()) +func (f *ciliumIdentityInformer) Lister() ciliumiov2.CiliumIdentityLister { + return ciliumiov2.NewCiliumIdentityLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumlocalredirectpolicy.go index 23c7bd60c9..f9913fe052 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumlocalredirectpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumlocalredirectpolicy.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumLocalRedirectPolicies. 
type CiliumLocalRedirectPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumLocalRedirectPolicyLister + Lister() ciliumiov2.CiliumLocalRedirectPolicyLister } type ciliumLocalRedirectPolicyInformer struct { @@ -58,7 +58,7 @@ func NewFilteredCiliumLocalRedirectPolicyInformer(client versioned.Interface, na return client.CiliumV2().CiliumLocalRedirectPolicies(namespace).Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumLocalRedirectPolicy{}, + &apisciliumiov2.CiliumLocalRedirectPolicy{}, resyncPeriod, indexers, ) @@ -69,9 +69,9 @@ func (f *ciliumLocalRedirectPolicyInformer) defaultInformer(client versioned.Int } func (f *ciliumLocalRedirectPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumLocalRedirectPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumLocalRedirectPolicy{}, f.defaultInformer) } -func (f *ciliumLocalRedirectPolicyInformer) Lister() v2.CiliumLocalRedirectPolicyLister { - return v2.NewCiliumLocalRedirectPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumLocalRedirectPolicyInformer) Lister() ciliumiov2.CiliumLocalRedirectPolicyLister { + return ciliumiov2.NewCiliumLocalRedirectPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnetworkpolicy.go index 84812f67d2..b011c4fc76 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnetworkpolicy.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumNetworkPolicies. 
type CiliumNetworkPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumNetworkPolicyLister + Lister() ciliumiov2.CiliumNetworkPolicyLister } type ciliumNetworkPolicyInformer struct { @@ -58,7 +58,7 @@ func NewFilteredCiliumNetworkPolicyInformer(client versioned.Interface, namespac return client.CiliumV2().CiliumNetworkPolicies(namespace).Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumNetworkPolicy{}, + &apisciliumiov2.CiliumNetworkPolicy{}, resyncPeriod, indexers, ) @@ -69,9 +69,9 @@ func (f *ciliumNetworkPolicyInformer) defaultInformer(client versioned.Interface } func (f *ciliumNetworkPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumNetworkPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumNetworkPolicy{}, f.defaultInformer) } -func (f *ciliumNetworkPolicyInformer) Lister() v2.CiliumNetworkPolicyLister { - return v2.NewCiliumNetworkPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumNetworkPolicyInformer) Lister() ciliumiov2.CiliumNetworkPolicyLister { + return ciliumiov2.NewCiliumNetworkPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnode.go index c040b33069..b5b3315f29 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnode.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnode.go @@ -6,13 +6,13 @@ package v2 import ( - "context" + context "context" time "time" - ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumNodes. 
type CiliumNodeInformer interface { Informer() cache.SharedIndexInformer - Lister() v2.CiliumNodeLister + Lister() ciliumiov2.CiliumNodeLister } type ciliumNodeInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumNodeInformer(client versioned.Interface, resyncPeriod time return client.CiliumV2().CiliumNodes().Watch(context.TODO(), options) }, }, - &ciliumiov2.CiliumNode{}, + &apisciliumiov2.CiliumNode{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumNodeInformer) defaultInformer(client versioned.Interface, resyncP } func (f *ciliumNodeInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2.CiliumNode{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2.CiliumNode{}, f.defaultInformer) } -func (f *ciliumNodeInformer) Lister() v2.CiliumNodeLister { - return v2.NewCiliumNodeLister(f.Informer().GetIndexer()) +func (f *ciliumNodeInformer) Lister() ciliumiov2.CiliumNodeLister { + return ciliumiov2.NewCiliumNodeLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnodeconfig.go new file mode 100644 index 0000000000..3d589a48f2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/ciliumnodeconfig.go @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2 + +import ( + context "context" + time "time" + + apisciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumNodeConfigInformer provides access to a shared informer and lister for +// CiliumNodeConfigs. +type CiliumNodeConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2.CiliumNodeConfigLister +} + +type ciliumNodeConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCiliumNodeConfigInformer constructs a new informer for CiliumNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumNodeConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumNodeConfigInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumNodeConfigInformer constructs a new informer for CiliumNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumNodeConfigInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2().CiliumNodeConfigs(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2().CiliumNodeConfigs(namespace).Watch(context.TODO(), options) + }, + }, + &apisciliumiov2.CiliumNodeConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumNodeConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumNodeConfigInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumNodeConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2.CiliumNodeConfig{}, f.defaultInformer) +} + +func (f *ciliumNodeConfigInformer) Lister() ciliumiov2.CiliumNodeConfigLister { + return ciliumiov2.NewCiliumNodeConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/interface.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/interface.go index 7a0247d9b2..b169b8fe49 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/interface.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2/interface.go @@ -31,6 +31,8 @@ type Interface interface { CiliumNetworkPolicies() CiliumNetworkPolicyInformer // CiliumNodes returns a CiliumNodeInformer. CiliumNodes() CiliumNodeInformer + // CiliumNodeConfigs returns a CiliumNodeConfigInformer. + CiliumNodeConfigs() CiliumNodeConfigInformer } type version struct { @@ -93,3 +95,8 @@ func (v *version) CiliumNetworkPolicies() CiliumNetworkPolicyInformer { func (v *version) CiliumNodes() CiliumNodeInformer { return &ciliumNodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } + +// CiliumNodeConfigs returns a CiliumNodeConfigInformer. +func (v *version) CiliumNodeConfigs() CiliumNodeConfigInformer { + return &ciliumNodeConfigInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..ffcbede089 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. 
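
The new cilium.io/v2 CiliumNodeConfig informer above is also exposed through the versioned informer Interface (see the interface.go hunk), so it can be obtained from the shared factory or built directly with the constructors added in this file. A standalone sketch using exactly those constructors, illustrative only and not part of the vendored patch; the kubeconfig path, namespace scope, and resync period are assumptions:

package main

import (
    "fmt"
    "time"

    ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned"
    v2informers "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2"
    v2listers "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
    if err != nil {
        panic(err)
    }
    cs, err := ciliumclient.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }

    // Watch CiliumNodeConfigs in all namespaces with a 30s resync.
    inf := v2informers.NewCiliumNodeConfigInformer(
        cs, metav1.NamespaceAll, 30*time.Second,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
    )

    stop := make(chan struct{})
    defer close(stop)
    go inf.Run(stop)
    if !cache.WaitForCacheSync(stop, inf.HasSynced) {
        panic("CiliumNodeConfig informer cache never synced")
    }

    // The same lister constructor the generated Lister() method uses also works
    // against a standalone informer's indexer.
    lister := v2listers.NewCiliumNodeConfigLister(inf.GetIndexer())
    cfgs, err := lister.List(labels.Everything())
    if err != nil {
        panic(err)
    }
    for _, c := range cfgs {
        fmt.Printf("%s/%s\n", c.Namespace, c.Name)
    }
}
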
+ +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPAdvertisementInformer provides access to a shared informer and lister for +// CiliumBGPAdvertisements. +type CiliumBGPAdvertisementInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumBGPAdvertisementLister +} + +type ciliumBGPAdvertisementInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPAdvertisementInformer constructs a new informer for CiliumBGPAdvertisement type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPAdvertisementInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPAdvertisementInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPAdvertisementInformer constructs a new informer for CiliumBGPAdvertisement type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPAdvertisementInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPAdvertisements().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPAdvertisements().Watch(context.TODO(), options) + }, + }, + &apisciliumiov2alpha1.CiliumBGPAdvertisement{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPAdvertisementInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPAdvertisementInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPAdvertisementInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPAdvertisement{}, f.defaultInformer) +} + +func (f *ciliumBGPAdvertisementInformer) Lister() ciliumiov2alpha1.CiliumBGPAdvertisementLister { + return ciliumiov2alpha1.NewCiliumBGPAdvertisementLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..5de8c28386 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPClusterConfigInformer provides access to a shared informer and lister for +// CiliumBGPClusterConfigs. +type CiliumBGPClusterConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumBGPClusterConfigLister +} + +type ciliumBGPClusterConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPClusterConfigInformer constructs a new informer for CiliumBGPClusterConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCiliumBGPClusterConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPClusterConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPClusterConfigInformer constructs a new informer for CiliumBGPClusterConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCiliumBGPClusterConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPClusterConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPClusterConfigs().Watch(context.TODO(), options) + }, + }, + &apisciliumiov2alpha1.CiliumBGPClusterConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPClusterConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPClusterConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPClusterConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPClusterConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPClusterConfigInformer) Lister() ciliumiov2alpha1.CiliumBGPClusterConfigLister { + return ciliumiov2alpha1.NewCiliumBGPClusterConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..807e6da8d5 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigInformer provides access to a shared informer and lister for +// CiliumBGPNodeConfigs. 
+type CiliumBGPNodeConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumBGPNodeConfigLister +} + +type ciliumBGPNodeConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPNodeConfigInformer constructs a new informer for CiliumBGPNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPNodeConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPNodeConfigInformer constructs a new informer for CiliumBGPNodeConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCiliumBGPNodeConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigs().Watch(context.TODO(), options) + }, + }, + &apisciliumiov2alpha1.CiliumBGPNodeConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPNodeConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPNodeConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPNodeConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPNodeConfigInformer) Lister() ciliumiov2alpha1.CiliumBGPNodeConfigLister { + return ciliumiov2alpha1.NewCiliumBGPNodeConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..e9ae86f54a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigOverrideInformer provides access to a shared informer and lister for +// CiliumBGPNodeConfigOverrides. +type CiliumBGPNodeConfigOverrideInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumBGPNodeConfigOverrideLister +} + +type ciliumBGPNodeConfigOverrideInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPNodeConfigOverrideInformer constructs a new informer for CiliumBGPNodeConfigOverride type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCiliumBGPNodeConfigOverrideInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigOverrideInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPNodeConfigOverrideInformer constructs a new informer for CiliumBGPNodeConfigOverride type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCiliumBGPNodeConfigOverrideInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigOverrides().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPNodeConfigOverrides().Watch(context.TODO(), options) + }, + }, + &apisciliumiov2alpha1.CiliumBGPNodeConfigOverride{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPNodeConfigOverrideInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPNodeConfigOverride{}, f.defaultInformer) +} + +func (f *ciliumBGPNodeConfigOverrideInformer) Lister() ciliumiov2alpha1.CiliumBGPNodeConfigOverrideLister { + return ciliumiov2alpha1.NewCiliumBGPNodeConfigOverrideLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..0a103c1f5c --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by informer-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + context "context" + time "time" + + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" + internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPPeerConfigInformer provides access to a shared informer and lister for +// CiliumBGPPeerConfigs. +type CiliumBGPPeerConfigInformer interface { + Informer() cache.SharedIndexInformer + Lister() ciliumiov2alpha1.CiliumBGPPeerConfigLister +} + +type ciliumBGPPeerConfigInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCiliumBGPPeerConfigInformer constructs a new informer for CiliumBGPPeerConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewCiliumBGPPeerConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCiliumBGPPeerConfigInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCiliumBGPPeerConfigInformer constructs a new informer for CiliumBGPPeerConfig type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCiliumBGPPeerConfigInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPPeerConfigs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CiliumV2alpha1().CiliumBGPPeerConfigs().Watch(context.TODO(), options) + }, + }, + &apisciliumiov2alpha1.CiliumBGPPeerConfig{}, + resyncPeriod, + indexers, + ) +} + +func (f *ciliumBGPPeerConfigInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCiliumBGPPeerConfigInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ciliumBGPPeerConfigInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPPeerConfig{}, f.defaultInformer) +} + +func (f *ciliumBGPPeerConfigInformer) Lister() ciliumiov2alpha1.CiliumBGPPeerConfigLister { + return ciliumiov2alpha1.NewCiliumBGPPeerConfigLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go index d046ac8d3a..bdd7dca058 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumBGPPeeringPolicies. 
type CiliumBGPPeeringPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumBGPPeeringPolicyLister + Lister() ciliumiov2alpha1.CiliumBGPPeeringPolicyLister } type ciliumBGPPeeringPolicyInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumBGPPeeringPolicyInformer(client versioned.Interface, resyn return client.CiliumV2alpha1().CiliumBGPPeeringPolicies().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumBGPPeeringPolicy{}, + &apisciliumiov2alpha1.CiliumBGPPeeringPolicy{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumBGPPeeringPolicyInformer) defaultInformer(client versioned.Interf } func (f *ciliumBGPPeeringPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumBGPPeeringPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumBGPPeeringPolicy{}, f.defaultInformer) } -func (f *ciliumBGPPeeringPolicyInformer) Lister() v2alpha1.CiliumBGPPeeringPolicyLister { - return v2alpha1.NewCiliumBGPPeeringPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumBGPPeeringPolicyInformer) Lister() ciliumiov2alpha1.CiliumBGPPeeringPolicyLister { + return ciliumiov2alpha1.NewCiliumBGPPeeringPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumcidrgroup.go index 94d6c59b5e..9b7576a0d9 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumcidrgroup.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumcidrgroup.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumCIDRGroups. 
type CiliumCIDRGroupInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumCIDRGroupLister + Lister() ciliumiov2alpha1.CiliumCIDRGroupLister } type ciliumCIDRGroupInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumCIDRGroupInformer(client versioned.Interface, resyncPeriod return client.CiliumV2alpha1().CiliumCIDRGroups().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumCIDRGroup{}, + &apisciliumiov2alpha1.CiliumCIDRGroup{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumCIDRGroupInformer) defaultInformer(client versioned.Interface, re } func (f *ciliumCIDRGroupInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumCIDRGroup{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumCIDRGroup{}, f.defaultInformer) } -func (f *ciliumCIDRGroupInformer) Lister() v2alpha1.CiliumCIDRGroupLister { - return v2alpha1.NewCiliumCIDRGroupLister(f.Informer().GetIndexer()) +func (f *ciliumCIDRGroupInformer) Lister() ciliumiov2alpha1.CiliumCIDRGroupLister { + return ciliumiov2alpha1.NewCiliumCIDRGroupLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumendpointslice.go index 0c2768da10..ab657d6d6e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumendpointslice.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumendpointslice.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumEndpointSlices. 
type CiliumEndpointSliceInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumEndpointSliceLister + Lister() ciliumiov2alpha1.CiliumEndpointSliceLister } type ciliumEndpointSliceInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumEndpointSliceInformer(client versioned.Interface, resyncPe return client.CiliumV2alpha1().CiliumEndpointSlices().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumEndpointSlice{}, + &apisciliumiov2alpha1.CiliumEndpointSlice{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumEndpointSliceInformer) defaultInformer(client versioned.Interface } func (f *ciliumEndpointSliceInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumEndpointSlice{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumEndpointSlice{}, f.defaultInformer) } -func (f *ciliumEndpointSliceInformer) Lister() v2alpha1.CiliumEndpointSliceLister { - return v2alpha1.NewCiliumEndpointSliceLister(f.Informer().GetIndexer()) +func (f *ciliumEndpointSliceInformer) Lister() ciliumiov2alpha1.CiliumEndpointSliceLister { + return ciliumiov2alpha1.NewCiliumEndpointSliceLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliuml2announcementpolicy.go index 932096e554..88ad3b1e82 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliuml2announcementpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliuml2announcementpolicy.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumL2AnnouncementPolicies. 
type CiliumL2AnnouncementPolicyInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumL2AnnouncementPolicyLister + Lister() ciliumiov2alpha1.CiliumL2AnnouncementPolicyLister } type ciliumL2AnnouncementPolicyInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumL2AnnouncementPolicyInformer(client versioned.Interface, r return client.CiliumV2alpha1().CiliumL2AnnouncementPolicies().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumL2AnnouncementPolicy{}, + &apisciliumiov2alpha1.CiliumL2AnnouncementPolicy{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumL2AnnouncementPolicyInformer) defaultInformer(client versioned.In } func (f *ciliumL2AnnouncementPolicyInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumL2AnnouncementPolicy{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumL2AnnouncementPolicy{}, f.defaultInformer) } -func (f *ciliumL2AnnouncementPolicyInformer) Lister() v2alpha1.CiliumL2AnnouncementPolicyLister { - return v2alpha1.NewCiliumL2AnnouncementPolicyLister(f.Informer().GetIndexer()) +func (f *ciliumL2AnnouncementPolicyInformer) Lister() ciliumiov2alpha1.CiliumL2AnnouncementPolicyLister { + return ciliumiov2alpha1.NewCiliumL2AnnouncementPolicyLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumloadbalancerippool.go index 122ef626c5..303a37bd35 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumloadbalancerippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumloadbalancerippool.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumLoadBalancerIPPools. 
type CiliumLoadBalancerIPPoolInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumLoadBalancerIPPoolLister + Lister() ciliumiov2alpha1.CiliumLoadBalancerIPPoolLister } type ciliumLoadBalancerIPPoolInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumLoadBalancerIPPoolInformer(client versioned.Interface, res return client.CiliumV2alpha1().CiliumLoadBalancerIPPools().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumLoadBalancerIPPool{}, + &apisciliumiov2alpha1.CiliumLoadBalancerIPPool{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumLoadBalancerIPPoolInformer) defaultInformer(client versioned.Inte } func (f *ciliumLoadBalancerIPPoolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumLoadBalancerIPPool{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumLoadBalancerIPPool{}, f.defaultInformer) } -func (f *ciliumLoadBalancerIPPoolInformer) Lister() v2alpha1.CiliumLoadBalancerIPPoolLister { - return v2alpha1.NewCiliumLoadBalancerIPPoolLister(f.Informer().GetIndexer()) +func (f *ciliumLoadBalancerIPPoolInformer) Lister() ciliumiov2alpha1.CiliumLoadBalancerIPPoolLister { + return ciliumiov2alpha1.NewCiliumLoadBalancerIPPoolLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumnodeconfig.go index 25a605ddc0..e1cde017d4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumnodeconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumnodeconfig.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumNodeConfigs. 
type CiliumNodeConfigInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumNodeConfigLister + Lister() ciliumiov2alpha1.CiliumNodeConfigLister } type ciliumNodeConfigInformer struct { @@ -58,7 +58,7 @@ func NewFilteredCiliumNodeConfigInformer(client versioned.Interface, namespace s return client.CiliumV2alpha1().CiliumNodeConfigs(namespace).Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumNodeConfig{}, + &apisciliumiov2alpha1.CiliumNodeConfig{}, resyncPeriod, indexers, ) @@ -69,9 +69,9 @@ func (f *ciliumNodeConfigInformer) defaultInformer(client versioned.Interface, r } func (f *ciliumNodeConfigInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumNodeConfig{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumNodeConfig{}, f.defaultInformer) } -func (f *ciliumNodeConfigInformer) Lister() v2alpha1.CiliumNodeConfigLister { - return v2alpha1.NewCiliumNodeConfigLister(f.Informer().GetIndexer()) +func (f *ciliumNodeConfigInformer) Lister() ciliumiov2alpha1.CiliumNodeConfigLister { + return ciliumiov2alpha1.NewCiliumNodeConfigLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumpodippool.go index 4995488207..751365f078 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumpodippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/ciliumpodippool.go @@ -6,13 +6,13 @@ package v2alpha1 import ( - "context" + context "context" time "time" - ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + apisciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" versioned "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned" internalinterfaces "github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/internalinterfaces" - v2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" @@ -23,7 +23,7 @@ import ( // CiliumPodIPPools. 
type CiliumPodIPPoolInformer interface { Informer() cache.SharedIndexInformer - Lister() v2alpha1.CiliumPodIPPoolLister + Lister() ciliumiov2alpha1.CiliumPodIPPoolLister } type ciliumPodIPPoolInformer struct { @@ -57,7 +57,7 @@ func NewFilteredCiliumPodIPPoolInformer(client versioned.Interface, resyncPeriod return client.CiliumV2alpha1().CiliumPodIPPools().Watch(context.TODO(), options) }, }, - &ciliumiov2alpha1.CiliumPodIPPool{}, + &apisciliumiov2alpha1.CiliumPodIPPool{}, resyncPeriod, indexers, ) @@ -68,9 +68,9 @@ func (f *ciliumPodIPPoolInformer) defaultInformer(client versioned.Interface, re } func (f *ciliumPodIPPoolInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&ciliumiov2alpha1.CiliumPodIPPool{}, f.defaultInformer) + return f.factory.InformerFor(&apisciliumiov2alpha1.CiliumPodIPPool{}, f.defaultInformer) } -func (f *ciliumPodIPPoolInformer) Lister() v2alpha1.CiliumPodIPPoolLister { - return v2alpha1.NewCiliumPodIPPoolLister(f.Informer().GetIndexer()) +func (f *ciliumPodIPPoolInformer) Lister() ciliumiov2alpha1.CiliumPodIPPoolLister { + return ciliumiov2alpha1.NewCiliumPodIPPoolLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go index 0301a05ac0..5d508bcbc2 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/cilium.io/v2alpha1/interface.go @@ -11,6 +11,16 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // CiliumBGPAdvertisements returns a CiliumBGPAdvertisementInformer. + CiliumBGPAdvertisements() CiliumBGPAdvertisementInformer + // CiliumBGPClusterConfigs returns a CiliumBGPClusterConfigInformer. + CiliumBGPClusterConfigs() CiliumBGPClusterConfigInformer + // CiliumBGPNodeConfigs returns a CiliumBGPNodeConfigInformer. + CiliumBGPNodeConfigs() CiliumBGPNodeConfigInformer + // CiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrideInformer. + CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInformer + // CiliumBGPPeerConfigs returns a CiliumBGPPeerConfigInformer. + CiliumBGPPeerConfigs() CiliumBGPPeerConfigInformer // CiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicyInformer. CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInformer // CiliumCIDRGroups returns a CiliumCIDRGroupInformer. @@ -38,6 +48,31 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CiliumBGPAdvertisements returns a CiliumBGPAdvertisementInformer. +func (v *version) CiliumBGPAdvertisements() CiliumBGPAdvertisementInformer { + return &ciliumBGPAdvertisementInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPClusterConfigs returns a CiliumBGPClusterConfigInformer. +func (v *version) CiliumBGPClusterConfigs() CiliumBGPClusterConfigInformer { + return &ciliumBGPClusterConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPNodeConfigs returns a CiliumBGPNodeConfigInformer. 
+func (v *version) CiliumBGPNodeConfigs() CiliumBGPNodeConfigInformer { + return &ciliumBGPNodeConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPNodeConfigOverrides returns a CiliumBGPNodeConfigOverrideInformer. +func (v *version) CiliumBGPNodeConfigOverrides() CiliumBGPNodeConfigOverrideInformer { + return &ciliumBGPNodeConfigOverrideInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// CiliumBGPPeerConfigs returns a CiliumBGPPeerConfigInformer. +func (v *version) CiliumBGPPeerConfigs() CiliumBGPPeerConfigInformer { + return &ciliumBGPPeerConfigInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // CiliumBGPPeeringPolicies returns a CiliumBGPPeeringPolicyInformer. func (v *version) CiliumBGPPeeringPolicies() CiliumBGPPeeringPolicyInformer { return &ciliumBGPPeeringPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go index 21a6ed6b58..75b30f9342 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/factory.go @@ -29,6 +29,7 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. @@ -67,6 +68,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -153,7 +162,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -171,6 +180,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -205,6 +215,7 @@ type SharedInformerFactory interface { // Start initializes all requested informers. They are handled in goroutines // which run until the stop channel gets closed. + // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync. Start(stopCh <-chan struct{}) // Shutdown marks a factory as shutting down. At that point no new @@ -226,7 +237,7 @@ type SharedInformerFactory interface { // ForResource gives generic access to a shared informer of the matching type. 
ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // InformerFor returns the SharedIndexInformer for obj using an internal // client. InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go index 7ed1e8f6ea..71493201ee 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/informers/externalversions/generic.go @@ -6,7 +6,7 @@ package externalversions import ( - "fmt" + fmt "fmt" v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" @@ -61,8 +61,20 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2().CiliumNetworkPolicies().Informer()}, nil case v2.SchemeGroupVersion.WithResource("ciliumnodes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2().CiliumNodes().Informer()}, nil + case v2.SchemeGroupVersion.WithResource("ciliumnodeconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2().CiliumNodeConfigs().Informer()}, nil // Group=cilium.io, Version=v2alpha1 + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpadvertisements"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPAdvertisements().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpclusterconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPClusterConfigs().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPNodeConfigs().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgpnodeconfigoverrides"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPNodeConfigOverrides().Informer()}, nil + case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeerconfigs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPPeerConfigs().Informer()}, nil case v2alpha1.SchemeGroupVersion.WithResource("ciliumbgppeeringpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Cilium().V2alpha1().CiliumBGPPeeringPolicies().Informer()}, nil case v2alpha1.SchemeGroupVersion.WithResource("ciliumcidrgroups"): diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwideenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwideenvoyconfig.go index 50b8b32fce..ff0ddedf98 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwideenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwideenvoyconfig.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - 
"k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumClusterwideEnvoyConfigLister helps list CiliumClusterwideEnvoyConfigs. @@ -17,39 +17,19 @@ import ( type CiliumClusterwideEnvoyConfigLister interface { // List lists all CiliumClusterwideEnvoyConfigs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumClusterwideEnvoyConfig, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumClusterwideEnvoyConfig, err error) // Get retrieves the CiliumClusterwideEnvoyConfig from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumClusterwideEnvoyConfig, error) + Get(name string) (*ciliumiov2.CiliumClusterwideEnvoyConfig, error) CiliumClusterwideEnvoyConfigListerExpansion } // ciliumClusterwideEnvoyConfigLister implements the CiliumClusterwideEnvoyConfigLister interface. type ciliumClusterwideEnvoyConfigLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumClusterwideEnvoyConfig] } // NewCiliumClusterwideEnvoyConfigLister returns a new CiliumClusterwideEnvoyConfigLister. func NewCiliumClusterwideEnvoyConfigLister(indexer cache.Indexer) CiliumClusterwideEnvoyConfigLister { - return &ciliumClusterwideEnvoyConfigLister{indexer: indexer} -} - -// List lists all CiliumClusterwideEnvoyConfigs in the indexer. -func (s *ciliumClusterwideEnvoyConfigLister) List(selector labels.Selector) (ret []*v2.CiliumClusterwideEnvoyConfig, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumClusterwideEnvoyConfig)) - }) - return ret, err -} - -// Get retrieves the CiliumClusterwideEnvoyConfig from the index for a given name. -func (s *ciliumClusterwideEnvoyConfigLister) Get(name string) (*v2.CiliumClusterwideEnvoyConfig, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumclusterwideenvoyconfig"), name) - } - return obj.(*v2.CiliumClusterwideEnvoyConfig), nil + return &ciliumClusterwideEnvoyConfigLister{listers.New[*ciliumiov2.CiliumClusterwideEnvoyConfig](indexer, ciliumiov2.Resource("ciliumclusterwideenvoyconfig"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwidenetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwidenetworkpolicy.go index 827ad05713..3a39a6e76e 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwidenetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumclusterwidenetworkpolicy.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumClusterwideNetworkPolicyLister helps list CiliumClusterwideNetworkPolicies. @@ -17,39 +17,19 @@ import ( type CiliumClusterwideNetworkPolicyLister interface { // List lists all CiliumClusterwideNetworkPolicies in the indexer. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumClusterwideNetworkPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumClusterwideNetworkPolicy, err error) // Get retrieves the CiliumClusterwideNetworkPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumClusterwideNetworkPolicy, error) + Get(name string) (*ciliumiov2.CiliumClusterwideNetworkPolicy, error) CiliumClusterwideNetworkPolicyListerExpansion } // ciliumClusterwideNetworkPolicyLister implements the CiliumClusterwideNetworkPolicyLister interface. type ciliumClusterwideNetworkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumClusterwideNetworkPolicy] } // NewCiliumClusterwideNetworkPolicyLister returns a new CiliumClusterwideNetworkPolicyLister. func NewCiliumClusterwideNetworkPolicyLister(indexer cache.Indexer) CiliumClusterwideNetworkPolicyLister { - return &ciliumClusterwideNetworkPolicyLister{indexer: indexer} -} - -// List lists all CiliumClusterwideNetworkPolicies in the indexer. -func (s *ciliumClusterwideNetworkPolicyLister) List(selector labels.Selector) (ret []*v2.CiliumClusterwideNetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumClusterwideNetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumClusterwideNetworkPolicy from the index for a given name. -func (s *ciliumClusterwideNetworkPolicyLister) Get(name string) (*v2.CiliumClusterwideNetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumclusterwidenetworkpolicy"), name) - } - return obj.(*v2.CiliumClusterwideNetworkPolicy), nil + return &ciliumClusterwideNetworkPolicyLister{listers.New[*ciliumiov2.CiliumClusterwideNetworkPolicy](indexer, ciliumiov2.Resource("ciliumclusterwidenetworkpolicy"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumegressgatewaypolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumegressgatewaypolicy.go index ec9d0e56aa..3733bb9eb4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumegressgatewaypolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumegressgatewaypolicy.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumEgressGatewayPolicyLister helps list CiliumEgressGatewayPolicies. @@ -17,39 +17,19 @@ import ( type CiliumEgressGatewayPolicyLister interface { // List lists all CiliumEgressGatewayPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumEgressGatewayPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumEgressGatewayPolicy, err error) // Get retrieves the CiliumEgressGatewayPolicy from the index for a given name. // Objects returned here must be treated as read-only. 
- Get(name string) (*v2.CiliumEgressGatewayPolicy, error) + Get(name string) (*ciliumiov2.CiliumEgressGatewayPolicy, error) CiliumEgressGatewayPolicyListerExpansion } // ciliumEgressGatewayPolicyLister implements the CiliumEgressGatewayPolicyLister interface. type ciliumEgressGatewayPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumEgressGatewayPolicy] } // NewCiliumEgressGatewayPolicyLister returns a new CiliumEgressGatewayPolicyLister. func NewCiliumEgressGatewayPolicyLister(indexer cache.Indexer) CiliumEgressGatewayPolicyLister { - return &ciliumEgressGatewayPolicyLister{indexer: indexer} -} - -// List lists all CiliumEgressGatewayPolicies in the indexer. -func (s *ciliumEgressGatewayPolicyLister) List(selector labels.Selector) (ret []*v2.CiliumEgressGatewayPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumEgressGatewayPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumEgressGatewayPolicy from the index for a given name. -func (s *ciliumEgressGatewayPolicyLister) Get(name string) (*v2.CiliumEgressGatewayPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumegressgatewaypolicy"), name) - } - return obj.(*v2.CiliumEgressGatewayPolicy), nil + return &ciliumEgressGatewayPolicyLister{listers.New[*ciliumiov2.CiliumEgressGatewayPolicy](indexer, ciliumiov2.Resource("ciliumegressgatewaypolicy"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumendpoint.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumendpoint.go index ebb6c5db97..6d3c3b26bf 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumendpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumendpoint.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumEndpointLister helps list CiliumEndpoints. @@ -17,7 +17,7 @@ import ( type CiliumEndpointLister interface { // List lists all CiliumEndpoints in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumEndpoint, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumEndpoint, err error) // CiliumEndpoints returns an object that can list and get CiliumEndpoints. CiliumEndpoints(namespace string) CiliumEndpointNamespaceLister CiliumEndpointListerExpansion @@ -25,25 +25,17 @@ type CiliumEndpointLister interface { // ciliumEndpointLister implements the CiliumEndpointLister interface. type ciliumEndpointLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumEndpoint] } // NewCiliumEndpointLister returns a new CiliumEndpointLister. func NewCiliumEndpointLister(indexer cache.Indexer) CiliumEndpointLister { - return &ciliumEndpointLister{indexer: indexer} -} - -// List lists all CiliumEndpoints in the indexer. 
-func (s *ciliumEndpointLister) List(selector labels.Selector) (ret []*v2.CiliumEndpoint, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumEndpoint)) - }) - return ret, err + return &ciliumEndpointLister{listers.New[*ciliumiov2.CiliumEndpoint](indexer, ciliumiov2.Resource("ciliumendpoint"))} } // CiliumEndpoints returns an object that can list and get CiliumEndpoints. func (s *ciliumEndpointLister) CiliumEndpoints(namespace string) CiliumEndpointNamespaceLister { - return ciliumEndpointNamespaceLister{indexer: s.indexer, namespace: namespace} + return ciliumEndpointNamespaceLister{listers.NewNamespaced[*ciliumiov2.CiliumEndpoint](s.ResourceIndexer, namespace)} } // CiliumEndpointNamespaceLister helps list and get CiliumEndpoints. @@ -51,36 +43,15 @@ func (s *ciliumEndpointLister) CiliumEndpoints(namespace string) CiliumEndpointN type CiliumEndpointNamespaceLister interface { // List lists all CiliumEndpoints in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumEndpoint, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumEndpoint, err error) // Get retrieves the CiliumEndpoint from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumEndpoint, error) + Get(name string) (*ciliumiov2.CiliumEndpoint, error) CiliumEndpointNamespaceListerExpansion } // ciliumEndpointNamespaceLister implements the CiliumEndpointNamespaceLister // interface. type ciliumEndpointNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CiliumEndpoints in the indexer for a given namespace. -func (s ciliumEndpointNamespaceLister) List(selector labels.Selector) (ret []*v2.CiliumEndpoint, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumEndpoint)) - }) - return ret, err -} - -// Get retrieves the CiliumEndpoint from the indexer for a given namespace and name. -func (s ciliumEndpointNamespaceLister) Get(name string) (*v2.CiliumEndpoint, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumendpoint"), name) - } - return obj.(*v2.CiliumEndpoint), nil + listers.ResourceIndexer[*ciliumiov2.CiliumEndpoint] } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumenvoyconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumenvoyconfig.go index eb5d0e68f5..bd29a2f6ff 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumenvoyconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumenvoyconfig.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumEnvoyConfigLister helps list CiliumEnvoyConfigs. @@ -17,7 +17,7 @@ import ( type CiliumEnvoyConfigLister interface { // List lists all CiliumEnvoyConfigs in the indexer. // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v2.CiliumEnvoyConfig, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumEnvoyConfig, err error) // CiliumEnvoyConfigs returns an object that can list and get CiliumEnvoyConfigs. CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigNamespaceLister CiliumEnvoyConfigListerExpansion @@ -25,25 +25,17 @@ type CiliumEnvoyConfigLister interface { // ciliumEnvoyConfigLister implements the CiliumEnvoyConfigLister interface. type ciliumEnvoyConfigLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumEnvoyConfig] } // NewCiliumEnvoyConfigLister returns a new CiliumEnvoyConfigLister. func NewCiliumEnvoyConfigLister(indexer cache.Indexer) CiliumEnvoyConfigLister { - return &ciliumEnvoyConfigLister{indexer: indexer} -} - -// List lists all CiliumEnvoyConfigs in the indexer. -func (s *ciliumEnvoyConfigLister) List(selector labels.Selector) (ret []*v2.CiliumEnvoyConfig, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumEnvoyConfig)) - }) - return ret, err + return &ciliumEnvoyConfigLister{listers.New[*ciliumiov2.CiliumEnvoyConfig](indexer, ciliumiov2.Resource("ciliumenvoyconfig"))} } // CiliumEnvoyConfigs returns an object that can list and get CiliumEnvoyConfigs. func (s *ciliumEnvoyConfigLister) CiliumEnvoyConfigs(namespace string) CiliumEnvoyConfigNamespaceLister { - return ciliumEnvoyConfigNamespaceLister{indexer: s.indexer, namespace: namespace} + return ciliumEnvoyConfigNamespaceLister{listers.NewNamespaced[*ciliumiov2.CiliumEnvoyConfig](s.ResourceIndexer, namespace)} } // CiliumEnvoyConfigNamespaceLister helps list and get CiliumEnvoyConfigs. @@ -51,36 +43,15 @@ func (s *ciliumEnvoyConfigLister) CiliumEnvoyConfigs(namespace string) CiliumEnv type CiliumEnvoyConfigNamespaceLister interface { // List lists all CiliumEnvoyConfigs in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumEnvoyConfig, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumEnvoyConfig, err error) // Get retrieves the CiliumEnvoyConfig from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumEnvoyConfig, error) + Get(name string) (*ciliumiov2.CiliumEnvoyConfig, error) CiliumEnvoyConfigNamespaceListerExpansion } // ciliumEnvoyConfigNamespaceLister implements the CiliumEnvoyConfigNamespaceLister // interface. type ciliumEnvoyConfigNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CiliumEnvoyConfigs in the indexer for a given namespace. -func (s ciliumEnvoyConfigNamespaceLister) List(selector labels.Selector) (ret []*v2.CiliumEnvoyConfig, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumEnvoyConfig)) - }) - return ret, err -} - -// Get retrieves the CiliumEnvoyConfig from the indexer for a given namespace and name. 
-func (s ciliumEnvoyConfigNamespaceLister) Get(name string) (*v2.CiliumEnvoyConfig, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumenvoyconfig"), name) - } - return obj.(*v2.CiliumEnvoyConfig), nil + listers.ResourceIndexer[*ciliumiov2.CiliumEnvoyConfig] } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumexternalworkload.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumexternalworkload.go index 75306ed2cd..723958964a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumexternalworkload.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumexternalworkload.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumExternalWorkloadLister helps list CiliumExternalWorkloads. @@ -17,39 +17,19 @@ import ( type CiliumExternalWorkloadLister interface { // List lists all CiliumExternalWorkloads in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumExternalWorkload, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumExternalWorkload, err error) // Get retrieves the CiliumExternalWorkload from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumExternalWorkload, error) + Get(name string) (*ciliumiov2.CiliumExternalWorkload, error) CiliumExternalWorkloadListerExpansion } // ciliumExternalWorkloadLister implements the CiliumExternalWorkloadLister interface. type ciliumExternalWorkloadLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumExternalWorkload] } // NewCiliumExternalWorkloadLister returns a new CiliumExternalWorkloadLister. func NewCiliumExternalWorkloadLister(indexer cache.Indexer) CiliumExternalWorkloadLister { - return &ciliumExternalWorkloadLister{indexer: indexer} -} - -// List lists all CiliumExternalWorkloads in the indexer. -func (s *ciliumExternalWorkloadLister) List(selector labels.Selector) (ret []*v2.CiliumExternalWorkload, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumExternalWorkload)) - }) - return ret, err -} - -// Get retrieves the CiliumExternalWorkload from the index for a given name. 
-func (s *ciliumExternalWorkloadLister) Get(name string) (*v2.CiliumExternalWorkload, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumexternalworkload"), name) - } - return obj.(*v2.CiliumExternalWorkload), nil + return &ciliumExternalWorkloadLister{listers.New[*ciliumiov2.CiliumExternalWorkload](indexer, ciliumiov2.Resource("ciliumexternalworkload"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumidentity.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumidentity.go index 81c1bbd304..01e53a2c3b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumidentity.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumidentity.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumIdentityLister helps list CiliumIdentities. @@ -17,39 +17,19 @@ import ( type CiliumIdentityLister interface { // List lists all CiliumIdentities in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumIdentity, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumIdentity, err error) // Get retrieves the CiliumIdentity from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumIdentity, error) + Get(name string) (*ciliumiov2.CiliumIdentity, error) CiliumIdentityListerExpansion } // ciliumIdentityLister implements the CiliumIdentityLister interface. type ciliumIdentityLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumIdentity] } // NewCiliumIdentityLister returns a new CiliumIdentityLister. func NewCiliumIdentityLister(indexer cache.Indexer) CiliumIdentityLister { - return &ciliumIdentityLister{indexer: indexer} -} - -// List lists all CiliumIdentities in the indexer. -func (s *ciliumIdentityLister) List(selector labels.Selector) (ret []*v2.CiliumIdentity, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumIdentity)) - }) - return ret, err -} - -// Get retrieves the CiliumIdentity from the index for a given name. 
-func (s *ciliumIdentityLister) Get(name string) (*v2.CiliumIdentity, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumidentity"), name) - } - return obj.(*v2.CiliumIdentity), nil + return &ciliumIdentityLister{listers.New[*ciliumiov2.CiliumIdentity](indexer, ciliumiov2.Resource("ciliumidentity"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumlocalredirectpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumlocalredirectpolicy.go index c0ea955adc..db68fc3ef0 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumlocalredirectpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumlocalredirectpolicy.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumLocalRedirectPolicyLister helps list CiliumLocalRedirectPolicies. @@ -17,7 +17,7 @@ import ( type CiliumLocalRedirectPolicyLister interface { // List lists all CiliumLocalRedirectPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumLocalRedirectPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumLocalRedirectPolicy, err error) // CiliumLocalRedirectPolicies returns an object that can list and get CiliumLocalRedirectPolicies. CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyNamespaceLister CiliumLocalRedirectPolicyListerExpansion @@ -25,25 +25,17 @@ type CiliumLocalRedirectPolicyLister interface { // ciliumLocalRedirectPolicyLister implements the CiliumLocalRedirectPolicyLister interface. type ciliumLocalRedirectPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumLocalRedirectPolicy] } // NewCiliumLocalRedirectPolicyLister returns a new CiliumLocalRedirectPolicyLister. func NewCiliumLocalRedirectPolicyLister(indexer cache.Indexer) CiliumLocalRedirectPolicyLister { - return &ciliumLocalRedirectPolicyLister{indexer: indexer} -} - -// List lists all CiliumLocalRedirectPolicies in the indexer. -func (s *ciliumLocalRedirectPolicyLister) List(selector labels.Selector) (ret []*v2.CiliumLocalRedirectPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumLocalRedirectPolicy)) - }) - return ret, err + return &ciliumLocalRedirectPolicyLister{listers.New[*ciliumiov2.CiliumLocalRedirectPolicy](indexer, ciliumiov2.Resource("ciliumlocalredirectpolicy"))} } // CiliumLocalRedirectPolicies returns an object that can list and get CiliumLocalRedirectPolicies. func (s *ciliumLocalRedirectPolicyLister) CiliumLocalRedirectPolicies(namespace string) CiliumLocalRedirectPolicyNamespaceLister { - return ciliumLocalRedirectPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return ciliumLocalRedirectPolicyNamespaceLister{listers.NewNamespaced[*ciliumiov2.CiliumLocalRedirectPolicy](s.ResourceIndexer, namespace)} } // CiliumLocalRedirectPolicyNamespaceLister helps list and get CiliumLocalRedirectPolicies. 
@@ -51,36 +43,15 @@ func (s *ciliumLocalRedirectPolicyLister) CiliumLocalRedirectPolicies(namespace type CiliumLocalRedirectPolicyNamespaceLister interface { // List lists all CiliumLocalRedirectPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumLocalRedirectPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumLocalRedirectPolicy, err error) // Get retrieves the CiliumLocalRedirectPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumLocalRedirectPolicy, error) + Get(name string) (*ciliumiov2.CiliumLocalRedirectPolicy, error) CiliumLocalRedirectPolicyNamespaceListerExpansion } // ciliumLocalRedirectPolicyNamespaceLister implements the CiliumLocalRedirectPolicyNamespaceLister // interface. type ciliumLocalRedirectPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CiliumLocalRedirectPolicies in the indexer for a given namespace. -func (s ciliumLocalRedirectPolicyNamespaceLister) List(selector labels.Selector) (ret []*v2.CiliumLocalRedirectPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumLocalRedirectPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumLocalRedirectPolicy from the indexer for a given namespace and name. -func (s ciliumLocalRedirectPolicyNamespaceLister) Get(name string) (*v2.CiliumLocalRedirectPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumlocalredirectpolicy"), name) - } - return obj.(*v2.CiliumLocalRedirectPolicy), nil + listers.ResourceIndexer[*ciliumiov2.CiliumLocalRedirectPolicy] } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnetworkpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnetworkpolicy.go index c32abb501f..5747872b41 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnetworkpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnetworkpolicy.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumNetworkPolicyLister helps list CiliumNetworkPolicies. @@ -17,7 +17,7 @@ import ( type CiliumNetworkPolicyLister interface { // List lists all CiliumNetworkPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumNetworkPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumNetworkPolicy, err error) // CiliumNetworkPolicies returns an object that can list and get CiliumNetworkPolicies. CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyNamespaceLister CiliumNetworkPolicyListerExpansion @@ -25,25 +25,17 @@ type CiliumNetworkPolicyLister interface { // ciliumNetworkPolicyLister implements the CiliumNetworkPolicyLister interface. 
type ciliumNetworkPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumNetworkPolicy] } // NewCiliumNetworkPolicyLister returns a new CiliumNetworkPolicyLister. func NewCiliumNetworkPolicyLister(indexer cache.Indexer) CiliumNetworkPolicyLister { - return &ciliumNetworkPolicyLister{indexer: indexer} -} - -// List lists all CiliumNetworkPolicies in the indexer. -func (s *ciliumNetworkPolicyLister) List(selector labels.Selector) (ret []*v2.CiliumNetworkPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumNetworkPolicy)) - }) - return ret, err + return &ciliumNetworkPolicyLister{listers.New[*ciliumiov2.CiliumNetworkPolicy](indexer, ciliumiov2.Resource("ciliumnetworkpolicy"))} } // CiliumNetworkPolicies returns an object that can list and get CiliumNetworkPolicies. func (s *ciliumNetworkPolicyLister) CiliumNetworkPolicies(namespace string) CiliumNetworkPolicyNamespaceLister { - return ciliumNetworkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} + return ciliumNetworkPolicyNamespaceLister{listers.NewNamespaced[*ciliumiov2.CiliumNetworkPolicy](s.ResourceIndexer, namespace)} } // CiliumNetworkPolicyNamespaceLister helps list and get CiliumNetworkPolicies. @@ -51,36 +43,15 @@ func (s *ciliumNetworkPolicyLister) CiliumNetworkPolicies(namespace string) Cili type CiliumNetworkPolicyNamespaceLister interface { // List lists all CiliumNetworkPolicies in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumNetworkPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumNetworkPolicy, err error) // Get retrieves the CiliumNetworkPolicy from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumNetworkPolicy, error) + Get(name string) (*ciliumiov2.CiliumNetworkPolicy, error) CiliumNetworkPolicyNamespaceListerExpansion } // ciliumNetworkPolicyNamespaceLister implements the CiliumNetworkPolicyNamespaceLister // interface. type ciliumNetworkPolicyNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CiliumNetworkPolicies in the indexer for a given namespace. -func (s ciliumNetworkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v2.CiliumNetworkPolicy, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumNetworkPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumNetworkPolicy from the indexer for a given namespace and name. 
-func (s ciliumNetworkPolicyNamespaceLister) Get(name string) (*v2.CiliumNetworkPolicy, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumnetworkpolicy"), name) - } - return obj.(*v2.CiliumNetworkPolicy), nil + listers.ResourceIndexer[*ciliumiov2.CiliumNetworkPolicy] } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnode.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnode.go index 4b30e09255..32e01fab8b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnode.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnode.go @@ -6,10 +6,10 @@ package v2 import ( - v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumNodeLister helps list CiliumNodes. @@ -17,39 +17,19 @@ import ( type CiliumNodeLister interface { // List lists all CiliumNodes in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2.CiliumNode, err error) + List(selector labels.Selector) (ret []*ciliumiov2.CiliumNode, err error) // Get retrieves the CiliumNode from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2.CiliumNode, error) + Get(name string) (*ciliumiov2.CiliumNode, error) CiliumNodeListerExpansion } // ciliumNodeLister implements the CiliumNodeLister interface. type ciliumNodeLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2.CiliumNode] } // NewCiliumNodeLister returns a new CiliumNodeLister. func NewCiliumNodeLister(indexer cache.Indexer) CiliumNodeLister { - return &ciliumNodeLister{indexer: indexer} -} - -// List lists all CiliumNodes in the indexer. -func (s *ciliumNodeLister) List(selector labels.Selector) (ret []*v2.CiliumNode, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2.CiliumNode)) - }) - return ret, err -} - -// Get retrieves the CiliumNode from the index for a given name. -func (s *ciliumNodeLister) Get(name string) (*v2.CiliumNode, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2.Resource("ciliumnode"), name) - } - return obj.(*v2.CiliumNode), nil + return &ciliumNodeLister{listers.New[*ciliumiov2.CiliumNode](indexer, ciliumiov2.Resource("ciliumnode"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnodeconfig.go new file mode 100644 index 0000000000..45ac73847d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/ciliumnodeconfig.go @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v2 + +import ( + ciliumiov2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumNodeConfigLister helps list CiliumNodeConfigs. +// All objects returned here must be treated as read-only. +type CiliumNodeConfigLister interface { + // List lists all CiliumNodeConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2.CiliumNodeConfig, err error) + // CiliumNodeConfigs returns an object that can list and get CiliumNodeConfigs. + CiliumNodeConfigs(namespace string) CiliumNodeConfigNamespaceLister + CiliumNodeConfigListerExpansion +} + +// ciliumNodeConfigLister implements the CiliumNodeConfigLister interface. +type ciliumNodeConfigLister struct { + listers.ResourceIndexer[*ciliumiov2.CiliumNodeConfig] +} + +// NewCiliumNodeConfigLister returns a new CiliumNodeConfigLister. +func NewCiliumNodeConfigLister(indexer cache.Indexer) CiliumNodeConfigLister { + return &ciliumNodeConfigLister{listers.New[*ciliumiov2.CiliumNodeConfig](indexer, ciliumiov2.Resource("ciliumnodeconfig"))} +} + +// CiliumNodeConfigs returns an object that can list and get CiliumNodeConfigs. +func (s *ciliumNodeConfigLister) CiliumNodeConfigs(namespace string) CiliumNodeConfigNamespaceLister { + return ciliumNodeConfigNamespaceLister{listers.NewNamespaced[*ciliumiov2.CiliumNodeConfig](s.ResourceIndexer, namespace)} +} + +// CiliumNodeConfigNamespaceLister helps list and get CiliumNodeConfigs. +// All objects returned here must be treated as read-only. +type CiliumNodeConfigNamespaceLister interface { + // List lists all CiliumNodeConfigs in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2.CiliumNodeConfig, err error) + // Get retrieves the CiliumNodeConfig from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2.CiliumNodeConfig, error) + CiliumNodeConfigNamespaceListerExpansion +} + +// ciliumNodeConfigNamespaceLister implements the CiliumNodeConfigNamespaceLister +// interface. +type ciliumNodeConfigNamespaceLister struct { + listers.ResourceIndexer[*ciliumiov2.CiliumNodeConfig] +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/expansion_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/expansion_generated.go index 79ae72836c..16fadca15b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/expansion_generated.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2/expansion_generated.go @@ -60,3 +60,11 @@ type CiliumNetworkPolicyNamespaceListerExpansion interface{} // CiliumNodeListerExpansion allows custom methods to be added to // CiliumNodeLister. type CiliumNodeListerExpansion interface{} + +// CiliumNodeConfigListerExpansion allows custom methods to be added to +// CiliumNodeConfigLister. +type CiliumNodeConfigListerExpansion interface{} + +// CiliumNodeConfigNamespaceListerExpansion allows custom methods to be added to +// CiliumNodeConfigNamespaceLister. 
+type CiliumNodeConfigNamespaceListerExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go new file mode 100644 index 0000000000..eb92c303ef --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpadvertisement.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPAdvertisementLister helps list CiliumBGPAdvertisements. +// All objects returned here must be treated as read-only. +type CiliumBGPAdvertisementLister interface { + // List lists all CiliumBGPAdvertisements in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPAdvertisement, err error) + // Get retrieves the CiliumBGPAdvertisement from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumBGPAdvertisement, error) + CiliumBGPAdvertisementListerExpansion +} + +// ciliumBGPAdvertisementLister implements the CiliumBGPAdvertisementLister interface. +type ciliumBGPAdvertisementLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPAdvertisement] +} + +// NewCiliumBGPAdvertisementLister returns a new CiliumBGPAdvertisementLister. +func NewCiliumBGPAdvertisementLister(indexer cache.Indexer) CiliumBGPAdvertisementLister { + return &ciliumBGPAdvertisementLister{listers.New[*ciliumiov2alpha1.CiliumBGPAdvertisement](indexer, ciliumiov2alpha1.Resource("ciliumbgpadvertisement"))} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go new file mode 100644 index 0000000000..a743d33c9d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpclusterconfig.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPClusterConfigLister helps list CiliumBGPClusterConfigs. +// All objects returned here must be treated as read-only. +type CiliumBGPClusterConfigLister interface { + // List lists all CiliumBGPClusterConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPClusterConfig, err error) + // Get retrieves the CiliumBGPClusterConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumBGPClusterConfig, error) + CiliumBGPClusterConfigListerExpansion +} + +// ciliumBGPClusterConfigLister implements the CiliumBGPClusterConfigLister interface. 
+type ciliumBGPClusterConfigLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPClusterConfig] +} + +// NewCiliumBGPClusterConfigLister returns a new CiliumBGPClusterConfigLister. +func NewCiliumBGPClusterConfigLister(indexer cache.Indexer) CiliumBGPClusterConfigLister { + return &ciliumBGPClusterConfigLister{listers.New[*ciliumiov2alpha1.CiliumBGPClusterConfig](indexer, ciliumiov2alpha1.Resource("ciliumbgpclusterconfig"))} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go new file mode 100644 index 0000000000..796827fb22 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfig.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigLister helps list CiliumBGPNodeConfigs. +// All objects returned here must be treated as read-only. +type CiliumBGPNodeConfigLister interface { + // List lists all CiliumBGPNodeConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPNodeConfig, err error) + // Get retrieves the CiliumBGPNodeConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumBGPNodeConfig, error) + CiliumBGPNodeConfigListerExpansion +} + +// ciliumBGPNodeConfigLister implements the CiliumBGPNodeConfigLister interface. +type ciliumBGPNodeConfigLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPNodeConfig] +} + +// NewCiliumBGPNodeConfigLister returns a new CiliumBGPNodeConfigLister. +func NewCiliumBGPNodeConfigLister(indexer cache.Indexer) CiliumBGPNodeConfigLister { + return &ciliumBGPNodeConfigLister{listers.New[*ciliumiov2alpha1.CiliumBGPNodeConfig](indexer, ciliumiov2alpha1.Resource("ciliumbgpnodeconfig"))} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go new file mode 100644 index 0000000000..ef3b2c0e10 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgpnodeconfigoverride.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPNodeConfigOverrideLister helps list CiliumBGPNodeConfigOverrides. +// All objects returned here must be treated as read-only. +type CiliumBGPNodeConfigOverrideLister interface { + // List lists all CiliumBGPNodeConfigOverrides in the indexer. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, err error) + // Get retrieves the CiliumBGPNodeConfigOverride from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumBGPNodeConfigOverride, error) + CiliumBGPNodeConfigOverrideListerExpansion +} + +// ciliumBGPNodeConfigOverrideLister implements the CiliumBGPNodeConfigOverrideLister interface. +type ciliumBGPNodeConfigOverrideLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride] +} + +// NewCiliumBGPNodeConfigOverrideLister returns a new CiliumBGPNodeConfigOverrideLister. +func NewCiliumBGPNodeConfigOverrideLister(indexer cache.Indexer) CiliumBGPNodeConfigOverrideLister { + return &ciliumBGPNodeConfigOverrideLister{listers.New[*ciliumiov2alpha1.CiliumBGPNodeConfigOverride](indexer, ciliumiov2alpha1.Resource("ciliumbgpnodeconfigoverride"))} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go new file mode 100644 index 0000000000..5dd5dc8e90 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeerconfig.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by lister-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" +) + +// CiliumBGPPeerConfigLister helps list CiliumBGPPeerConfigs. +// All objects returned here must be treated as read-only. +type CiliumBGPPeerConfigLister interface { + // List lists all CiliumBGPPeerConfigs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPPeerConfig, err error) + // Get retrieves the CiliumBGPPeerConfig from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*ciliumiov2alpha1.CiliumBGPPeerConfig, error) + CiliumBGPPeerConfigListerExpansion +} + +// ciliumBGPPeerConfigLister implements the CiliumBGPPeerConfigLister interface. +type ciliumBGPPeerConfigLister struct { + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPPeerConfig] +} + +// NewCiliumBGPPeerConfigLister returns a new CiliumBGPPeerConfigLister. 
+func NewCiliumBGPPeerConfigLister(indexer cache.Indexer) CiliumBGPPeerConfigLister { + return &ciliumBGPPeerConfigLister{listers.New[*ciliumiov2alpha1.CiliumBGPPeerConfig](indexer, ciliumiov2alpha1.Resource("ciliumbgppeerconfig"))} +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go index 29c6b221a1..99b45d2fcb 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumbgppeeringpolicy.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumBGPPeeringPolicyLister helps list CiliumBGPPeeringPolicies. @@ -17,39 +17,19 @@ import ( type CiliumBGPPeeringPolicyLister interface { // List lists all CiliumBGPPeeringPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPPeeringPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumBGPPeeringPolicy, err error) // Get retrieves the CiliumBGPPeeringPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumBGPPeeringPolicy, error) + Get(name string) (*ciliumiov2alpha1.CiliumBGPPeeringPolicy, error) CiliumBGPPeeringPolicyListerExpansion } // ciliumBGPPeeringPolicyLister implements the CiliumBGPPeeringPolicyLister interface. type ciliumBGPPeeringPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumBGPPeeringPolicy] } // NewCiliumBGPPeeringPolicyLister returns a new CiliumBGPPeeringPolicyLister. func NewCiliumBGPPeeringPolicyLister(indexer cache.Indexer) CiliumBGPPeeringPolicyLister { - return &ciliumBGPPeeringPolicyLister{indexer: indexer} -} - -// List lists all CiliumBGPPeeringPolicies in the indexer. -func (s *ciliumBGPPeeringPolicyLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumBGPPeeringPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumBGPPeeringPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumBGPPeeringPolicy from the index for a given name. 
-func (s *ciliumBGPPeeringPolicyLister) Get(name string) (*v2alpha1.CiliumBGPPeeringPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumbgppeeringpolicy"), name) - } - return obj.(*v2alpha1.CiliumBGPPeeringPolicy), nil + return &ciliumBGPPeeringPolicyLister{listers.New[*ciliumiov2alpha1.CiliumBGPPeeringPolicy](indexer, ciliumiov2alpha1.Resource("ciliumbgppeeringpolicy"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumcidrgroup.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumcidrgroup.go index e3aa75c008..7137723ae6 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumcidrgroup.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumcidrgroup.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumCIDRGroupLister helps list CiliumCIDRGroups. @@ -17,39 +17,19 @@ import ( type CiliumCIDRGroupLister interface { // List lists all CiliumCIDRGroups in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumCIDRGroup, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumCIDRGroup, err error) // Get retrieves the CiliumCIDRGroup from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumCIDRGroup, error) + Get(name string) (*ciliumiov2alpha1.CiliumCIDRGroup, error) CiliumCIDRGroupListerExpansion } // ciliumCIDRGroupLister implements the CiliumCIDRGroupLister interface. type ciliumCIDRGroupLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumCIDRGroup] } // NewCiliumCIDRGroupLister returns a new CiliumCIDRGroupLister. func NewCiliumCIDRGroupLister(indexer cache.Indexer) CiliumCIDRGroupLister { - return &ciliumCIDRGroupLister{indexer: indexer} -} - -// List lists all CiliumCIDRGroups in the indexer. -func (s *ciliumCIDRGroupLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumCIDRGroup, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumCIDRGroup)) - }) - return ret, err -} - -// Get retrieves the CiliumCIDRGroup from the index for a given name. 
-func (s *ciliumCIDRGroupLister) Get(name string) (*v2alpha1.CiliumCIDRGroup, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumcidrgroup"), name) - } - return obj.(*v2alpha1.CiliumCIDRGroup), nil + return &ciliumCIDRGroupLister{listers.New[*ciliumiov2alpha1.CiliumCIDRGroup](indexer, ciliumiov2alpha1.Resource("ciliumcidrgroup"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumendpointslice.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumendpointslice.go index 8b12f3dc04..b805160416 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumendpointslice.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumendpointslice.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumEndpointSliceLister helps list CiliumEndpointSlices. @@ -17,39 +17,19 @@ import ( type CiliumEndpointSliceLister interface { // List lists all CiliumEndpointSlices in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumEndpointSlice, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumEndpointSlice, err error) // Get retrieves the CiliumEndpointSlice from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumEndpointSlice, error) + Get(name string) (*ciliumiov2alpha1.CiliumEndpointSlice, error) CiliumEndpointSliceListerExpansion } // ciliumEndpointSliceLister implements the CiliumEndpointSliceLister interface. type ciliumEndpointSliceLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumEndpointSlice] } // NewCiliumEndpointSliceLister returns a new CiliumEndpointSliceLister. func NewCiliumEndpointSliceLister(indexer cache.Indexer) CiliumEndpointSliceLister { - return &ciliumEndpointSliceLister{indexer: indexer} -} - -// List lists all CiliumEndpointSlices in the indexer. -func (s *ciliumEndpointSliceLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumEndpointSlice, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumEndpointSlice)) - }) - return ret, err -} - -// Get retrieves the CiliumEndpointSlice from the index for a given name. 
-func (s *ciliumEndpointSliceLister) Get(name string) (*v2alpha1.CiliumEndpointSlice, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumendpointslice"), name) - } - return obj.(*v2alpha1.CiliumEndpointSlice), nil + return &ciliumEndpointSliceLister{listers.New[*ciliumiov2alpha1.CiliumEndpointSlice](indexer, ciliumiov2alpha1.Resource("ciliumendpointslice"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliuml2announcementpolicy.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliuml2announcementpolicy.go index cf33b37427..c66731144f 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliuml2announcementpolicy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliuml2announcementpolicy.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumL2AnnouncementPolicyLister helps list CiliumL2AnnouncementPolicies. @@ -17,39 +17,19 @@ import ( type CiliumL2AnnouncementPolicyLister interface { // List lists all CiliumL2AnnouncementPolicies in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumL2AnnouncementPolicy, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, err error) // Get retrieves the CiliumL2AnnouncementPolicy from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumL2AnnouncementPolicy, error) + Get(name string) (*ciliumiov2alpha1.CiliumL2AnnouncementPolicy, error) CiliumL2AnnouncementPolicyListerExpansion } // ciliumL2AnnouncementPolicyLister implements the CiliumL2AnnouncementPolicyLister interface. type ciliumL2AnnouncementPolicyLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy] } // NewCiliumL2AnnouncementPolicyLister returns a new CiliumL2AnnouncementPolicyLister. func NewCiliumL2AnnouncementPolicyLister(indexer cache.Indexer) CiliumL2AnnouncementPolicyLister { - return &ciliumL2AnnouncementPolicyLister{indexer: indexer} -} - -// List lists all CiliumL2AnnouncementPolicies in the indexer. -func (s *ciliumL2AnnouncementPolicyLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumL2AnnouncementPolicy, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumL2AnnouncementPolicy)) - }) - return ret, err -} - -// Get retrieves the CiliumL2AnnouncementPolicy from the index for a given name. 
-func (s *ciliumL2AnnouncementPolicyLister) Get(name string) (*v2alpha1.CiliumL2AnnouncementPolicy, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliuml2announcementpolicy"), name) - } - return obj.(*v2alpha1.CiliumL2AnnouncementPolicy), nil + return &ciliumL2AnnouncementPolicyLister{listers.New[*ciliumiov2alpha1.CiliumL2AnnouncementPolicy](indexer, ciliumiov2alpha1.Resource("ciliuml2announcementpolicy"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumloadbalancerippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumloadbalancerippool.go index fdc950f3d2..0b2abee1d4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumloadbalancerippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumloadbalancerippool.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumLoadBalancerIPPoolLister helps list CiliumLoadBalancerIPPools. @@ -17,39 +17,19 @@ import ( type CiliumLoadBalancerIPPoolLister interface { // List lists all CiliumLoadBalancerIPPools in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumLoadBalancerIPPool, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumLoadBalancerIPPool, err error) // Get retrieves the CiliumLoadBalancerIPPool from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumLoadBalancerIPPool, error) + Get(name string) (*ciliumiov2alpha1.CiliumLoadBalancerIPPool, error) CiliumLoadBalancerIPPoolListerExpansion } // ciliumLoadBalancerIPPoolLister implements the CiliumLoadBalancerIPPoolLister interface. type ciliumLoadBalancerIPPoolLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumLoadBalancerIPPool] } // NewCiliumLoadBalancerIPPoolLister returns a new CiliumLoadBalancerIPPoolLister. func NewCiliumLoadBalancerIPPoolLister(indexer cache.Indexer) CiliumLoadBalancerIPPoolLister { - return &ciliumLoadBalancerIPPoolLister{indexer: indexer} -} - -// List lists all CiliumLoadBalancerIPPools in the indexer. -func (s *ciliumLoadBalancerIPPoolLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumLoadBalancerIPPool, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumLoadBalancerIPPool)) - }) - return ret, err -} - -// Get retrieves the CiliumLoadBalancerIPPool from the index for a given name. 
-func (s *ciliumLoadBalancerIPPoolLister) Get(name string) (*v2alpha1.CiliumLoadBalancerIPPool, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumloadbalancerippool"), name) - } - return obj.(*v2alpha1.CiliumLoadBalancerIPPool), nil + return &ciliumLoadBalancerIPPoolLister{listers.New[*ciliumiov2alpha1.CiliumLoadBalancerIPPool](indexer, ciliumiov2alpha1.Resource("ciliumloadbalancerippool"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumnodeconfig.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumnodeconfig.go index 0ad5c0315a..8d655359e9 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumnodeconfig.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumnodeconfig.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumNodeConfigLister helps list CiliumNodeConfigs. @@ -17,7 +17,7 @@ import ( type CiliumNodeConfigLister interface { // List lists all CiliumNodeConfigs in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumNodeConfig, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumNodeConfig, err error) // CiliumNodeConfigs returns an object that can list and get CiliumNodeConfigs. CiliumNodeConfigs(namespace string) CiliumNodeConfigNamespaceLister CiliumNodeConfigListerExpansion @@ -25,25 +25,17 @@ type CiliumNodeConfigLister interface { // ciliumNodeConfigLister implements the CiliumNodeConfigLister interface. type ciliumNodeConfigLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumNodeConfig] } // NewCiliumNodeConfigLister returns a new CiliumNodeConfigLister. func NewCiliumNodeConfigLister(indexer cache.Indexer) CiliumNodeConfigLister { - return &ciliumNodeConfigLister{indexer: indexer} -} - -// List lists all CiliumNodeConfigs in the indexer. -func (s *ciliumNodeConfigLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumNodeConfig, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumNodeConfig)) - }) - return ret, err + return &ciliumNodeConfigLister{listers.New[*ciliumiov2alpha1.CiliumNodeConfig](indexer, ciliumiov2alpha1.Resource("ciliumnodeconfig"))} } // CiliumNodeConfigs returns an object that can list and get CiliumNodeConfigs. func (s *ciliumNodeConfigLister) CiliumNodeConfigs(namespace string) CiliumNodeConfigNamespaceLister { - return ciliumNodeConfigNamespaceLister{indexer: s.indexer, namespace: namespace} + return ciliumNodeConfigNamespaceLister{listers.NewNamespaced[*ciliumiov2alpha1.CiliumNodeConfig](s.ResourceIndexer, namespace)} } // CiliumNodeConfigNamespaceLister helps list and get CiliumNodeConfigs. @@ -51,36 +43,15 @@ func (s *ciliumNodeConfigLister) CiliumNodeConfigs(namespace string) CiliumNodeC type CiliumNodeConfigNamespaceLister interface { // List lists all CiliumNodeConfigs in the indexer for a given namespace. 
// Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumNodeConfig, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumNodeConfig, err error) // Get retrieves the CiliumNodeConfig from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumNodeConfig, error) + Get(name string) (*ciliumiov2alpha1.CiliumNodeConfig, error) CiliumNodeConfigNamespaceListerExpansion } // ciliumNodeConfigNamespaceLister implements the CiliumNodeConfigNamespaceLister // interface. type ciliumNodeConfigNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all CiliumNodeConfigs in the indexer for a given namespace. -func (s ciliumNodeConfigNamespaceLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumNodeConfig, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumNodeConfig)) - }) - return ret, err -} - -// Get retrieves the CiliumNodeConfig from the indexer for a given namespace and name. -func (s ciliumNodeConfigNamespaceLister) Get(name string) (*v2alpha1.CiliumNodeConfig, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumnodeconfig"), name) - } - return obj.(*v2alpha1.CiliumNodeConfig), nil + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumNodeConfig] } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumpodippool.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumpodippool.go index c48ef82837..8bc1f2a92c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumpodippool.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/ciliumpodippool.go @@ -6,10 +6,10 @@ package v2alpha1 import ( - v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" + ciliumiov2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + labels "k8s.io/apimachinery/pkg/labels" + listers "k8s.io/client-go/listers" + cache "k8s.io/client-go/tools/cache" ) // CiliumPodIPPoolLister helps list CiliumPodIPPools. @@ -17,39 +17,19 @@ import ( type CiliumPodIPPoolLister interface { // List lists all CiliumPodIPPools in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v2alpha1.CiliumPodIPPool, err error) + List(selector labels.Selector) (ret []*ciliumiov2alpha1.CiliumPodIPPool, err error) // Get retrieves the CiliumPodIPPool from the index for a given name. // Objects returned here must be treated as read-only. - Get(name string) (*v2alpha1.CiliumPodIPPool, error) + Get(name string) (*ciliumiov2alpha1.CiliumPodIPPool, error) CiliumPodIPPoolListerExpansion } // ciliumPodIPPoolLister implements the CiliumPodIPPoolLister interface. type ciliumPodIPPoolLister struct { - indexer cache.Indexer + listers.ResourceIndexer[*ciliumiov2alpha1.CiliumPodIPPool] } // NewCiliumPodIPPoolLister returns a new CiliumPodIPPoolLister. func NewCiliumPodIPPoolLister(indexer cache.Indexer) CiliumPodIPPoolLister { - return &ciliumPodIPPoolLister{indexer: indexer} -} - -// List lists all CiliumPodIPPools in the indexer. 
-func (s *ciliumPodIPPoolLister) List(selector labels.Selector) (ret []*v2alpha1.CiliumPodIPPool, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v2alpha1.CiliumPodIPPool)) - }) - return ret, err -} - -// Get retrieves the CiliumPodIPPool from the index for a given name. -func (s *ciliumPodIPPoolLister) Get(name string) (*v2alpha1.CiliumPodIPPool, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v2alpha1.Resource("ciliumpodippool"), name) - } - return obj.(*v2alpha1.CiliumPodIPPool), nil + return &ciliumPodIPPoolLister{listers.New[*ciliumiov2alpha1.CiliumPodIPPool](indexer, ciliumiov2alpha1.Resource("ciliumpodippool"))} } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go index 8d348d108d..0a2e72c1f3 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/client/listers/cilium.io/v2alpha1/expansion_generated.go @@ -5,6 +5,26 @@ package v2alpha1 +// CiliumBGPAdvertisementListerExpansion allows custom methods to be added to +// CiliumBGPAdvertisementLister. +type CiliumBGPAdvertisementListerExpansion interface{} + +// CiliumBGPClusterConfigListerExpansion allows custom methods to be added to +// CiliumBGPClusterConfigLister. +type CiliumBGPClusterConfigListerExpansion interface{} + +// CiliumBGPNodeConfigListerExpansion allows custom methods to be added to +// CiliumBGPNodeConfigLister. +type CiliumBGPNodeConfigListerExpansion interface{} + +// CiliumBGPNodeConfigOverrideListerExpansion allows custom methods to be added to +// CiliumBGPNodeConfigOverrideLister. +type CiliumBGPNodeConfigOverrideListerExpansion interface{} + +// CiliumBGPPeerConfigListerExpansion allows custom methods to be added to +// CiliumBGPPeerConfigLister. +type CiliumBGPPeerConfigListerExpansion interface{} + // CiliumBGPPeeringPolicyListerExpansion allows custom methods to be added to // CiliumBGPPeeringPolicyLister. type CiliumBGPPeeringPolicyListerExpansion interface{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/doc.go index bbb73f80cc..014e72e567 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/doc.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/doc.go @@ -6,6 +6,7 @@ // +k8s:protobuf-gen=package // +deepequal-gen=package // +groupGoName=core +// +groupName= -// Package v1 contains slimmer versions of k8s core types. +// Package v1 is the v1 version of the core API. 
package v1 diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go index 63de1bc6a4..6ab9ecf4d7 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go @@ -733,10 +733,38 @@ func (m *PodList) XXX_DiscardUnknown() { var xxx_messageInfo_PodList proto.InternalMessageInfo +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_871504499faea14d, []int{25} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) +} + +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} func (*PodSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{25} + return fileDescriptor_871504499faea14d, []int{26} } func (m *PodSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -764,7 +792,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} func (*PodStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{26} + return fileDescriptor_871504499faea14d, []int{27} } func (m *PodStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -792,7 +820,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo func (m *PortStatus) Reset() { *m = PortStatus{} } func (*PortStatus) ProtoMessage() {} func (*PortStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{27} + return fileDescriptor_871504499faea14d, []int{28} } func (m *PortStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -820,7 +848,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{28} + return fileDescriptor_871504499faea14d, []int{29} } func (m *Secret) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -848,7 +876,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{29} + return fileDescriptor_871504499faea14d, []int{30} } func (m *SecretList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -876,7 +904,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{30} + return fileDescriptor_871504499faea14d, []int{31} } func (m *Service) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -904,7 +932,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{31} + return fileDescriptor_871504499faea14d, []int{32} } func (m *ServiceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -932,7 +960,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{32} + return fileDescriptor_871504499faea14d, []int{33} } func (m *ServicePort) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -960,7 +988,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{33} + return fileDescriptor_871504499faea14d, []int{34} } func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -988,7 +1016,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{34} + return fileDescriptor_871504499faea14d, []int{35} } func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1016,7 +1044,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{35} + return fileDescriptor_871504499faea14d, []int{36} } func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1044,7 +1072,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{36} + return fileDescriptor_871504499faea14d, []int{37} } func (m *Taint) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1072,7 +1100,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } func (*TypedLocalObjectReference) ProtoMessage() {} func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{37} + return fileDescriptor_871504499faea14d, []int{38} } func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1100,7 +1128,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_871504499faea14d, []int{38} + return fileDescriptor_871504499faea14d, []int{39} } func (m *VolumeMount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1151,6 +1179,7 @@ func init() { proto.RegisterType((*PodCondition)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodCondition") 
proto.RegisterType((*PodIP)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodIP") proto.RegisterType((*PodList)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodList") + proto.RegisterType((*PodReadinessGate)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodReadinessGate") proto.RegisterType((*PodSpec)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodSpec") proto.RegisterType((*PodStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PodStatus") proto.RegisterType((*PortStatus)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1.PortStatus") @@ -1175,172 +1204,182 @@ func init() { } var fileDescriptor_871504499faea14d = []byte{ - // 2636 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcf, 0x6f, 0x23, 0x49, - 0xf5, 0x9f, 0xb6, 0xe3, 0xc4, 0x7e, 0x4e, 0x9c, 0x4d, 0xcd, 0x66, 0xd5, 0x93, 0xef, 0x6c, 0x92, - 0x6f, 0x0f, 0x42, 0xc3, 0x2f, 0x5b, 0x33, 0x10, 0x98, 0x9d, 0xd9, 0x19, 0x26, 0x76, 0x32, 0x3b, - 0xde, 0x4d, 0x66, 0x9b, 0x72, 0xb4, 0x20, 0x38, 0xb0, 0x9d, 0xee, 0x8a, 0xdd, 0x1b, 0xbb, 0xdb, - 0x74, 0x95, 0x33, 0x6b, 0x09, 0xc1, 0x22, 0x04, 0x5a, 0x60, 0x24, 0xf8, 0x07, 0xb8, 0x70, 0xe3, - 0x0c, 0x17, 0xc4, 0x19, 0x31, 0xdc, 0x96, 0xdb, 0x08, 0xa1, 0xb0, 0x13, 0x24, 0x6e, 0x1c, 0x10, - 0x27, 0x82, 0x84, 0x50, 0x55, 0x57, 0x57, 0x77, 0x3b, 0xf6, 0x4c, 0xc6, 0x8e, 0x14, 0x71, 0x72, - 0xf7, 0x7b, 0xaf, 0xde, 0xa7, 0x5e, 0xd5, 0xab, 0xf7, 0xa3, 0xdc, 0xb0, 0xd9, 0x74, 0x59, 0xab, - 0xb7, 0x5b, 0xb6, 0xfd, 0x4e, 0xc5, 0x76, 0xdb, 0x6e, 0x4f, 0xfd, 0x74, 0xf7, 0x9b, 0x95, 0xfd, - 0x1b, 0xb4, 0x42, 0xdb, 0x6e, 0x47, 0x3c, 0x58, 0x5d, 0xb7, 0x62, 0xfb, 0x01, 0xa9, 0x1c, 0x5c, - 0xab, 0x34, 0x89, 0x47, 0x02, 0x8b, 0x11, 0xa7, 0xdc, 0x0d, 0x7c, 0xe6, 0xa3, 0xb5, 0x58, 0x4d, - 0x39, 0x1c, 0x1f, 0xfd, 0x74, 0xf7, 0x9b, 0xe5, 0xfd, 0x1b, 0xb4, 0xcc, 0xd5, 0x88, 0x07, 0xab, - 0xeb, 0x96, 0xb9, 0x9a, 0xf2, 0xc1, 0xb5, 0xa5, 0x7b, 0x2f, 0x84, 0x4e, 0x2b, 0x1d, 0xc2, 0xac, - 0x21, 0xf0, 0x4b, 0x9f, 0x4b, 0xe8, 0x69, 0xfa, 0x4d, 0xbf, 0x22, 0xc8, 0xbb, 0xbd, 0x3d, 0xf1, - 0x26, 0x5e, 0xc4, 0x93, 0x14, 0xe7, 0x0a, 0xcb, 0xae, 0xcf, 0x75, 0x76, 0x2c, 0xbb, 0xe5, 0x7a, - 0x24, 0xe8, 0x0b, 0xc4, 0xa0, 0xe7, 0x31, 0xb7, 0x43, 0x4e, 0xe8, 0xff, 0xe2, 0xf3, 0x06, 0x50, - 0xbb, 0x45, 0x3a, 0xd6, 0xe0, 0x38, 0x63, 0x0b, 0x4a, 0xb5, 0xb6, 0x4b, 0x3c, 0x56, 0x37, 0x6b, - 0xbe, 0xb7, 0xe7, 0x36, 0xd1, 0x4d, 0x28, 0xf1, 0x01, 0x7e, 0x8f, 0x35, 0x88, 0xed, 0x7b, 0x0e, - 0xd5, 0xb5, 0x55, 0xed, 0x6a, 0xae, 0x8a, 0x8e, 0x0e, 0x57, 0x4a, 0x3b, 0x29, 0x0e, 0x1e, 0x90, - 0x34, 0x7e, 0x9b, 0x81, 0x42, 0xcd, 0xf7, 0x98, 0xc5, 0xf1, 0xd1, 0x2a, 0x4c, 0x79, 0x56, 0x87, - 0x88, 0xf1, 0x85, 0xea, 0xec, 0xe3, 0xc3, 0x95, 0x0b, 0x47, 0x87, 0x2b, 0x53, 0x0f, 0xac, 0x0e, - 0xc1, 0x82, 0x83, 0xae, 0x40, 0xce, 0xed, 0x58, 0x4d, 0xa2, 0x67, 0x84, 0xc8, 0x9c, 0x14, 0xc9, - 0xd5, 0x39, 0x11, 0x87, 0x3c, 0xe4, 0x42, 0xae, 0xeb, 0x07, 0x8c, 0xea, 0xd3, 0xab, 0xd9, 0xab, - 0xc5, 0xeb, 0x1b, 0xe5, 0xb1, 0x76, 0xb2, 0xac, 0xe6, 0x65, 0xfa, 0x01, 0x8b, 0xa1, 0xf8, 0x1b, - 0xc5, 0x21, 0x02, 0xfa, 0x36, 0xcc, 0x1e, 0xf8, 0xed, 0x5e, 0x87, 0x6c, 0xfb, 0x3d, 0x8f, 0x51, - 0xbd, 0x20, 0x10, 0xab, 0x63, 0x22, 0xbe, 0x13, 0xab, 0xaa, 0xbe, 0x2c, 0xf1, 0x66, 0x13, 0x44, - 0x8a, 0x53, 0x68, 0xc6, 0x7f, 0x34, 0x98, 0x4b, 0xcd, 0xf2, 0x14, 0x2b, 0xf8, 0x59, 0xc8, 0xb7, - 0x7c, 0xca, 0xb8, 0xb4, 0x58, 0xc4, 0x5c, 0xf5, 0x25, 0x29, 0x95, 0xbf, 0x2f, 0xe9, 0x58, 0x49, - 0xa0, 0x5b, 0x30, 0x67, 0x27, 0x01, 
0xf4, 0xac, 0x18, 0xb2, 0x28, 0x87, 0xa4, 0xd1, 0x71, 0x5a, - 0x16, 0xdd, 0x80, 0xbc, 0xf0, 0x19, 0xdb, 0x6f, 0xeb, 0x53, 0x62, 0x42, 0x97, 0x23, 0x28, 0x53, - 0xd2, 0x8f, 0x13, 0xcf, 0x58, 0x49, 0xa3, 0x4f, 0xc2, 0x34, 0x9f, 0x42, 0xdd, 0xd4, 0x73, 0x62, - 0x5c, 0x49, 0x8e, 0x9b, 0xbe, 0x2f, 0xa8, 0x58, 0x72, 0x8d, 0x1f, 0x68, 0x50, 0x52, 0x53, 0x68, - 0x30, 0x8b, 0x11, 0x44, 0x61, 0x26, 0xe8, 0x79, 0x9e, 0xeb, 0x35, 0x85, 0x79, 0xc5, 0xeb, 0x5b, - 0x93, 0x6e, 0xbf, 0xd0, 0x8b, 0x43, 0x9d, 0xd5, 0xe2, 0xd1, 0xe1, 0xca, 0x8c, 0x7c, 0xc1, 0x11, - 0x92, 0xf1, 0x43, 0x0d, 0x16, 0x87, 0xca, 0xa3, 0x0e, 0x14, 0x28, 0xb3, 0x02, 0x46, 0x9c, 0x75, - 0x26, 0x76, 0xa5, 0x78, 0xfd, 0xf5, 0x17, 0x9b, 0x10, 0x2d, 0xf3, 0x10, 0xc1, 0x67, 0xc4, 0xcf, - 0x52, 0x75, 0x41, 0x2e, 0x45, 0xa1, 0x11, 0xa9, 0xc5, 0x31, 0x82, 0xf1, 0x6b, 0x0d, 0xe6, 0x53, - 0x13, 0xe9, 0x51, 0xf4, 0x1e, 0xe4, 0x28, 0x9f, 0x92, 0x5c, 0x8f, 0xcd, 0x33, 0x59, 0x8f, 0xf8, - 0x3c, 0x84, 0xe6, 0x86, 0x10, 0x68, 0x0d, 0x8a, 0xca, 0x07, 0xea, 0x1b, 0x7a, 0x5e, 0xec, 0xde, - 0x45, 0x29, 0x5a, 0xac, 0xc5, 0x2c, 0x9c, 0x94, 0x33, 0xbe, 0x0a, 0xf3, 0x9b, 0x9e, 0xd3, 0xf5, - 0x5d, 0x8f, 0xad, 0x3b, 0x4e, 0x40, 0x28, 0x45, 0x4b, 0x90, 0x71, 0xbb, 0xd2, 0x8f, 0x41, 0x2a, - 0xc8, 0xd4, 0x4d, 0x9c, 0x71, 0xbb, 0xe8, 0x2a, 0xe4, 0x3d, 0xdf, 0x21, 0xdc, 0xab, 0xa5, 0x63, - 0xcd, 0x72, 0xa7, 0x7a, 0x20, 0x69, 0x58, 0x71, 0x8d, 0x47, 0x1a, 0xcc, 0x46, 0x9a, 0x4f, 0x79, - 0x40, 0x56, 0x61, 0xaa, 0x1b, 0x1f, 0x0e, 0x25, 0x21, 0x1c, 0x5c, 0x70, 0x52, 0x7e, 0x9d, 0x7d, - 0x11, 0xbf, 0x36, 0xfe, 0xa9, 0x41, 0x29, 0x9a, 0x4e, 0xa3, 0xb7, 0x4b, 0x09, 0x43, 0x0f, 0xa1, - 0x60, 0x85, 0x26, 0x13, 0x1e, 0x38, 0x79, 0xf8, 0xb8, 0x37, 0xe6, 0x0e, 0x0d, 0x2c, 0x61, 0xec, - 0x2a, 0xeb, 0x11, 0x00, 0x8e, 0xb1, 0x50, 0x2b, 0x8a, 0x92, 0x59, 0x01, 0x5a, 0x9b, 0x10, 0x74, - 0x74, 0x90, 0x34, 0xfe, 0xa1, 0x41, 0x21, 0x12, 0xa3, 0x28, 0x80, 0x3c, 0x77, 0x68, 0xc7, 0x62, - 0x96, 0x3c, 0x10, 0xd5, 0x71, 0x0f, 0xc4, 0xdb, 0xbb, 0xef, 0x11, 0x9b, 0x6d, 0x13, 0x66, 0x55, - 0x91, 0x44, 0x86, 0x98, 0x86, 0x15, 0x0e, 0xea, 0xc2, 0x0c, 0x15, 0xcb, 0x4d, 0xf5, 0x8c, 0xb0, - 0x76, 0x73, 0x42, 0x6b, 0xc3, 0xcd, 0xab, 0xce, 0x4b, 0xd4, 0x99, 0xf0, 0x9d, 0xe2, 0x08, 0xc6, - 0xf8, 0x9b, 0x06, 0x73, 0xca, 0xe6, 0x2d, 0x97, 0x32, 0xe4, 0x9d, 0xb0, 0xfb, 0xee, 0xb8, 0x76, - 0x73, 0x7d, 0xc2, 0x6a, 0x15, 0xba, 0x23, 0x4a, 0xc2, 0x66, 0x02, 0x39, 0x97, 0x91, 0x4e, 0x64, - 0xf1, 0xdd, 0x09, 0x2d, 0xa6, 0x89, 0x64, 0xcb, 0xd5, 0xe2, 0x50, 0xbb, 0xf1, 0x7b, 0x0d, 0x2e, - 0x6e, 0xf9, 0x96, 0x53, 0xb5, 0xda, 0x96, 0x67, 0x93, 0xa0, 0xee, 0x35, 0x9f, 0x7b, 0x7e, 0x65, - 0x0e, 0x12, 0x07, 0x31, 0x4c, 0xe4, 0xa9, 0x1c, 0xe4, 0x89, 0x33, 0x1c, 0x49, 0xa0, 0xbd, 0xc8, - 0x51, 0xa7, 0x84, 0x21, 0xeb, 0x63, 0x1a, 0xc2, 0x5d, 0x32, 0x8c, 0x88, 0x23, 0xdc, 0xf4, 0x27, - 0x1a, 0xa0, 0xa4, 0x25, 0x32, 0x7c, 0xf6, 0x60, 0xc6, 0x0d, 0x6d, 0x92, 0xc7, 0xf3, 0xcd, 0x31, - 0x27, 0x30, 0x64, 0x95, 0x62, 0x07, 0x92, 0x04, 0x1c, 0x61, 0x19, 0xdf, 0x85, 0x02, 0x0f, 0x4a, - 0xb4, 0x6b, 0xd9, 0xe4, 0x3c, 0xce, 0x8c, 0xf0, 0x60, 0x35, 0x83, 0xff, 0x65, 0x0f, 0x56, 0x46, - 0x8c, 0xf0, 0xe0, 0xc7, 0x19, 0x98, 0xe2, 0xa9, 0xe3, 0x5c, 0x22, 0x93, 0x05, 0x53, 0xb4, 0x4b, - 0x6c, 0x99, 0x9b, 0xbf, 0x3c, 0xae, 0x89, 0xbe, 0x43, 0x1a, 0x5d, 0x62, 0xc7, 0xe9, 0x8a, 0xbf, - 0x61, 0xa1, 0x1a, 0xb9, 0x30, 0x4d, 0x85, 0x2b, 0x8b, 0x64, 0x35, 0xfe, 0x01, 0x12, 0x20, 0xe1, - 0x01, 0x52, 0xf5, 0x58, 0xf8, 0x8e, 0x25, 0x80, 0xd1, 0x81, 0x22, 0x97, 0x8a, 0x72, 0xf8, 0xe7, - 0x61, 0x8a, 0xf5, 0xbb, 0x51, 0xb2, 0x5d, 0x89, 0xe6, 0xb6, 
0xd3, 0xef, 0x92, 0xe3, 0xc3, 0x95, - 0xf9, 0x84, 0x28, 0x27, 0x61, 0x21, 0x8c, 0x3e, 0x05, 0x33, 0x32, 0x49, 0xc9, 0xd8, 0xa0, 0xce, - 0x88, 0x94, 0xc5, 0x11, 0xdf, 0xf8, 0x25, 0x77, 0x51, 0xdf, 0x21, 0x35, 0xdf, 0x73, 0x5c, 0xe6, - 0xfa, 0x1e, 0x5a, 0x4b, 0x21, 0xfe, 0xff, 0x00, 0xe2, 0x42, 0x4a, 0x38, 0x81, 0xf9, 0x9a, 0x5a, - 0xa2, 0x4c, 0x6a, 0xa0, 0xb4, 0x8f, 0x4f, 0x56, 0x0d, 0x4b, 0x9b, 0xcc, 0x4b, 0xd5, 0x80, 0x58, - 0xd4, 0xf7, 0x06, 0x4b, 0x55, 0x2c, 0xa8, 0x58, 0x72, 0x8d, 0x3f, 0x6b, 0x20, 0x0a, 0x94, 0x73, - 0x39, 0x49, 0xef, 0xa6, 0x4f, 0xd2, 0xad, 0x09, 0x3c, 0x60, 0xc4, 0x21, 0xfa, 0x97, 0x34, 0x8f, - 0xfb, 0x1d, 0xdf, 0xc2, 0xae, 0xef, 0xd4, 0xea, 0x1b, 0x58, 0x6e, 0x84, 0xda, 0x42, 0x33, 0x24, - 0xe3, 0x88, 0xcf, 0x4b, 0x39, 0xf9, 0x48, 0xf5, 0x99, 0xd5, 0x6c, 0x54, 0xca, 0x49, 0x39, 0x8a, - 0x15, 0x17, 0x5d, 0x07, 0xe8, 0x06, 0xfe, 0x81, 0xeb, 0x88, 0xca, 0x32, 0xac, 0xbb, 0xd4, 0xd9, - 0x32, 0x15, 0x07, 0x27, 0xa4, 0x90, 0x03, 0xd3, 0xbc, 0xc6, 0x64, 0x54, 0xcf, 0x09, 0xc3, 0x5f, - 0x1f, 0xd3, 0xf0, 0x1d, 0xae, 0x24, 0xde, 0x5a, 0xf1, 0x4a, 0xb1, 0xd4, 0x6d, 0xfc, 0x5b, 0x03, - 0x88, 0x0f, 0x07, 0x7a, 0x1f, 0xc0, 0x8e, 0x9c, 0x25, 0x4a, 0x5a, 0x1b, 0x13, 0xac, 0xb8, 0xf2, - 0xbc, 0xd8, 0x5c, 0x45, 0xa2, 0x38, 0x81, 0x85, 0x68, 0xb2, 0x96, 0xcc, 0x4d, 0xd4, 0x8a, 0x26, - 0xce, 0xe6, 0xb3, 0xeb, 0x48, 0xe3, 0x77, 0x19, 0xc8, 0x9a, 0xbe, 0x73, 0x2e, 0xd1, 0xf3, 0xdd, - 0x54, 0xf4, 0xbc, 0x33, 0x76, 0x65, 0xe0, 0x8c, 0x0c, 0x9e, 0xad, 0x81, 0xe0, 0x79, 0x77, 0x02, - 0x8c, 0x67, 0xc7, 0xce, 0x27, 0x59, 0x98, 0xe5, 0x6e, 0xaf, 0x62, 0xd9, 0x17, 0x52, 0xb1, 0x6c, - 0x75, 0x20, 0x96, 0xbd, 0x94, 0x94, 0x3d, 0x9b, 0x50, 0xd6, 0x87, 0xb9, 0xb6, 0x45, 0x99, 0x19, - 0xf8, 0xbb, 0x84, 0xf7, 0x9a, 0xd2, 0xe4, 0xc9, 0xfa, 0x55, 0x75, 0x55, 0xb0, 0x95, 0x54, 0x8d, - 0xd3, 0x48, 0xe8, 0x43, 0x0d, 0x10, 0xa7, 0xec, 0x04, 0x96, 0x47, 0x43, 0x93, 0x5c, 0xd9, 0xdc, - 0x4d, 0x3a, 0x81, 0x25, 0x39, 0x01, 0xb4, 0x75, 0x42, 0x3f, 0x1e, 0x82, 0x79, 0xda, 0x80, 0xce, - 0x83, 0x5c, 0x87, 0x50, 0x6a, 0x35, 0x89, 0x3e, 0x9d, 0x0e, 0x72, 0xdb, 0x21, 0x19, 0x47, 0x7c, - 0xe3, 0x0a, 0xe4, 0x4c, 0xdf, 0xa9, 0x9b, 0xcf, 0x2a, 0x8a, 0x8d, 0x3f, 0x69, 0xc0, 0xc3, 0xe3, - 0xb9, 0xe4, 0x87, 0x6f, 0xa6, 0xf3, 0xc3, 0xcd, 0xf1, 0x9d, 0x7c, 0x44, 0x7a, 0xf8, 0x55, 0x56, - 0x18, 0x27, 0xb2, 0xc3, 0x07, 0x1a, 0x94, 0x5c, 0xcf, 0x65, 0xea, 0x36, 0x80, 0xea, 0x2f, 0x4f, - 0x54, 0xe0, 0x29, 0x45, 0xd5, 0x57, 0x24, 0x78, 0xa9, 0x9e, 0xd2, 0x8f, 0x07, 0xf0, 0x10, 0x13, - 0x21, 0x3a, 0x42, 0xcf, 0x9c, 0x11, 0x7a, 0x32, 0x3c, 0x47, 0xc8, 0x09, 0x1c, 0xf4, 0x26, 0x20, - 0x4a, 0x82, 0x03, 0xd7, 0x26, 0xeb, 0xb6, 0xed, 0xf7, 0x3c, 0x26, 0x2e, 0x30, 0xc2, 0x3b, 0x12, - 0xe5, 0xa5, 0x8d, 0x13, 0x12, 0x78, 0xc8, 0x28, 0xde, 0x42, 0xa9, 0x2b, 0x10, 0x48, 0xb7, 0x50, - 0x27, 0xaf, 0x41, 0xd0, 0x1a, 0x14, 0x79, 0x3b, 0xf5, 0x80, 0xb0, 0x87, 0x7e, 0xb0, 0xaf, 0x17, - 0x57, 0xb5, 0xab, 0xf9, 0xf8, 0x5a, 0xe6, 0x7e, 0xcc, 0xc2, 0x49, 0x39, 0xe3, 0x17, 0x39, 0x28, - 0xa8, 0xc0, 0x85, 0x2a, 0x90, 0xeb, 0xb6, 0x2c, 0x1a, 0x05, 0xa4, 0x4b, 0xaa, 0x89, 0xe2, 0xc4, - 0xe3, 0x30, 0x69, 0x8b, 0x67, 0x1c, 0xca, 0xa1, 0x87, 0xa9, 0x44, 0x98, 0x99, 0xe8, 0x9a, 0x21, - 0x19, 0xed, 0x9e, 0x9b, 0x07, 0x4f, 0x79, 0x7d, 0x88, 0xae, 0xf0, 0xce, 0xd2, 0xa9, 0x9b, 0xf2, - 0x00, 0x27, 0xda, 0x42, 0xa7, 0x6e, 0xe2, 0x90, 0xc7, 0x6b, 0x08, 0xf1, 0x40, 0xf5, 0xd9, 0x89, - 0x6a, 0x08, 0xa1, 0x34, 0x9e, 0x8a, 0x78, 0xa5, 0x58, 0xea, 0x46, 0xae, 0xbc, 0x27, 0x14, 0x61, - 0x6f, 0xe6, 0x0c, 0xc2, 0xde, 0x9c, 0xba, 0x23, 0x14, 0x91, 0x2e, 0xd6, 0x8e, 0x7e, 
0xaa, 0xc1, - 0x82, 0x9d, 0xbe, 0x23, 0x24, 0x54, 0xcf, 0x4f, 0x74, 0xf5, 0x34, 0x70, 0xe7, 0xa8, 0x9c, 0x63, - 0xa1, 0x36, 0x08, 0x84, 0x4f, 0x62, 0xa3, 0x5b, 0x90, 0xff, 0x96, 0x4f, 0x6b, 0x6d, 0x8b, 0x52, - 0xbd, 0x90, 0xea, 0x15, 0xf2, 0x5f, 0x79, 0xbb, 0x21, 0xe8, 0xc7, 0x87, 0x2b, 0x45, 0xd3, 0x77, - 0xa2, 0x57, 0xac, 0x06, 0x18, 0x3f, 0xd2, 0x00, 0xe2, 0xde, 0x5e, 0x5d, 0xdf, 0x69, 0xa7, 0xba, - 0xbe, 0xcb, 0xbc, 0xd0, 0xb5, 0xf4, 0x0a, 0xe4, 0x48, 0x10, 0xf8, 0x81, 0xac, 0x3e, 0x0b, 0xdc, - 0x57, 0x36, 0x39, 0x01, 0x87, 0x74, 0xe3, 0x0f, 0x53, 0x30, 0xdd, 0x20, 0x76, 0x40, 0xd8, 0xb9, - 0x94, 0x43, 0x9f, 0x81, 0x82, 0xdb, 0xe9, 0xf4, 0x98, 0xb5, 0xdb, 0x26, 0xc2, 0xf5, 0xf3, 0xa1, - 0x1b, 0xd4, 0x23, 0x22, 0x8e, 0xf9, 0x28, 0x80, 0x29, 0x31, 0xb9, 0xf0, 0x5c, 0xbe, 0x31, 0xe6, - 0xc6, 0x87, 0xd6, 0x96, 0x37, 0x2c, 0x66, 0x6d, 0x7a, 0x2c, 0xe8, 0xab, 0x7c, 0x3f, 0xc5, 0x49, - 0x3f, 0xfe, 0xcb, 0x4a, 0xae, 0xda, 0x67, 0x84, 0x62, 0x81, 0x85, 0xbe, 0xa7, 0x01, 0x50, 0x16, - 0xb8, 0x5e, 0x93, 0x73, 0x65, 0x6d, 0xbc, 0x3d, 0x19, 0x74, 0x43, 0xe9, 0x0b, 0x27, 0xa0, 0x96, - 0x28, 0x66, 0xe0, 0x04, 0x28, 0x2a, 0xcb, 0xb2, 0x2a, 0x9b, 0x8a, 0xbb, 0x51, 0x59, 0x05, 0xa1, - 0xd6, 0xb8, 0xa0, 0x5a, 0xfa, 0x12, 0x14, 0x94, 0x72, 0xf4, 0x12, 0x64, 0xf7, 0x49, 0x3f, 0x8c, - 0x80, 0x98, 0x3f, 0xa2, 0x97, 0x21, 0x77, 0x60, 0xb5, 0x7b, 0xe1, 0x45, 0xd6, 0x2c, 0x0e, 0x5f, - 0x6e, 0x66, 0x6e, 0x68, 0x4b, 0xb7, 0x61, 0x7e, 0x60, 0x6e, 0xcf, 0x1b, 0x5e, 0x48, 0x0c, 0x37, - 0x3e, 0xd6, 0x40, 0x4e, 0xe6, 0x5c, 0x4a, 0x82, 0xdd, 0x74, 0x49, 0x70, 0x7b, 0xa2, 0x4d, 0x1a, - 0x51, 0x15, 0xfc, 0x31, 0x03, 0x33, 0x32, 0xdf, 0x9d, 0xcb, 0x79, 0x71, 0x52, 0xed, 0x43, 0x75, - 0x6c, 0x13, 0x85, 0x05, 0x23, 0x5b, 0x88, 0xf6, 0x40, 0x0b, 0xb1, 0x31, 0x21, 0xce, 0xb3, 0xdb, - 0x88, 0x23, 0x0d, 0x8a, 0x52, 0xf2, 0x5c, 0xfc, 0xc6, 0x4e, 0xfb, 0xcd, 0x9d, 0xc9, 0x8c, 0x1d, - 0xe1, 0x38, 0xbf, 0x89, 0x8d, 0x3c, 0xe5, 0xbf, 0x3a, 0xe3, 0x07, 0xfd, 0x28, 0xa1, 0x64, 0x47, - 0x26, 0x14, 0x59, 0x8b, 0x89, 0xff, 0x47, 0x73, 0xe9, 0xbf, 0x54, 0x1f, 0x48, 0x3a, 0x56, 0x12, - 0xc6, 0xa3, 0xa2, 0x9a, 0xbb, 0x28, 0x87, 0x9b, 0xd1, 0xf5, 0xb6, 0x36, 0x51, 0xc3, 0x9e, 0x58, - 0x8e, 0x11, 0xff, 0x55, 0x7f, 0x07, 0xf2, 0x94, 0xb4, 0x89, 0xcd, 0xfc, 0x40, 0x6e, 0x8e, 0x39, - 0xb9, 0xc7, 0x97, 0x1b, 0x52, 0x65, 0x18, 0x7c, 0x95, 0xe1, 0x11, 0x19, 0x2b, 0x4c, 0x54, 0x81, - 0x82, 0xdd, 0xee, 0x51, 0x46, 0x82, 0xba, 0x29, 0xa3, 0xaf, 0xba, 0x59, 0xa8, 0x45, 0x0c, 0x1c, - 0xcb, 0xa0, 0x32, 0x80, 0x7a, 0xa1, 0x3a, 0x12, 0xb7, 0x43, 0x25, 0x51, 0xf6, 0x29, 0x2a, 0x4e, - 0x48, 0xa0, 0x8a, 0x8c, 0xec, 0xe1, 0x5f, 0x82, 0xff, 0x37, 0x10, 0xd9, 0xa3, 0x45, 0x4f, 0xf4, - 0xca, 0xd7, 0xa0, 0x48, 0xde, 0x67, 0x24, 0xf0, 0xac, 0x36, 0x47, 0xc8, 0x09, 0x84, 0x79, 0x5e, - 0x12, 0x6f, 0xc6, 0x64, 0x9c, 0x94, 0x41, 0x3b, 0x30, 0x4f, 0x09, 0xa5, 0xae, 0xef, 0xad, 0xef, - 0xed, 0xf1, 0xae, 0xa2, 0x2f, 0xaa, 0xb5, 0x42, 0xf5, 0xd3, 0x12, 0x6e, 0xbe, 0x91, 0x66, 0x1f, - 0x0b, 0x52, 0x58, 0xbf, 0x4b, 0x12, 0x1e, 0x54, 0x81, 0xee, 0x40, 0xa9, 0x9d, 0xfc, 0x77, 0xc0, - 0x94, 0x5d, 0x81, 0xea, 0x67, 0x52, 0xff, 0x1d, 0x98, 0x78, 0x40, 0x1a, 0x7d, 0x0d, 0xf4, 0x24, - 0xa5, 0xe1, 0xf7, 0x02, 0x9b, 0x60, 0xcb, 0x6b, 0x92, 0xf0, 0x93, 0x84, 0x42, 0xf5, 0xf2, 0xd1, - 0xe1, 0x8a, 0xbe, 0x35, 0x42, 0x06, 0x8f, 0x1c, 0x8d, 0x28, 0x2c, 0x46, 0xe6, 0xef, 0x04, 0xd6, - 0xde, 0x9e, 0x6b, 0x9b, 0x7e, 0xdb, 0xb5, 0xfb, 0xa2, 0x87, 0x28, 0x54, 0x6f, 0xcb, 0x09, 0x2e, - 0x6e, 0x0e, 0x13, 0x3a, 0x3e, 0x5c, 0xb9, 0x2c, 0x6d, 0x1f, 0xca, 0xc7, 0xc3, 0x75, 0xa3, 0x6d, - 0xb8, 0xd8, 
0x22, 0x56, 0x9b, 0xb5, 0x6a, 0x2d, 0x62, 0xef, 0x47, 0x67, 0x48, 0x9f, 0x15, 0x67, - 0x2b, 0xda, 0xd7, 0x8b, 0xf7, 0x4f, 0x8a, 0xe0, 0x61, 0xe3, 0xd0, 0xcf, 0x35, 0x58, 0x1c, 0x58, - 0xf1, 0xf0, 0xd3, 0x15, 0xbd, 0x34, 0xd1, 0x17, 0x02, 0x8d, 0x61, 0x3a, 0xab, 0x97, 0xf8, 0x72, - 0x0c, 0x65, 0xe1, 0xe1, 0xb3, 0x40, 0x37, 0x01, 0xdc, 0xee, 0x3d, 0xab, 0xe3, 0xb6, 0x5d, 0x42, - 0xf5, 0x8b, 0x62, 0xbf, 0x96, 0xb8, 0x9f, 0xd7, 0xcd, 0x88, 0xca, 0x63, 0x93, 0x7c, 0xeb, 0xe3, - 0x84, 0x34, 0xda, 0x82, 0x92, 0x7c, 0xeb, 0xcb, 0x8d, 0x59, 0x10, 0x1b, 0xf3, 0x09, 0xd1, 0x05, - 0x9b, 0x49, 0xce, 0xf1, 0x09, 0x0a, 0x1e, 0x18, 0x8b, 0x6a, 0xb0, 0x90, 0xf4, 0x84, 0xb0, 0x22, - 0x5f, 0x14, 0x0a, 0x17, 0x79, 0x35, 0xbf, 0x35, 0xc8, 0xc4, 0x27, 0xe5, 0x91, 0x0f, 0x8b, 0xae, - 0x37, 0xcc, 0x65, 0x5e, 0x11, 0x8a, 0x5e, 0xe3, 0xeb, 0x53, 0xf7, 0x9e, 0xed, 0x2e, 0x43, 0xf9, - 0x78, 0xb8, 0xde, 0xa5, 0x5b, 0x30, 0x97, 0x8a, 0x42, 0x2f, 0x54, 0x66, 0x3d, 0xca, 0xf0, 0xd1, - 0x89, 0xcc, 0x8a, 0xbe, 0xaf, 0xc1, 0x6c, 0xd2, 0x2a, 0x99, 0x36, 0xeb, 0x67, 0xf0, 0xb7, 0x9f, - 0xcc, 0xdd, 0xea, 0xdb, 0x9e, 0x24, 0x0f, 0xa7, 0x40, 0x51, 0x6f, 0x48, 0xf3, 0xbc, 0x3e, 0x6e, - 0xe6, 0x3e, 0x75, 0xeb, 0x6c, 0x7c, 0xa8, 0xc1, 0x70, 0xe7, 0x45, 0x3e, 0xe4, 0x6d, 0xf9, 0xe1, - 0x97, 0x5c, 0x91, 0xb1, 0xbf, 0x24, 0x49, 0x7d, 0x3f, 0x16, 0x5e, 0xf8, 0x47, 0x34, 0xac, 0x40, - 0x8c, 0xbf, 0x6b, 0x90, 0x13, 0x37, 0xed, 0xe8, 0xd5, 0xc4, 0x7e, 0x56, 0x8b, 0xd2, 0x82, 0xec, - 0x5b, 0xa4, 0x1f, 0x6e, 0xee, 0x95, 0xd4, 0xe6, 0xc6, 0xd9, 0xef, 0x1d, 0x4e, 0x94, 0x7b, 0x8d, - 0xd6, 0x60, 0x9a, 0xec, 0xed, 0x11, 0x9b, 0xc9, 0xd4, 0xf3, 0x6a, 0x54, 0x3f, 0x6d, 0x0a, 0x2a, - 0x4f, 0x10, 0x02, 0x2c, 0x7c, 0xc5, 0x52, 0x98, 0xf7, 0xe5, 0xcc, 0xed, 0x90, 0x75, 0xc7, 0x21, - 0xce, 0x99, 0x5c, 0x47, 0x8a, 0x86, 0x6c, 0x27, 0x52, 0x89, 0x63, 0xed, 0xbc, 0x91, 0xbd, 0xc4, - 0x93, 0x93, 0xb3, 0xe5, 0xdb, 0x56, 0x3b, 0x2c, 0x58, 0x31, 0xd9, 0x23, 0x01, 0xf1, 0x6c, 0x82, - 0xae, 0x42, 0xde, 0xea, 0xba, 0x6f, 0x04, 0x7e, 0x2f, 0xba, 0x40, 0x14, 0xeb, 0xb6, 0x6e, 0xd6, - 0x05, 0x0d, 0x2b, 0x2e, 0x2f, 0x58, 0xf6, 0x5d, 0xcf, 0x91, 0xab, 0xa1, 0x0a, 0x96, 0xb7, 0x5c, - 0xcf, 0xc1, 0x82, 0xa3, 0xca, 0xa5, 0xec, 0xa8, 0x72, 0xc9, 0xb8, 0x03, 0xc5, 0xc4, 0x77, 0x67, - 0x3c, 0x75, 0x77, 0xf8, 0x83, 0x69, 0xb1, 0xd6, 0x60, 0xea, 0xde, 0x8e, 0x18, 0x38, 0x96, 0xa9, - 0x7e, 0xe3, 0xf1, 0xd3, 0xe5, 0x0b, 0x1f, 0x3d, 0x5d, 0xbe, 0xf0, 0xe4, 0xe9, 0xf2, 0x85, 0x0f, - 0x8e, 0x96, 0xb5, 0xc7, 0x47, 0xcb, 0xda, 0x47, 0x47, 0xcb, 0xda, 0x93, 0xa3, 0x65, 0xed, 0xe3, - 0xa3, 0x65, 0xed, 0x67, 0x7f, 0x5d, 0xbe, 0xf0, 0xf5, 0xb5, 0xb1, 0xbe, 0xd4, 0xfc, 0x6f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x3f, 0xf4, 0xc9, 0x6c, 0xe1, 0x29, 0x00, 0x00, + // 2800 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0xcf, 0x6f, 0x24, 0x47, + 0xf5, 0xdf, 0x9e, 0xf1, 0xd8, 0x33, 0x6f, 0xfc, 0x23, 0x5b, 0x1b, 0x27, 0x1d, 0x7f, 0x13, 0xdb, + 0xdf, 0x5e, 0x84, 0x36, 0xfc, 0x98, 0xd1, 0x2e, 0x2c, 0x6c, 0x76, 0x93, 0x25, 0x9e, 0xb1, 0xb3, + 0x3b, 0x89, 0xbd, 0x3b, 0xd4, 0x58, 0x11, 0x82, 0x03, 0x69, 0x77, 0x97, 0xc7, 0x15, 0xcf, 0x74, + 0x37, 0x5d, 0x35, 0x9b, 0x8c, 0x40, 0x90, 0x08, 0x81, 0xc2, 0x0f, 0x09, 0xfe, 0x01, 0x2e, 0xdc, + 0x38, 0xc3, 0x8d, 0x03, 0x27, 0xa4, 0xe5, 0x16, 0x4e, 0x44, 0x08, 0x99, 0xac, 0x91, 0x90, 0x38, + 0x70, 0x40, 0x9c, 0x30, 0x12, 0x42, 0x55, 0x5d, 0x5d, 0xdd, 0x3d, 0x9e, 0xd9, 0xb5, 0x67, 0x2c, + 0x59, 0x9c, 0x66, 0xfa, 0xbd, 0x57, 0xef, 0x53, 0xaf, 0xea, 0xbd, 0x57, 0xaf, 0x5e, 0x37, 0x6c, + 0xb4, 0x29, 
0xdf, 0xeb, 0xed, 0x54, 0x1c, 0xbf, 0x5b, 0x75, 0x68, 0x87, 0xf6, 0xf4, 0x4f, 0xb0, + 0xdf, 0xae, 0xee, 0xdf, 0x60, 0x55, 0xd6, 0xa1, 0x5d, 0xf9, 0xc7, 0x0e, 0x68, 0xd5, 0xf1, 0x43, + 0x52, 0x7d, 0x70, 0xb5, 0xda, 0x26, 0x1e, 0x09, 0x6d, 0x4e, 0xdc, 0x4a, 0x10, 0xfa, 0xdc, 0x47, + 0xd7, 0x13, 0x35, 0x95, 0x68, 0x7c, 0xfc, 0x13, 0xec, 0xb7, 0x2b, 0xfb, 0x37, 0x58, 0x45, 0xa8, + 0x91, 0x7f, 0xec, 0x80, 0x56, 0x84, 0x9a, 0xca, 0x83, 0xab, 0x4b, 0xaf, 0x9d, 0x0a, 0x9d, 0x55, + 0xbb, 0x84, 0xdb, 0x43, 0xe0, 0x97, 0x5e, 0x3f, 0xa5, 0x9e, 0x1e, 0xa7, 0x9d, 0x2a, 0xf5, 0x38, + 0xe3, 0xe1, 0x31, 0x5d, 0x9f, 0x4d, 0xe9, 0x6a, 0xfb, 0x6d, 0xbf, 0x2a, 0xc9, 0x3b, 0xbd, 0x5d, + 0xf9, 0x24, 0x1f, 0xe4, 0x3f, 0x25, 0x2e, 0x94, 0x56, 0xa8, 0x2f, 0xf4, 0x76, 0x6d, 0x67, 0x8f, + 0x7a, 0x24, 0xec, 0x4b, 0xd4, 0xb0, 0xe7, 0x71, 0xda, 0x25, 0xc7, 0xf4, 0x7f, 0xe1, 0x49, 0x03, + 0x98, 0xb3, 0x47, 0xba, 0xf6, 0xe0, 0x38, 0x6b, 0x13, 0xe6, 0xeb, 0x1d, 0x4a, 0x3c, 0xde, 0x68, + 0xd6, 0x7d, 0x6f, 0x97, 0xb6, 0xd1, 0x4d, 0x98, 0x17, 0x03, 0xfc, 0x1e, 0x6f, 0x11, 0xc7, 0xf7, + 0x5c, 0x66, 0x1a, 0xab, 0xc6, 0x95, 0x42, 0x0d, 0x1d, 0x1e, 0xac, 0xcc, 0x6f, 0x67, 0x38, 0x78, + 0x40, 0xd2, 0xfa, 0x75, 0x0e, 0x4a, 0x75, 0xdf, 0xe3, 0xb6, 0xc0, 0x47, 0xab, 0x30, 0xe5, 0xd9, + 0x5d, 0x22, 0xc7, 0x97, 0x6a, 0xb3, 0x0f, 0x0f, 0x56, 0x2e, 0x1c, 0x1e, 0xac, 0x4c, 0xdd, 0xb3, + 0xbb, 0x04, 0x4b, 0x0e, 0xba, 0x0c, 0x05, 0xda, 0xb5, 0xdb, 0xc4, 0xcc, 0x49, 0x91, 0x39, 0x25, + 0x52, 0x68, 0x08, 0x22, 0x8e, 0x78, 0x88, 0x42, 0x21, 0xf0, 0x43, 0xce, 0xcc, 0xe9, 0xd5, 0xfc, + 0x95, 0xf2, 0xb5, 0xf5, 0xca, 0x58, 0x5e, 0x51, 0xd1, 0xf3, 0x6a, 0xfa, 0x21, 0x4f, 0xa0, 0xc4, + 0x13, 0xc3, 0x11, 0x02, 0xfa, 0x16, 0xcc, 0x3e, 0xf0, 0x3b, 0xbd, 0x2e, 0xd9, 0xf2, 0x7b, 0x1e, + 0x67, 0x66, 0x49, 0x22, 0xd6, 0xc6, 0x44, 0x7c, 0x33, 0x51, 0x55, 0x7b, 0x5a, 0xe1, 0xcd, 0xa6, + 0x88, 0x0c, 0x67, 0xd0, 0xac, 0xff, 0x18, 0x30, 0x97, 0x99, 0xe5, 0x09, 0x56, 0xf0, 0x33, 0x50, + 0xdc, 0xf3, 0x19, 0x17, 0xd2, 0x72, 0x11, 0x0b, 0xb5, 0xa7, 0x94, 0x54, 0xf1, 0xae, 0xa2, 0x63, + 0x2d, 0x81, 0x6e, 0xc1, 0x9c, 0x93, 0x06, 0x30, 0xf3, 0x72, 0xc8, 0xa2, 0x1a, 0x92, 0x45, 0xc7, + 0x59, 0x59, 0x74, 0x03, 0x8a, 0xd2, 0x67, 0x1c, 0xbf, 0x63, 0x4e, 0xc9, 0x09, 0x3d, 0x1f, 0x43, + 0x35, 0x15, 0xfd, 0x28, 0xf5, 0x1f, 0x6b, 0x69, 0xf4, 0x49, 0x98, 0x16, 0x53, 0x68, 0x34, 0xcd, + 0x82, 0x1c, 0x37, 0xaf, 0xc6, 0x4d, 0xdf, 0x95, 0x54, 0xac, 0xb8, 0xd6, 0xf7, 0x0c, 0x98, 0xd7, + 0x53, 0x68, 0x71, 0x9b, 0x13, 0xc4, 0x60, 0x26, 0xec, 0x79, 0x1e, 0xf5, 0xda, 0xd2, 0xbc, 0xf2, + 0xb5, 0xcd, 0x49, 0xb7, 0x5f, 0xea, 0xc5, 0x91, 0xce, 0x5a, 0xf9, 0xf0, 0x60, 0x65, 0x46, 0x3d, + 0xe0, 0x18, 0xc9, 0xfa, 0xbe, 0x01, 0x8b, 0x43, 0xe5, 0x51, 0x17, 0x4a, 0x8c, 0xdb, 0x21, 0x27, + 0xee, 0x1a, 0x97, 0xbb, 0x52, 0xbe, 0xf6, 0xf2, 0xe9, 0x26, 0xc4, 0x2a, 0x22, 0xdd, 0x88, 0x19, + 0x89, 0x58, 0xaa, 0x5d, 0x54, 0x4b, 0x51, 0x6a, 0xc5, 0x6a, 0x71, 0x82, 0x60, 0xfd, 0xca, 0x80, + 0x85, 0xcc, 0x44, 0x7a, 0x0c, 0xbd, 0x0d, 0x05, 0x26, 0xa6, 0xa4, 0xd6, 0x63, 0xe3, 0x4c, 0xd6, + 0x23, 0x89, 0x87, 0xc8, 0xdc, 0x08, 0x02, 0x5d, 0x87, 0xb2, 0xf6, 0x81, 0xc6, 0xba, 0x59, 0x94, + 0xbb, 0x77, 0x49, 0x89, 0x96, 0xeb, 0x09, 0x0b, 0xa7, 0xe5, 0xc4, 0x3e, 0x2e, 0x6c, 0x78, 0x6e, + 0xe0, 0x53, 0x8f, 0xaf, 0xb9, 0x6e, 0x48, 0x18, 0x43, 0x4b, 0x90, 0xa3, 0x81, 0x72, 0x64, 0x50, + 0x1a, 0x72, 0x8d, 0x26, 0xce, 0xd1, 0x20, 0x76, 0x62, 0xe9, 0xea, 0x79, 0x29, 0x91, 0x71, 0x62, + 0x41, 0xc7, 0x5a, 0x02, 0x5d, 0x81, 0xa2, 0xe7, 0xbb, 0x44, 0x04, 0x81, 0xf2, 0xc3, 0x59, 0x21, + 0x79, 0x4f, 0xd1, 0xb0, 0xe6, 0x5a, 
0xbf, 0x31, 0x60, 0x36, 0x9e, 0xc7, 0x09, 0xe3, 0x69, 0x15, + 0xa6, 0x82, 0x24, 0x96, 0xb4, 0x84, 0x8c, 0x07, 0xc9, 0xc9, 0x84, 0x41, 0xfe, 0x54, 0x61, 0x70, + 0x15, 0xca, 0x76, 0x10, 0x34, 0xb3, 0x31, 0xb4, 0x20, 0x56, 0x72, 0x2d, 0x21, 0xe3, 0xb4, 0x8c, + 0xf5, 0x4f, 0x03, 0xe6, 0x63, 0x0b, 0x5a, 0xbd, 0x1d, 0x46, 0x38, 0x7a, 0x07, 0x4a, 0x76, 0xb4, + 0xa6, 0x44, 0xa4, 0x66, 0x91, 0xa0, 0x5e, 0x1b, 0xd3, 0x07, 0x06, 0xf6, 0x28, 0x71, 0xc6, 0xb5, + 0x18, 0x00, 0x27, 0x58, 0x68, 0x2f, 0xce, 0xc3, 0x79, 0x09, 0x5a, 0x9f, 0x10, 0x74, 0x74, 0x1a, + 0xb6, 0xfe, 0x61, 0x40, 0x29, 0x16, 0x63, 0x28, 0x84, 0xa2, 0x08, 0x19, 0xd7, 0xe6, 0xb6, 0x0a, + 0xb9, 0xda, 0xb8, 0x21, 0x77, 0x7f, 0xe7, 0x6d, 0xe2, 0xf0, 0x2d, 0xc2, 0xed, 0x1a, 0x52, 0xc8, + 0x90, 0xd0, 0xb0, 0xc6, 0x41, 0x01, 0xcc, 0x30, 0xb9, 0xdc, 0xcc, 0xcc, 0x49, 0x6b, 0x37, 0x26, + 0xb4, 0x36, 0xda, 0xbc, 0xda, 0x82, 0x42, 0x9d, 0x89, 0x9e, 0x19, 0x8e, 0x61, 0xac, 0xbf, 0x1a, + 0x30, 0xa7, 0x6d, 0xde, 0xa4, 0x8c, 0x23, 0xef, 0x98, 0xdd, 0xaf, 0x8e, 0x6b, 0xb7, 0xd0, 0x27, + 0xad, 0xd6, 0x71, 0x15, 0x53, 0x52, 0x36, 0x13, 0x28, 0x50, 0x4e, 0xba, 0xb1, 0xc5, 0xaf, 0x4e, + 0x68, 0x31, 0x4b, 0x1d, 0xe7, 0x42, 0x2d, 0x8e, 0xb4, 0x5b, 0xef, 0xe7, 0xe0, 0xd2, 0xa6, 0x6f, + 0xbb, 0x35, 0xbb, 0x63, 0x7b, 0x0e, 0x09, 0x1b, 0x5e, 0xfb, 0x54, 0x09, 0x22, 0xf7, 0xc4, 0x04, + 0x71, 0x03, 0xa6, 0x69, 0xb0, 0xe5, 0xbb, 0x71, 0x32, 0x59, 0x15, 0x47, 0x4d, 0xa3, 0x29, 0x28, + 0x47, 0x07, 0x2b, 0x28, 0x03, 0x2e, 0xa9, 0x58, 0xc9, 0xa3, 0xdd, 0xd8, 0xc5, 0xa7, 0xe4, 0x12, + 0xac, 0x8d, 0xb9, 0x04, 0xc2, 0x99, 0xa3, 0x6c, 0x3d, 0xc2, 0xc1, 0x7f, 0x64, 0x40, 0x66, 0x1a, + 0x2a, 0xb5, 0xf7, 0x60, 0x86, 0x46, 0xab, 0xa1, 0x02, 0xfb, 0xf5, 0x31, 0x27, 0x30, 0x64, 0x7d, + 0x13, 0xd7, 0x53, 0x04, 0x1c, 0x63, 0x59, 0xdf, 0x81, 0x92, 0xc8, 0x80, 0x2c, 0xb0, 0x1d, 0x72, + 0x1e, 0xd1, 0x26, 0x7d, 0x5f, 0xcf, 0xe0, 0x7f, 0xd9, 0xf7, 0xb5, 0x11, 0x23, 0x7c, 0xff, 0x61, + 0x0e, 0xa6, 0xc4, 0x39, 0x75, 0x2e, 0x39, 0xcd, 0x86, 0x29, 0x16, 0x10, 0x47, 0xd5, 0x0d, 0x5f, + 0x1a, 0xd7, 0x44, 0xdf, 0x25, 0xad, 0x80, 0x38, 0xc9, 0xd9, 0x28, 0x9e, 0xb0, 0x54, 0x8d, 0x28, + 0x4c, 0x33, 0xe9, 0xca, 0x32, 0xf2, 0xc6, 0x0f, 0x20, 0x09, 0x12, 0x05, 0x90, 0xae, 0x15, 0xa3, + 0x67, 0xac, 0x00, 0xac, 0x2e, 0x94, 0x85, 0x54, 0x5c, 0x5e, 0x7c, 0x0e, 0xa6, 0x78, 0x3f, 0x88, + 0x4f, 0xf6, 0x95, 0x78, 0x6e, 0xdb, 0xfd, 0x40, 0xc4, 0xfc, 0x42, 0x4a, 0x54, 0x90, 0xb0, 0x14, + 0x46, 0x2f, 0xc2, 0x8c, 0x3a, 0xde, 0x54, 0x56, 0xd1, 0x31, 0xa2, 0x64, 0x71, 0xcc, 0xb7, 0x7e, + 0x21, 0x5c, 0xd4, 0x77, 0x49, 0xdd, 0xf7, 0x5c, 0xca, 0xa9, 0xef, 0xa1, 0xeb, 0x19, 0xc4, 0xff, + 0x1f, 0x40, 0xbc, 0x98, 0x11, 0x4e, 0x61, 0xbe, 0xa4, 0x97, 0x28, 0x97, 0x19, 0xa8, 0xec, 0x13, + 0x93, 0xd5, 0xc3, 0xb2, 0x26, 0x8b, 0x32, 0x3a, 0x24, 0x36, 0xf3, 0xbd, 0xc1, 0x32, 0x1a, 0x4b, + 0x2a, 0x56, 0x5c, 0xeb, 0x4f, 0x06, 0xc8, 0x6a, 0xe8, 0x5c, 0x22, 0xe9, 0xad, 0x6c, 0x24, 0xdd, + 0x9a, 0xc0, 0x03, 0x46, 0x04, 0xd1, 0xbf, 0x94, 0x79, 0xc2, 0xef, 0xc4, 0x16, 0x06, 0xbe, 0x5b, + 0x6f, 0xac, 0x63, 0xb5, 0x11, 0x7a, 0x0b, 0x9b, 0x11, 0x19, 0xc7, 0x7c, 0x51, 0x37, 0xaa, 0xbf, + 0xcc, 0x9c, 0x59, 0xcd, 0xc7, 0x75, 0xa3, 0x92, 0x63, 0x58, 0x73, 0xd1, 0x35, 0x80, 0x20, 0xf4, + 0x1f, 0x50, 0x57, 0x56, 0xbd, 0xd1, 0x21, 0xa2, 0x63, 0xab, 0xa9, 0x39, 0x38, 0x25, 0x85, 0x5c, + 0x98, 0x16, 0xf5, 0x2f, 0x67, 0x66, 0x41, 0x1a, 0xfe, 0xf2, 0x98, 0x86, 0x6f, 0x0b, 0x25, 0xc9, + 0xd6, 0xca, 0x47, 0x86, 0x95, 0x6e, 0xeb, 0xdf, 0x06, 0x40, 0x12, 0x1c, 0xe8, 0x5d, 0x00, 0x27, + 0x76, 0x96, 0xf8, 0xd0, 0x5a, 0x9f, 0x60, 0xc5, 0xb5, 0xe7, 
0x25, 0xe6, 0x6a, 0x12, 0xc3, 0x29, + 0x2c, 0xc4, 0xd2, 0x55, 0x68, 0x61, 0xa2, 0x6b, 0x72, 0x2a, 0x36, 0x1f, 0x5f, 0x81, 0x5a, 0xbf, + 0xcd, 0x41, 0xbe, 0xe9, 0xbb, 0xe7, 0x92, 0x3d, 0xdf, 0xca, 0x64, 0xcf, 0xdb, 0x63, 0x57, 0x06, + 0xee, 0xc8, 0xe4, 0xb9, 0x37, 0x90, 0x3c, 0x5f, 0x9d, 0x00, 0xe3, 0xf1, 0xb9, 0xf3, 0xa3, 0x3c, + 0xcc, 0x0a, 0xb7, 0xd7, 0xb9, 0xec, 0xf3, 0x99, 0x5c, 0xb6, 0x3a, 0x90, 0xcb, 0x9e, 0x4a, 0xcb, + 0x9e, 0x4d, 0x2a, 0xeb, 0xc3, 0x5c, 0xc7, 0x66, 0xbc, 0x19, 0xfa, 0x3b, 0x44, 0xdc, 0x83, 0x95, + 0xc9, 0x93, 0xdd, 0xa5, 0x75, 0x1b, 0x63, 0x33, 0xad, 0x1a, 0x67, 0x91, 0xd0, 0x07, 0x06, 0x20, + 0x41, 0xd9, 0x0e, 0x6d, 0x8f, 0x45, 0x26, 0x51, 0x75, 0x93, 0x9c, 0x74, 0x02, 0x4b, 0x6a, 0x02, + 0x68, 0xf3, 0x98, 0x7e, 0x3c, 0x04, 0xf3, 0xa4, 0x09, 0x5d, 0x24, 0xb9, 0x2e, 0x61, 0xcc, 0x6e, + 0x13, 0x73, 0x3a, 0x9b, 0xe4, 0xb6, 0x22, 0x32, 0x8e, 0xf9, 0xd6, 0x65, 0x28, 0x34, 0x7d, 0xb7, + 0xd1, 0x7c, 0x5c, 0x39, 0x6d, 0xfd, 0xd1, 0x00, 0x91, 0x1e, 0xcf, 0xe5, 0x7c, 0xf8, 0x7a, 0xf6, + 0x7c, 0xb8, 0x39, 0xbe, 0x93, 0x8f, 0x38, 0x1e, 0x1c, 0x10, 0xfe, 0x8a, 0x89, 0xed, 0x52, 0x8f, + 0x30, 0x76, 0xc7, 0xe6, 0x04, 0xdd, 0x97, 0x7d, 0xaf, 0xc4, 0x81, 0xd5, 0xba, 0xbc, 0x98, 0xea, + 0x7b, 0x25, 0xcc, 0xa1, 0x1e, 0x9f, 0x1d, 0x6f, 0xfd, 0x32, 0x2f, 0x57, 0x50, 0x1e, 0x41, 0xef, + 0x19, 0x30, 0x4f, 0x3d, 0xca, 0x75, 0x3b, 0x84, 0x99, 0x4f, 0x4f, 0x54, 0x45, 0x6a, 0x45, 0xb5, + 0x67, 0xd4, 0x04, 0xe7, 0x1b, 0x19, 0xfd, 0x78, 0x00, 0x0f, 0x71, 0x79, 0x0e, 0xc4, 0xe8, 0xb9, + 0x33, 0x42, 0x4f, 0x9f, 0x01, 0x31, 0x72, 0x0a, 0x07, 0xbd, 0x0e, 0x88, 0x91, 0xf0, 0x01, 0x75, + 0xc8, 0x9a, 0xe3, 0xf8, 0x3d, 0x8f, 0xcb, 0x96, 0x4c, 0xd4, 0x24, 0xd2, 0xa1, 0xd0, 0x3a, 0x26, + 0x81, 0x87, 0x8c, 0x12, 0x37, 0x3c, 0xdd, 0xd4, 0x81, 0xec, 0x0d, 0xef, 0x78, 0x63, 0x07, 0x5d, + 0x87, 0xb2, 0xb8, 0xed, 0xdd, 0x23, 0xfc, 0x1d, 0x3f, 0xdc, 0x37, 0xcb, 0xab, 0xc6, 0x95, 0x62, + 0xd2, 0x97, 0xba, 0x9b, 0xb0, 0x70, 0x5a, 0xce, 0xfa, 0x79, 0x01, 0x4a, 0x3a, 0x3b, 0xa2, 0x2a, + 0x14, 0x82, 0x3d, 0x9b, 0xc5, 0xce, 0xf0, 0x9c, 0xbe, 0xa9, 0x09, 0xe2, 0x51, 0x54, 0x19, 0xc8, + 0xff, 0x38, 0x92, 0x43, 0xef, 0x64, 0x4e, 0xdb, 0xdc, 0x44, 0x5d, 0x90, 0xb4, 0x83, 0x3d, 0xf1, + 0xb0, 0x3d, 0x61, 0xff, 0x14, 0x5d, 0x16, 0xd7, 0x57, 0xb7, 0xd1, 0x54, 0x59, 0x22, 0x75, 0xf7, + 0x74, 0x1b, 0x4d, 0x1c, 0xf1, 0x44, 0xa1, 0x22, 0xff, 0x30, 0x73, 0x76, 0xa2, 0x42, 0x45, 0x2a, + 0x4d, 0xa6, 0x22, 0x1f, 0x19, 0x56, 0xba, 0x11, 0x55, 0x8d, 0x52, 0x99, 0x5b, 0x67, 0xce, 0x20, + 0xb7, 0xce, 0xe9, 0x26, 0xa9, 0x4c, 0xa7, 0x89, 0x76, 0xf4, 0x13, 0x03, 0x2e, 0x3a, 0xd9, 0x26, + 0x29, 0x61, 0x66, 0x71, 0xa2, 0xce, 0xd8, 0x40, 0xd3, 0x55, 0x3b, 0xc7, 0xc5, 0xfa, 0x20, 0x10, + 0x3e, 0x8e, 0x8d, 0x6e, 0x41, 0xf1, 0x1b, 0x3e, 0xab, 0x77, 0x6c, 0xc6, 0xcc, 0x52, 0xe6, 0x42, + 0x52, 0xfc, 0xf2, 0xfd, 0x96, 0xa4, 0x1f, 0x1d, 0xac, 0x94, 0x9b, 0xbe, 0x1b, 0x3f, 0x62, 0x3d, + 0xc0, 0xfa, 0x81, 0x01, 0x90, 0x34, 0x10, 0x74, 0x43, 0xd2, 0x38, 0x51, 0x43, 0x32, 0x77, 0xaa, + 0x86, 0xe4, 0x0a, 0x14, 0x48, 0x18, 0xfa, 0xa1, 0x2a, 0x71, 0x4b, 0xc2, 0x57, 0x36, 0x04, 0x01, + 0x47, 0x74, 0xeb, 0x77, 0x53, 0x30, 0xdd, 0x22, 0x4e, 0x48, 0xf8, 0xb9, 0xd4, 0x5c, 0x9f, 0x86, + 0x12, 0xed, 0x76, 0x7b, 0xdc, 0xde, 0xe9, 0x10, 0xe9, 0xfa, 0xc5, 0xc8, 0x0d, 0x1a, 0x31, 0x11, + 0x27, 0x7c, 0x14, 0xc2, 0x94, 0x9c, 0x5c, 0x14, 0x97, 0x77, 0xc6, 0xdc, 0xf8, 0xc8, 0xda, 0xca, + 0xba, 0xcd, 0xed, 0x0d, 0x8f, 0x87, 0x7d, 0x5d, 0x54, 0x4c, 0x09, 0xd2, 0x0f, 0xff, 0xbc, 0x52, + 0xa8, 0xf5, 0x39, 0x61, 0x58, 0x62, 0xa1, 0xf7, 0x0d, 0x00, 0xc6, 0x43, 0xea, 0xb5, 
0x05, 0x57, + 0x15, 0xe0, 0x5b, 0x93, 0x41, 0xb7, 0xb4, 0xbe, 0x68, 0x02, 0x7a, 0x89, 0x12, 0x06, 0x4e, 0x81, + 0xa2, 0x8a, 0xaa, 0xdd, 0xf2, 0x99, 0xbc, 0x1b, 0xd7, 0x6e, 0x10, 0x69, 0x4d, 0xaa, 0xb6, 0xa5, + 0x2f, 0x42, 0x49, 0x2b, 0x47, 0x4f, 0x41, 0x7e, 0x9f, 0xf4, 0xa3, 0x0c, 0x88, 0xc5, 0x5f, 0xf4, + 0x34, 0x14, 0x1e, 0xd8, 0x9d, 0x5e, 0xd4, 0x67, 0x9b, 0xc5, 0xd1, 0xc3, 0xcd, 0xdc, 0x0d, 0x63, + 0xe9, 0x15, 0x58, 0x18, 0x98, 0xdb, 0x93, 0x86, 0x97, 0x52, 0xc3, 0xad, 0x8f, 0x0d, 0x50, 0x93, + 0x39, 0x97, 0xba, 0x63, 0x27, 0x5b, 0x77, 0xbc, 0x32, 0xd1, 0x26, 0x8d, 0x28, 0x3d, 0x7e, 0x9f, + 0x83, 0x19, 0x75, 0xde, 0x9d, 0x4b, 0xbc, 0xb8, 0x99, 0x3b, 0x4a, 0x6d, 0x6c, 0x13, 0xa5, 0x05, + 0x23, 0xef, 0x29, 0x9d, 0x81, 0x7b, 0xca, 0xfa, 0x84, 0x38, 0x8f, 0xbf, 0xab, 0x1c, 0x1a, 0x50, + 0x56, 0x92, 0xe7, 0xe2, 0x37, 0x4e, 0xd6, 0x6f, 0x6e, 0x4f, 0x66, 0xec, 0x08, 0xc7, 0xf9, 0x5b, + 0x4e, 0x1b, 0x79, 0xc2, 0xf7, 0x54, 0xe3, 0x27, 0xfd, 0x81, 0xb7, 0x50, 0xd3, 0x4f, 0x7e, 0x0b, + 0xa5, 0xcf, 0xa0, 0xfc, 0xc8, 0x33, 0xe8, 0x9b, 0x00, 0xdc, 0x0e, 0xdb, 0x24, 0x7a, 0x11, 0x1d, + 0xdd, 0xa5, 0xee, 0x9c, 0x76, 0x5f, 0x7a, 0x9c, 0x76, 0x2a, 0xd1, 0xf7, 0x13, 0x95, 0x86, 0xc7, + 0xef, 0x87, 0x51, 0xba, 0x49, 0x9c, 0x7e, 0x5b, 0x43, 0xe0, 0x14, 0x5c, 0x5c, 0x3b, 0x4a, 0xe8, + 0x42, 0xf6, 0x1d, 0xf8, 0x3d, 0x45, 0xc7, 0x5a, 0xc2, 0xfa, 0x43, 0x59, 0xaf, 0xb5, 0x2c, 0xdf, + 0xdb, 0x71, 0xcf, 0xdf, 0x98, 0xa8, 0x8b, 0x91, 0xda, 0xbe, 0x11, 0x1f, 0x17, 0x7c, 0x1b, 0x8a, + 0x8c, 0x74, 0x88, 0xc3, 0xfd, 0x50, 0x39, 0x53, 0x73, 0xf2, 0x08, 0xad, 0xb4, 0x94, 0xca, 0xe8, + 0xb0, 0xd0, 0x86, 0xc7, 0x64, 0xac, 0x31, 0x51, 0x15, 0x4a, 0x4e, 0xa7, 0xc7, 0x38, 0x09, 0x1b, + 0x4d, 0x75, 0x5a, 0xe8, 0x76, 0x4b, 0x3d, 0x66, 0xe0, 0x44, 0x06, 0x55, 0x00, 0xf4, 0x03, 0x33, + 0x91, 0x6c, 0x99, 0xcd, 0xcb, 0x32, 0x55, 0x53, 0x71, 0x4a, 0x02, 0x55, 0xd5, 0x49, 0x14, 0xbd, + 0xd8, 0xfc, 0xbf, 0x81, 0x93, 0x28, 0x5e, 0xf4, 0x54, 0x03, 0xe1, 0x2a, 0x94, 0xc9, 0xbb, 0x9c, + 0x84, 0x9e, 0xdd, 0x11, 0x08, 0x05, 0x89, 0x20, 0x5d, 0x71, 0x23, 0x21, 0xe3, 0xb4, 0x0c, 0xda, + 0x86, 0x05, 0x46, 0x18, 0xa3, 0xbe, 0xb7, 0xb6, 0xbb, 0x2b, 0x6e, 0x41, 0x7d, 0x59, 0x5d, 0x96, + 0x6a, 0x9f, 0x52, 0x70, 0x0b, 0xad, 0x2c, 0xfb, 0x48, 0x92, 0xa2, 0xfb, 0x86, 0x22, 0xe1, 0x41, + 0x15, 0xe8, 0x36, 0xcc, 0x77, 0x32, 0x6f, 0x85, 0xd4, 0x2d, 0x46, 0xdf, 0xbf, 0xb2, 0xef, 0x8c, + 0xf0, 0x80, 0x34, 0xfa, 0x0a, 0x98, 0x69, 0x4a, 0xcb, 0xef, 0x85, 0x0e, 0xc1, 0xb6, 0xd7, 0x26, + 0xd1, 0x37, 0x24, 0xa5, 0xda, 0xf3, 0x87, 0x07, 0x2b, 0xe6, 0xe6, 0x08, 0x19, 0x3c, 0x72, 0x34, + 0x62, 0xb0, 0x18, 0x9b, 0xbf, 0x1d, 0xda, 0xbb, 0xbb, 0xd4, 0x69, 0xfa, 0x1d, 0xea, 0xf4, 0xe5, + 0x9d, 0xa7, 0x54, 0x7b, 0x45, 0x4d, 0x70, 0x71, 0x63, 0x98, 0xd0, 0xd1, 0xc1, 0xca, 0xf3, 0xca, + 0xf6, 0xa1, 0x7c, 0x3c, 0x5c, 0x37, 0xda, 0x82, 0x4b, 0x7b, 0xc4, 0xee, 0xf0, 0xbd, 0xfa, 0x1e, + 0x71, 0xf6, 0xe3, 0x18, 0x32, 0x67, 0x65, 0x6c, 0xc5, 0xfb, 0x7a, 0xe9, 0xee, 0x71, 0x11, 0x3c, + 0x6c, 0x1c, 0xfa, 0x99, 0x01, 0x8b, 0x03, 0x2b, 0x1e, 0x7d, 0x6b, 0x64, 0xce, 0x4f, 0xf4, 0x49, + 0x47, 0x6b, 0x98, 0xce, 0xda, 0x73, 0x62, 0x39, 0x86, 0xb2, 0xf0, 0xf0, 0x59, 0xa0, 0x9b, 0x00, + 0x34, 0x78, 0xcd, 0xee, 0xd2, 0x0e, 0x25, 0xcc, 0xbc, 0x24, 0xf7, 0x6b, 0x49, 0xf8, 0x79, 0xa3, + 0x19, 0x53, 0x45, 0x2e, 0x55, 0x4f, 0x7d, 0x9c, 0x92, 0x46, 0x9b, 0x30, 0xaf, 0x9e, 0xfa, 0x6a, + 0x63, 0x2e, 0xca, 0x8d, 0xf9, 0x84, 0xbc, 0xb5, 0x37, 0xd3, 0x9c, 0xa3, 0x63, 0x14, 0x3c, 0x30, + 0x16, 0xd5, 0xe1, 0x62, 0xda, 0x13, 0xa2, 0x1b, 0xc4, 0xa2, 0x54, 0xb8, 0x28, 0x6e, 0x1f, 0x9b, + 0x83, 0x4c, 
0x7c, 0x5c, 0x1e, 0xf9, 0xb0, 0x48, 0xbd, 0x61, 0x2e, 0xf3, 0x8c, 0x54, 0xf4, 0x92, + 0x58, 0x9f, 0x86, 0xf7, 0x78, 0x77, 0x19, 0xca, 0xc7, 0xc3, 0xf5, 0xa2, 0x06, 0x5c, 0xe2, 0x11, + 0x61, 0x9d, 0x8a, 0xc2, 0x74, 0xa7, 0x27, 0xae, 0xad, 0xe6, 0xb3, 0x12, 0xee, 0x59, 0xe1, 0x2a, + 0xdb, 0xc7, 0xd9, 0x78, 0xd8, 0x98, 0xa5, 0x5b, 0x30, 0x97, 0x49, 0x68, 0xa7, 0xaa, 0x30, 0x7f, + 0x9c, 0x13, 0xa3, 0x53, 0x45, 0x05, 0xfa, 0xae, 0x01, 0xb3, 0xe9, 0x05, 0x52, 0x15, 0x43, 0xe3, + 0x0c, 0x5e, 0xab, 0xaa, 0xb2, 0x45, 0x7f, 0xd7, 0x95, 0xe6, 0xe1, 0x0c, 0x28, 0xea, 0x0d, 0xe9, + 0x1b, 0xac, 0x8d, 0x5b, 0xb4, 0x9c, 0xb8, 0x6b, 0x60, 0x7d, 0x60, 0xc0, 0xf0, 0x38, 0x40, 0x3e, + 0x14, 0x1d, 0xf5, 0xd1, 0x9f, 0x5a, 0x91, 0xb1, 0xbf, 0x22, 0xca, 0x7c, 0x3b, 0x18, 0xbd, 0x50, + 0x89, 0x69, 0x58, 0x83, 0x58, 0x7f, 0x37, 0xa0, 0x20, 0xdf, 0x64, 0xa0, 0x17, 0x52, 0xfb, 0x59, + 0x2b, 0x2b, 0x0b, 0xf2, 0x6f, 0x90, 0x7e, 0xb4, 0xb9, 0x97, 0x33, 0x9b, 0x9b, 0x1c, 0xa4, 0x6f, + 0x0a, 0xa2, 0xda, 0x6b, 0x74, 0x1d, 0xa6, 0xc9, 0xee, 0x2e, 0x71, 0xb8, 0x3a, 0xc5, 0x5e, 0x88, + 0x4b, 0xc7, 0x0d, 0x49, 0x15, 0x67, 0x8d, 0x04, 0x8b, 0x1e, 0xb1, 0x12, 0x46, 0x14, 0x4a, 0x9c, + 0x76, 0xc9, 0x9a, 0xeb, 0x12, 0xf7, 0x4c, 0xda, 0xbd, 0xf2, 0x2e, 0xba, 0x1d, 0xab, 0xc4, 0x89, + 0x76, 0x71, 0x87, 0x7f, 0x4e, 0x9c, 0x73, 0xee, 0xa6, 0xef, 0xd8, 0x9d, 0xa8, 0x56, 0xc7, 0x64, + 0x97, 0x84, 0xc4, 0x73, 0xe4, 0x07, 0x4c, 0x76, 0x40, 0xef, 0x84, 0x7e, 0x2f, 0x6e, 0xd0, 0xca, + 0x75, 0x5b, 0x6b, 0x36, 0x24, 0x0d, 0x6b, 0xae, 0x28, 0xbc, 0xf6, 0xa9, 0xe7, 0xaa, 0xd5, 0xd0, + 0x85, 0xd7, 0x1b, 0xd4, 0x73, 0xb1, 0xe4, 0xe8, 0x4a, 0x31, 0x3f, 0xaa, 0x52, 0xb4, 0x6e, 0x43, + 0x39, 0xf5, 0xcd, 0xa1, 0xa8, 0x02, 0xba, 0xe2, 0x4f, 0xd3, 0xe6, 0x7b, 0x83, 0x55, 0xc0, 0x56, + 0xcc, 0xc0, 0x89, 0x4c, 0xed, 0x6b, 0x0f, 0x1f, 0x2d, 0x5f, 0xf8, 0xf0, 0xd1, 0xf2, 0x85, 0x8f, + 0x1e, 0x2d, 0x5f, 0x78, 0xef, 0x70, 0xd9, 0x78, 0x78, 0xb8, 0x6c, 0x7c, 0x78, 0xb8, 0x6c, 0x7c, + 0x74, 0xb8, 0x6c, 0x7c, 0x7c, 0xb8, 0x6c, 0xfc, 0xf4, 0x2f, 0xcb, 0x17, 0xbe, 0x7a, 0x7d, 0xac, + 0x2f, 0x7e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x40, 0xb2, 0x39, 0xc1, 0x29, 0x2c, 0x00, 0x00, } func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { @@ -1609,6 +1648,11 @@ func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a i -= len(m.IP) copy(dAtA[i:], m.IP) i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) @@ -1637,6 +1681,13 @@ func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } i -= len(m.Protocol) copy(dAtA[i:], m.Protocol) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) @@ -1832,6 +1883,13 @@ func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x22 } } + if m.IPMode != nil { + i -= len(*m.IPMode) + copy(dAtA[i:], *m.IPMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPMode))) + i-- + dAtA[i] = 0x1a + } i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) @@ -2431,6 +2489,34 @@ func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *PodSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2902,9 +2988,26 @@ func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x32 + } i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) i-- dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) i-- dAtA[i] = 0x18 @@ -2941,6 +3044,15 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.TrafficDistribution != nil { + i -= len(*m.TrafficDistribution) + copy(dAtA[i:], *m.TrafficDistribution) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.TrafficDistribution))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } if m.InternalTrafficPolicy != nil { i -= len(*m.InternalTrafficPolicy) copy(dAtA[i:], *m.InternalTrafficPolicy) @@ -3399,6 +3511,8 @@ func (m *EndpointAddress) Size() (n int) { _ = l l = len(m.IP) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) if m.NodeName != nil { l = len(*m.NodeName) n += 1 + l + sovGenerated(uint64(l)) @@ -3417,6 +3531,10 @@ func (m *EndpointPort) Size() (n int) { n += 1 + sovGenerated(uint64(m.Port)) l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3485,6 +3603,10 @@ func (m *LoadBalancerIngress) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Hostname) n += 1 + l + sovGenerated(uint64(l)) + if m.IPMode != nil { + l = len(*m.IPMode) + n += 1 + l + sovGenerated(uint64(l)) + } if len(m.Ports) > 0 { for _, e := range m.Ports { l = e.Size() @@ -3707,6 +3829,17 @@ func (m *PodList) Size() (n int) { return n } +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *PodSpec) Size() (n int) { if m == nil { return 0 @@ -3884,7 +4017,13 @@ func (m *ServicePort) Size() (n int) { l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) n += 1 + sovGenerated(uint64(m.NodePort)) + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -3959,6 +4098,10 @@ func (m *ServiceSpec) Size() (n int) { l = len(*m.InternalTrafficPolicy) n += 2 + l + sovGenerated(uint64(l)) } + if m.TrafficDistribution != nil { + l = len(*m.TrafficDistribution) + n += 2 + l + sovGenerated(uint64(l)) + } return n 
} @@ -4129,6 +4272,7 @@ func (this *EndpointAddress) String() string { } s := strings.Join([]string{`&EndpointAddress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, `}`, }, "") @@ -4142,6 +4286,7 @@ func (this *EndpointPort) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Port:` + fmt.Sprintf("%v", this.Port) + `,`, `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, `}`, }, "") return s @@ -4211,6 +4356,7 @@ func (this *LoadBalancerIngress) String() string { s := strings.Join([]string{`&LoadBalancerIngress{`, `IP:` + fmt.Sprintf("%v", this.IP) + `,`, `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `IPMode:` + valueToStringGenerated(this.IPMode) + `,`, `Ports:` + repeatedStringForPorts + `,`, `}`, }, "") @@ -4400,6 +4546,16 @@ func (this *PodList) String() string { }, "") return s } +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} func (this *PodSpec) String() string { if this == nil { return "nil" @@ -4554,7 +4710,9 @@ func (this *ServicePort) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, `}`, }, "") return s @@ -4595,6 +4753,7 @@ func (this *ServiceSpec) String() string { `IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`, `LoadBalancerClass:` + valueToStringGenerated(this.LoadBalancerClass) + `,`, `InternalTrafficPolicy:` + valueToStringGenerated(this.InternalTrafficPolicy) + `,`, + `TrafficDistribution:` + valueToStringGenerated(this.TrafficDistribution) + `,`, `}`, }, "") return s @@ -5449,6 +5608,38 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) @@ -5615,6 +5806,39 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } m.Protocol = Protocol(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6081,6 +6305,39 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := LoadBalancerIPMode(dAtA[iNdEx:postIndex]) + m.IPMode = &s + iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) @@ -7836,6 +8093,88 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9374,6 +9713,39 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { break } } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) @@ -9393,6 +9765,39 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10046,6 +10451,39 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { s := ServiceInternalTrafficPolicy(dAtA[iNdEx:postIndex]) m.InternalTrafficPolicy = &s iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrafficDistribution", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.TrafficDistribution = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto index 719483287e..f913f32936 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto @@ -9,6 +9,7 @@ syntax = "proto2"; package github.com.cilium.cilium.pkg.k8s.slim.k8s.api.core.v1; import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto"; +import "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -104,7 +105,7 @@ message ContainerState { message ContainerStateRunning { // Time at which the container was last (re-)started // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time startedAt = 1; + optional 
.github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time startedAt = 1; } // ContainerStatus contains details for the current status of this container. @@ -128,6 +129,10 @@ message EndpointAddress { // or link-local multicast (224.0.0.0/24 or ff02::/16). optional string ip = 1; + // The Hostname of this endpoint + // +optional + optional string hostname = 3; + // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. // +optional optional string nodeName = 4; @@ -151,6 +156,24 @@ message EndpointPort { // Default is TCP. // +optional optional string protocol = 3; + + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. + // This field follows standard Kubernetes label syntax. + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as + // mycompany.com/my-custom-protocol. + // +optional + optional string appProtocol = 4; } // EndpointSubset is a group of addresses with a common set of ports. The @@ -170,10 +193,12 @@ message EndpointSubset { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic repeated EndpointAddress addresses = 1; // Port numbers available on the related IP addresses. // +optional + // +listType=atomic repeated EndpointPort ports = 3; } @@ -194,7 +219,7 @@ message Endpoints { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; // The set of all endpoints is the union of all subsets. Addresses are placed into // subsets according to the IPs they share. A single address with multiple ports, @@ -204,6 +229,7 @@ message Endpoints { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic repeated EndpointSubset subsets = 2; } @@ -212,7 +238,7 @@ message EndpointsList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // List of endpoints. repeated Endpoints items = 2; @@ -231,6 +257,15 @@ message LoadBalancerIngress { // +optional optional string hostname = 2; + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. 
+ // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + optional string ipMode = 3; + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -243,6 +278,7 @@ message LoadBalancerStatus { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic repeated LoadBalancerIngress ingress = 1; } @@ -252,7 +288,7 @@ message Namespace { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; } // NamespaceList is a list of Namespaces. @@ -260,7 +296,7 @@ message NamespaceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // Items is the list of Namespace objects in the list. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -273,7 +309,7 @@ message Node { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -315,7 +351,7 @@ message NodeList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // List of nodes repeated Node items = 2; @@ -332,6 +368,7 @@ message NodeSpec { // each of IPv4 and IPv6. // +optional // +patchStrategy=merge + // +listType=set repeated string podCIDRs = 7; // ID of the node assigned by the cloud provider in the format: :// @@ -340,6 +377,7 @@ message NodeSpec { // If specified, the node's taints. // +optional + // +listType=atomic repeated Taint taints = 5; } @@ -350,6 +388,8 @@ message NodeStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeCondition conditions = 4; // List of addresses reachable to the node. @@ -365,6 +405,8 @@ message NodeStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated NodeAddress addresses = 5; } @@ -374,7 +416,7 @@ message Pod { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -403,11 +445,11 @@ message PodCondition { // Last time we probed the condition. // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time lastProbeTime = 3; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time lastProbeTime = 3; // Last time the condition transitioned from one status to another. // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time lastTransitionTime = 4; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time lastTransitionTime = 4; // Unique, one-word, CamelCase reason for the condition's last transition. // +optional @@ -418,12 +460,10 @@ message PodCondition { optional string message = 6; } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodIP represents a single IP address allocated to the pod. message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod + // IP is the IP address assigned to the pod + // +required optional string ip = 1; } @@ -432,13 +472,19 @@ message PodList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // List of pods. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md repeated Pod items = 2; } +// PodReadinessGate contains the reference to a pod condition +message PodReadinessGate { + // ConditionType refers to a condition in the pod's condition list with matching type. + optional string conditionType = 1; +} + // PodSpec is a description of a pod. message PodSpec { // List of initialization containers belonging to the pod. @@ -456,6 +502,8 @@ message PodSpec { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container initContainers = 20; // List of containers belonging to the pod. @@ -464,6 +512,8 @@ message PodSpec { // Cannot be updated. // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name repeated Container containers = 2; // ServiceAccountName is the name of the ServiceAccount to use to run this pod. @@ -471,10 +521,11 @@ message PodSpec { // +optional optional string serviceAccountName = 8; - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. 
+ // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename optional string nodeName = 10; // Host networking requested for this pod. Use the host's network namespace. @@ -515,13 +566,17 @@ message PodStatus { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type repeated PodCondition conditions = 2; - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional optional string hostIP = 5; - // IP address allocated to the pod. Routable at least within the cluster. + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. // +optional optional string podIP = 6; @@ -532,16 +587,19 @@ message PodStatus { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip repeated PodIP podIPs = 12; // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time startTime = 7; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time startTime = 7; // The list has one entry per container in the manifest. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic repeated ContainerStatus containerStatuses = 8; // The Quality of Service (QOS) classification assigned to the pod based on resource requirements @@ -551,6 +609,7 @@ message PodStatus { optional string qosClass = 9; } +// PortStatus represents the error condition of a service port message PortStatus { // Port is the port number of the service port of which status is recorded here optional int32 port = 1; @@ -580,7 +639,7 @@ message Secret { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; // Immutable, if set to true, ensures that data stored in the Secret cannot // be updated (only object metadata can be modified). @@ -615,7 +674,7 @@ message SecretList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // Items is a list of secret objects. // More info: https://kubernetes.io/docs/concepts/configuration/secret @@ -629,7 +688,7 @@ message Service { // Standard object's metadata. 
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status @@ -649,7 +708,7 @@ message ServiceList { // Standard list metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.ListMeta metadata = 1; // List of services repeated Service items = 2; @@ -671,9 +730,38 @@ message ServicePort { // +optional optional string protocol = 2; + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. + // This field follows standard Kubernetes label syntax. + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as + // mycompany.com/my-custom-protocol. + // +optional + optional string appProtocol = 6; + // The port that will be exposed by this service. optional int32 port = 3; + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString targetPort = 4; + // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is // specified, in-range, and not in use it will be used, otherwise the @@ -778,6 +866,7 @@ message ServiceSpec { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic repeated string externalIPs = 5; // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -792,10 +881,9 @@ message ServiceSpec { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. 
- // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. + // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional optional string loadBalancerIP = 8; @@ -804,6 +892,7 @@ message ServiceSpec { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic repeated string loadBalancerSourceRanges = 9; // externalTrafficPolicy describes how nodes distribute service traffic they @@ -891,6 +980,16 @@ message ServiceSpec { // (possibly modified by topology and other features). // +optional optional string internalTrafficPolicy = 22; + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). + // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +optional + optional string trafficDistribution = 23; } // ServiceStatus represents the current status of a service. @@ -906,7 +1005,7 @@ message ServiceStatus { // +patchStrategy=merge // +listType=map // +listMapKey=type - repeated github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Condition conditions = 2; + repeated .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Condition conditions = 2; } // SessionAffinityConfig represents the configurations of session affinity. @@ -934,7 +1033,7 @@ message Taint { // TimeAdded represents the time at which the taint was added. // It is only written for NoExecute taints. // +optional - optional github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time timeAdded = 4; + optional .github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.meta.v1.Time timeAdded = 4; } // TypedLocalObjectReference contains enough information to let you locate the diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go index a4e6d69182..029ced91c2 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go @@ -7,6 +7,7 @@ package v1 import ( slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr" ) const ( @@ -110,19 +111,6 @@ const ( ConditionUnknown ConditionStatus = "Unknown" ) -// PodQOSClass defines the supported qos classes of Pods. -// +enum -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. 
- PodQOSBestEffort PodQOSClass = "BestEffort" -) - // ContainerStateRunning is a running state of a container. type ContainerStateRunning struct { // Time at which the container was last (re-)started @@ -193,6 +181,9 @@ const ( // DisruptionTarget indicates the pod is about to be terminated due to a // disruption (such as preemption, eviction API or garbage-collection). DisruptionTarget PodConditionType = "DisruptionTarget" + // PodReadyToStartContainers pod sandbox is successfully configured and + // the pod is ready to launch containers. + PodReadyToStartContainers PodConditionType = "PodReadyToStartContainers" ) // These are reasons for a pod's transition to a condition. @@ -200,6 +191,22 @@ const ( // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + + // PodReasonSchedulingGated reason in PodScheduled PodCondition means that the scheduler + // skips scheduling the pod because one or more scheduling gates are still present. + PodReasonSchedulingGated = "SchedulingGated" + + // PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens + // during scheduling, for example due to nodeAffinity parsing errors. + PodReasonSchedulerError = "SchedulerError" + + // TerminationByKubelet reason in DisruptionTarget pod condition indicates that the termination + // is initiated by kubelet + PodReasonTerminationByKubelet = "TerminationByKubelet" + + // PodReasonPreemptionByScheduler reason in DisruptionTarget pod condition indicates that the + // disruption was initiated by scheduler's preemption. + PodReasonPreemptionByScheduler = "PreemptionByScheduler" ) // PodCondition contains details for the current condition of this pod. @@ -225,6 +232,12 @@ type PodCondition struct { Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` } +const ( + // DefaultTerminationGracePeriodSeconds indicates the default duration in + // seconds a pod needs to terminate gracefully. + DefaultTerminationGracePeriodSeconds = 30 +) + // The node this Taint is attached to has the "effect" on // any pod that does not tolerate the Taint. type Taint struct { @@ -267,6 +280,12 @@ const ( TaintEffectNoExecute TaintEffect = "NoExecute" ) +// PodReadinessGate contains the reference to a pod condition +type PodReadinessGate struct { + // ConditionType refers to a condition in the pod's condition list with matching type. + ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"` +} + // PodSpec is a description of a pod. type PodSpec struct { // List of initialization containers belonging to the pod. @@ -284,6 +303,8 @@ type PodSpec struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` // List of containers belonging to the pod. // Containers cannot currently be added or removed. @@ -291,15 +312,20 @@ type PodSpec struct { // Cannot be updated. 
// +patchMergeKey=name // +patchStrategy=merge + // +listType=map + // +listMapKey=name Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ // +optional ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional + + // NodeName indicates in which node this pod is scheduled. + // If empty, this pod is a candidate for scheduling by the scheduler defined in schedulerName. + // Once this field is set, the kubelet for this node becomes responsible for the lifecycle of this pod. + // This field should not be used to express a desire for the pod to be scheduled on a specific node. + // https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodename NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` // Host networking requested for this pod. Use the host's network namespace. // If this option is set, the ports that will be used must be specified. @@ -309,13 +335,24 @@ type PodSpec struct { HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// -// IP: An IP address allocated to the pod. Routable at least within the cluster. +// PodQOSClass defines the supported qos classes of Pods. +// +enum +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodIP represents a single IP address allocated to the pod. type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // IP is the IP address assigned to the pod + // +required + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -347,11 +384,17 @@ type PodStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + + // hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. + // A pod can be assigned to a node that has a problem in kubelet which in turns mean that HostIP will + // not be updated even if there is a node is assigned to pod // +optional HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. + + // podIP address allocated to the pod. Routable at least within the cluster. // Empty if not yet allocated. 
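The hunks above also add new pod condition and reason constants (PodReadyToStartContainers, SchedulingGated, SchedulerError, TerminationByKubelet, PreemptionByScheduler) alongside the list-type markers on PodStatus.Conditions. A minimal standalone sketch of how a consumer of the slim types might check the new condition; the slim_corev1 import alias is an assumption, and the Type/Status fields on the slim PodCondition plus the ConditionTrue constant are assumed to mirror upstream core/v1.

package main

import (
	"fmt"

	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
)

// sandboxReady reports whether the pod sandbox is configured and the pod can
// start containers, using the PodReadyToStartContainers condition added above.
// Field names on PodCondition are assumed to mirror upstream core/v1.
func sandboxReady(pod *slim_corev1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == slim_corev1.PodReadyToStartContainers && c.Status == slim_corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(sandboxReady(&slim_corev1.Pod{})) // false: no conditions set
}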
// +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` @@ -362,6 +405,8 @@ type PodStatus struct { // +optional // +patchStrategy=merge // +patchMergeKey=ip + // +listType=map + // +listMapKey=ip PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` // RFC 3339 date and time at which the object was acknowledged by the Kubelet. @@ -372,6 +417,7 @@ type PodStatus struct { // The list has one entry per container in the manifest. // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status // +optional + // +listType=atomic ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` // The Quality of Service (QOS) classification assigned to the pod based on resource requirements // See PodQOSClass type for available QOS classes @@ -544,6 +590,7 @@ type LoadBalancerStatus struct { // Ingress is a list containing ingress points for the load-balancer. // Traffic intended for the service should be sent to these ingress points. // +optional + // +listType=atomic Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } @@ -560,6 +607,15 @@ type LoadBalancerIngress struct { // +optional Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` + // IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. + // Setting this to "VIP" indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + // Setting this to "Proxy" indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and node port or the pod's IP and port. + // Service implementations may use this information to adjust traffic routing. + // +optional + IPMode *LoadBalancerIPMode `json:"ipMode,omitempty" protobuf:"bytes,3,opt,name=ipMode"` + // Ports is a list of records of service ports // If used, every port defined in the service should have an entry in it // +listType=atomic @@ -577,6 +633,8 @@ const ( IPv4Protocol IPFamily = "IPv4" // IPv6Protocol indicates that this IP is IPv6 protocol IPv6Protocol IPFamily = "IPv6" + // IPFamilyUnknown indicates that this IP is unknown protocol + IPFamilyUnknown IPFamily = "" ) // IPFamilyPolicy represents the dual-stack-ness requested or required by a Service @@ -698,6 +756,7 @@ type ServiceSpec struct { // at a node with this IP. A common example is external load-balancers // that are not part of the Kubernetes system. // +optional + // +listType=atomic ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` // Supports "ClientIP" and "None". Used to maintain session affinity. @@ -712,10 +771,9 @@ type ServiceSpec struct { // This feature depends on whether the underlying cloud-provider supports specifying // the loadBalancerIP when a load balancer is created. // This field will be ignored if the cloud-provider does not support the feature. - // Deprecated: This field was under-specified and its meaning varies across implementations, - // and it cannot support dual-stack. - // As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available. - // This field may be removed in a future API version. + // Deprecated: This field was under-specified and its meaning varies across implementations. 
+ // Using it is non-portable and it may not support dual-stack. + // Users are encouraged to use implementation-specific annotations when available. // +optional LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` @@ -724,6 +782,7 @@ type ServiceSpec struct { // cloud-provider does not support the feature." // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ // +optional + // +listType=atomic LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` // externalTrafficPolicy describes how nodes distribute service traffic they @@ -759,6 +818,12 @@ type ServiceSpec struct { // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + // TopologyKeys is tombstoned to show why 16 is reserved protobuf tag. + // TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` + + // IPFamily is tombstoned to show why 15 is a reserved protobuf tag. + // IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + // IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this // service. This field is usually assigned automatically based on cluster // configuration and the ipFamilyPolicy field. If this field is specified @@ -811,6 +876,16 @@ type ServiceSpec struct { // (possibly modified by topology and other features). // +optional InternalTrafficPolicy *ServiceInternalTrafficPolicy `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"` + + // TrafficDistribution offers a way to express preferences for how traffic is + // distributed to Service endpoints. Implementations can use this field as a + // hint, but are not required to guarantee strict adherence. If the field is + // not set, the implementation will apply its default routing strategy. If set + // to "PreferClose", implementations should prioritize endpoints that are + // topologically close (e.g., same zone). + // This is an alpha field and requires enabling ServiceTrafficDistribution feature. + // +optional + TrafficDistribution *string `json:"trafficDistribution,omitempty" protobuf:"bytes,23,opt,name=trafficDistribution"` } // ServicePort contains information on service's port. @@ -829,9 +904,38 @@ type ServicePort struct { // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. + // This field follows standard Kubernetes label syntax. + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). 
+ // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as + // mycompany.com/my-custom-protocol. + // +optional + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` + // The port that will be exposed by this service. Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` + // The port on each node on which this service is exposed when type is // NodePort or LoadBalancer. Usually assigned by the system. If a value is // specified, in-range, and not in use it will be used, otherwise the @@ -872,6 +976,12 @@ type Service struct { Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceList holds a list of services. @@ -917,6 +1027,7 @@ type Endpoints struct { // NotReadyAddresses in the same subset. // Sets of addresses and ports that comprise a service. // +optional + // +listType=atomic Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` } @@ -937,9 +1048,11 @@ type EndpointSubset struct { // IP addresses which offer the related ports that are marked as ready. These endpoints // should be considered safe for load balancers and clients to utilize. // +optional + // +listType=atomic Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` // Port numbers available on the related IP addresses. // +optional + // +listType=atomic Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` } @@ -950,6 +1063,9 @@ type EndpointAddress struct { // May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), // or link-local multicast (224.0.0.0/24 or ff02::/16). IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. // +optional NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` @@ -973,6 +1089,24 @@ type EndpointPort struct { // Default is TCP. 
// +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` + + // The application protocol for this port. + // This is used as a hint for implementations to offer richer behavior for protocols that they understand. + // This field follows standard Kubernetes label syntax. + // Valid values are either: + // + // * Un-prefixed protocol names - reserved for IANA standard service names (as per + // RFC-6335 and https://www.iana.org/assignments/service-names). + // + // * Kubernetes-defined prefixed names: + // * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior- + // * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455 + // * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455 + // + // * Other protocols should use implementation-defined prefixed names such as + // mycompany.com/my-custom-protocol. + // +optional + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -1000,14 +1134,15 @@ type NodeSpec struct { // each of IPv4 and IPv6. // +optional // +patchStrategy=merge + // +listType=set PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` - // If specified, the node's taints. // +optional + // +listType=atomic Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` } @@ -1018,6 +1153,8 @@ type NodeStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` // List of addresses reachable to the node. // Queried from cloud provider, if available. @@ -1032,6 +1169,8 @@ type NodeStatus struct { // +optional // +patchMergeKey=type // +patchStrategy=merge + // +listType=map + // +listMapKey=type Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` } @@ -1048,12 +1187,10 @@ const ( NodeTerminated NodePhase = "Terminated" ) -// +enum type NodeConditionType string // These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here. -// The built-in set of conditions are: -// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +// Relevant events contain "NodeReady", "NodeNotReady", "NodeSchedulable", and "NodeNotSchedulable". const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" @@ -1078,7 +1215,6 @@ type NodeCondition struct { Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` } -// +enum type NodeAddressType string // These are built-in addresses type of node. A cloud provider may set a type not listed here. 
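By the end of this hunk the slim Service and Endpoints types carry the new AppProtocol, TargetPort, TrafficDistribution, Hostname and IPMode fields. A minimal standalone sketch of populating them; the import aliases, the local ptr helper and intstr.FromString are assumptions, and the LoadBalancerIPMode constants it references are defined a little further down in this same patch.

package main

import (
	"fmt"

	slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
	"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
)

// ptr is a small local helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func main() {
	// ServicePort gains AppProtocol (*string) and TargetPort (intstr.IntOrString).
	sp := slim_corev1.ServicePort{
		Port:        80,
		TargetPort:  intstr.FromString("http"), // assumed helper, mirrors upstream intstr
		AppProtocol: ptr("kubernetes.io/h2c"),  // protocol hint, standard label syntax
	}

	// ServiceSpec gains the alpha TrafficDistribution field.
	spec := slim_corev1.ServiceSpec{
		TrafficDistribution: ptr("PreferClose"),
	}

	// LoadBalancerIngress gains IPMode ("VIP" or "Proxy").
	ing := slim_corev1.LoadBalancerIngress{
		IP:     "192.0.2.10",
		IPMode: ptr(slim_corev1.LoadBalancerIPModeVIP),
	}

	fmt.Println(sp.Port, *spec.TrafficDistribution, *ing.IPMode)
}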
@@ -1363,7 +1499,6 @@ type SecretList struct { } // PortStatus represents the error condition of a service port - type PortStatus struct { // Port is the port number of the service port of which status is recorded here Port int32 `json:"port" protobuf:"varint,1,opt,name=port"` @@ -1384,3 +1519,15 @@ type PortStatus struct { // +kubebuilder:validation:MaxLength=316 Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` } + +// LoadBalancerIPMode represents the mode of the LoadBalancer ingress IP +type LoadBalancerIPMode string + +const ( + // LoadBalancerIPModeVIP indicates that traffic is delivered to the node with + // the destination set to the load-balancer's IP and port. + LoadBalancerIPModeVIP LoadBalancerIPMode = "VIP" + // LoadBalancerIPModeProxy indicates that traffic is delivered to the node or pod with + // the destination set to the node's IP and port or the pod's IP and port. + LoadBalancerIPModeProxy LoadBalancerIPMode = "Proxy" +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types_cilium.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types_cilium.go new file mode 100644 index 0000000000..251316114d --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types_cilium.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package v1 + +// GetHostIP returns the Host IP of the pod. +func (p *Pod) GetHostIP() string { + return p.Status.HostIP +} + +// GetAPIVersion returns the API Version for the pod. +func (p *Pod) GetAPIVersion() string { + return SchemeGroupVersion.Version +} + +// GetKind returns its Kind. +func (p *Pod) GetKind() string { + return "Pod" +} + +// IsNil returns true if this structure is nil. +func (p *Pod) IsNil() bool { + return p == nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go index 62e4787571..53f82d9e9c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepcopy.go @@ -175,6 +175,11 @@ func (in *EndpointAddress) DeepCopy() *EndpointAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { *out = *in + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } return } @@ -201,7 +206,9 @@ func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -285,6 +292,11 @@ func (in *EndpointsList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { *out = *in + if in.IPMode != nil { + in, out := &in.IPMode, &out.IPMode + *out = new(LoadBalancerIPMode) + **out = **in + } if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]PortStatus, len(*in)) @@ -629,6 +641,22 @@ func (in *PodList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate. +func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { + if in == nil { + return nil + } + out := new(PodReadinessGate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = *in @@ -869,6 +897,12 @@ func (in *ServiceList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServicePort) DeepCopyInto(out *ServicePort) { *out = *in + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + out.TargetPort = in.TargetPort return } @@ -888,7 +922,9 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ServicePort, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -937,6 +973,11 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(ServiceInternalTrafficPolicy) **out = **in } + if in.TrafficDistribution != nil { + in, out := &in.TrafficDistribution, &out.TrafficDistribution + *out = new(string) + **out = **in + } return } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go index ce6e352f0e..48ae7180c4 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go @@ -182,6 +182,9 @@ func (in *EndpointAddress) DeepEqual(other *EndpointAddress) bool { if in.IP != other.IP { return false } + if in.Hostname != other.Hostname { + return false + } if (in.NodeName == nil) != (other.NodeName == nil) { return false } else if in.NodeName != nil { @@ -209,6 +212,13 @@ func (in *EndpointPort) DeepEqual(other *EndpointPort) bool { if in.Protocol != other.Protocol { return false } + if (in.AppProtocol == nil) != (other.AppProtocol == nil) { + return false + } else if in.AppProtocol != nil { + if *in.AppProtocol != *other.AppProtocol { + return false + } + } return true } @@ -340,6 +350,14 @@ func (in *LoadBalancerIngress) DeepEqual(other *LoadBalancerIngress) bool { if in.Hostname != other.Hostname { return false } + if (in.IPMode == nil) != (other.IPMode == nil) { + return false + } else if in.IPMode != nil { + if *in.IPMode != *other.IPMode { + return false + } + } + if ((in.Ports != nil) && (other.Ports != nil)) || ((in.Ports == nil) != (other.Ports == nil)) { in, other := &in.Ports, &other.Ports if other == nil { @@ -738,6 +756,20 @@ func (in *PodList) DeepEqual(other *PodList) bool { return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *PodReadinessGate) DeepEqual(other *PodReadinessGate) bool { + if other == nil { + return false + } + + if in.ConditionType != other.ConditionType { + return false + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. 
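The generated deepcopy above clones AppProtocol explicitly, and EndpointSubset/ServiceSpec switch from copy() to per-element DeepCopyInto, because ServicePort and EndpointPort now contain pointer fields. A minimal standalone sketch, not using the vendored types, of why a shallow copy would alias that pointer:

package main

import "fmt"

type port struct {
	Name        string
	AppProtocol *string // pointer field, like the new AppProtocol
}

func main() {
	h2c := "kubernetes.io/h2c"
	in := []port{{Name: "http", AppProtocol: &h2c}}

	// Shallow copy: the AppProtocol pointer is shared with the original.
	shallow := make([]port, len(in))
	copy(shallow, in)

	// Deep copy: each element clones the pointed-to string.
	deep := make([]port, len(in))
	for i := range in {
		deep[i] = in[i]
		if in[i].AppProtocol != nil {
			v := *in[i].AppProtocol
			deep[i].AppProtocol = &v
		}
	}

	*in[0].AppProtocol = "kubernetes.io/ws" // mutate the original
	fmt.Println(*shallow[0].AppProtocol)    // "kubernetes.io/ws" (aliased)
	fmt.Println(*deep[0].AppProtocol)       // "kubernetes.io/h2c" (independent)
}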
in must be non-nil. func (in *PodSpec) DeepEqual(other *PodSpec) bool { @@ -1079,9 +1111,21 @@ func (in *ServicePort) DeepEqual(other *ServicePort) bool { if in.Protocol != other.Protocol { return false } + if (in.AppProtocol == nil) != (other.AppProtocol == nil) { + return false + } else if in.AppProtocol != nil { + if *in.AppProtocol != *other.AppProtocol { + return false + } + } + if in.Port != other.Port { return false } + if in.TargetPort != other.TargetPort { + return false + } + if in.NodePort != other.NodePort { return false } @@ -1252,6 +1296,14 @@ func (in *ServiceSpec) DeepEqual(other *ServiceSpec) bool { } } + if (in.TrafficDistribution == nil) != (other.TrafficDistribution == nil) { + return false + } else if in.TrafficDistribution != nil { + if *in.TrafficDistribution != *other.TrafficDistribution { + return false + } + } + return true } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go index 298e3393b1..32b2988223 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/labels.go @@ -6,8 +6,11 @@ package labels import ( + "fmt" "sort" "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" ) // Labels allows you to present labels independently from their storage. @@ -130,3 +133,31 @@ func Equals(labels1, labels2 Set) bool { } return true } + +// ConvertSelectorToLabelsMap converts selector string to labels map +// and validates keys and values +func ConvertSelectorToLabelsMap(selector string, opts ...field.PathOption) (Set, error) { + labelsMap := Set{} + + if len(selector) == 0 { + return labelsMap, nil + } + + labels := strings.Split(selector, ",") + for _, label := range labels { + l := strings.Split(label, "=") + if len(l) != 2 { + return labelsMap, fmt.Errorf("invalid selector: %s", l) + } + key := strings.TrimSpace(l[0]) + if err := validateLabelKey(key, field.ToPath(opts...)); err != nil { + return labelsMap, err + } + value := strings.TrimSpace(l[1]) + if err := validateLabelValue(key, value, field.ToPath(opts...)); err != nil { + return labelsMap, err + } + labelsMap[key] = value + } + return labelsMap, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go index 13358faed8..d19a9e6788 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/selector.go @@ -7,6 +7,7 @@ package labels import ( "fmt" + "slices" "sort" "strconv" "strings" @@ -15,7 +16,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/klog/v2" - stringslices "k8s.io/utils/strings/slices" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection" ) @@ -267,8 +267,8 @@ func (r *Requirement) Operator() selection.Operator { } // Values returns requirement values -func (r *Requirement) Values() sets.String { - ret := sets.String{} +func (r *Requirement) Values() sets.Set[string] { + ret := sets.New[string]() for i := range r.strValues { ret.Insert(r.strValues[i]) } @@ -283,7 +283,7 @@ func (r Requirement) Equal(x Requirement) bool { if r.operator != x.operator { return false } - return stringslices.Equal(r.strValues, x.strValues) + return slices.Equal(r.strValues, x.strValues) } // Empty returns true if the internalSelector doesn't 
restrict selection space @@ -351,12 +351,12 @@ func (r *Requirement) String() string { // safeSort sorts input strings without modification func safeSort(in []string) []string { - if sort.StringsAreSorted(in) { + if slices.IsSorted(in) { return in } out := make([]string, len(in)) copy(out, in) - sort.Strings(out) + slices.Sort(out) return out } @@ -651,7 +651,7 @@ func (p *Parser) parse() (internalSelector, error) { case IdentifierToken, DoesNotExistToken: r, err := p.parseRequirement() if err != nil { - return nil, fmt.Errorf("unable to parse requirement: %v", err) + return nil, fmt.Errorf("unable to parse requirement: %w", err) } requirements = append(requirements, *r) t, l := p.consume(Values) @@ -686,7 +686,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) { if err != nil { return nil, err } - var values sets.String + var values sets.Set[string] switch operator { case selection.In, selection.NotIn: values, err = p.parseValues() @@ -696,7 +696,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) { if err != nil { return nil, err } - return NewRequirement(key, operator, values.List()) + return NewRequirement(key, operator, sets.List(values)) } @@ -752,7 +752,7 @@ func (p *Parser) parseOperator() (op selection.Operator, err error) { } // parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (sets.String, error) { +func (p *Parser) parseValues() (sets.Set[string], error) { tok, lit := p.consume(Values) if tok != OpenParToken { return nil, fmt.Errorf("found '%s' expected: '('", lit) @@ -770,7 +770,7 @@ func (p *Parser) parseValues() (sets.String, error) { return s, nil case ClosedParToken: // handles "()" p.consume(Values) - return sets.NewString(""), nil + return sets.New[string](""), nil default: return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) } @@ -778,8 +778,8 @@ func (p *Parser) parseValues() (sets.String, error) { // parseIdentifiersList parses a (possibly empty) list of // of comma separated (possibly empty) identifiers -func (p *Parser) parseIdentifiersList() (sets.String, error) { - s := sets.NewString() +func (p *Parser) parseIdentifiersList() (sets.Set[string], error) { + s := sets.New[string]() for { tok, lit := p.consume(Values) switch tok { @@ -814,8 +814,8 @@ func (p *Parser) parseIdentifiersList() (sets.String, error) { } // parseExactValue parses the only value for exact match style -func (p *Parser) parseExactValue() (sets.String, error) { - s := sets.NewString() +func (p *Parser) parseExactValue() (sets.Set[string], error) { + s := sets.New[string]() tok, _ := p.lookahead(Values) if tok == EndOfStringToken || tok == CommaToken { s.Insert("") @@ -908,7 +908,7 @@ func SelectorFromSet(ls Set) Selector { // nil and empty Sets are considered equivalent to Everything(). // The Set is validated client-side, which allows to catch errors early. func ValidatedSelectorFromSet(ls Set) (Selector, error) { - if ls == nil || len(ls) == 0 { + if len(ls) == 0 { return internalSelector{}, nil } requirements := make([]Requirement, 0, len(ls)) @@ -930,7 +930,7 @@ func ValidatedSelectorFromSet(ls Set) (Selector, error) { // Note: this method copies the Set; if the Set is immutable, consider wrapping it with ValidatedSetSelector // instead, which does not copy. 
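The selector code above moves from the deprecated sets.String to the generic sets.Set[string] and from sort/stringslices to the standard slices package, and labels.go gains ConvertSelectorToLabelsMap; parse errors are now wrapped with %w, so callers can inspect them with errors.Is/errors.As. A minimal standalone sketch of the new surface; the slim_labels import alias is an assumption.

package main

import (
	"fmt"

	slim_labels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.Set[string] replaces the deprecated sets.String used by selector.go.
	s := sets.New[string]("a", "b")
	s.Insert("c")
	fmt.Println(sets.List(s)) // sorted slice: [a b c]

	// ConvertSelectorToLabelsMap parses "k=v" pairs and validates keys and values.
	m, err := slim_labels.ConvertSelectorToLabelsMap("app=frontend,tier=web")
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // parsed key/value pairs as a labels Set
}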
func SelectorFromValidatedSet(ls Set) Selector { - if ls == nil || len(ls) == 0 { + if len(ls) == 0 { return internalSelector{} } requirements := make([]Requirement, 0, len(ls)) @@ -950,3 +950,76 @@ func SelectorFromValidatedSet(ls Set) Selector { func ParseToRequirements(selector string, opts ...field.PathOption) ([]Requirement, error) { return parse(selector, field.ToPath(opts...)) } + +// ValidatedSetSelector wraps a Set, allowing it to implement the Selector interface. Unlike +// Set.AsSelectorPreValidated (which copies the input Set), this type simply wraps the underlying +// Set. As a result, it is substantially more efficient. A nil and empty Sets are considered +// equivalent to Everything(). +// +// Callers MUST ensure the underlying Set is not mutated, and that it is already validated. If these +// constraints are not met, Set.AsValidatedSelector should be preferred +// +// None of the Selector methods mutate the underlying Set, but Add() and Requirements() convert to +// the less optimized version. +type ValidatedSetSelector Set + +func (s ValidatedSetSelector) Matches(labels Labels) bool { + for k, v := range s { + if !labels.Has(k) || v != labels.Get(k) { + return false + } + } + return true +} + +func (s ValidatedSetSelector) Empty() bool { + return len(s) == 0 +} + +func (s ValidatedSetSelector) String() string { + keys := make([]string, 0, len(s)) + for k := range s { + keys = append(keys, k) + } + // Ensure deterministic output + slices.Sort(keys) + b := strings.Builder{} + for i, key := range keys { + v := s[key] + b.Grow(len(key) + 2 + len(v)) + if i != 0 { + b.WriteString(",") + } + b.WriteString(key) + b.WriteString("=") + b.WriteString(v) + } + return b.String() +} + +func (s ValidatedSetSelector) Add(r ...Requirement) Selector { + return s.toFullSelector().Add(r...) +} + +func (s ValidatedSetSelector) Requirements() (requirements Requirements, selectable bool) { + return s.toFullSelector().Requirements() +} + +func (s ValidatedSetSelector) DeepCopySelector() Selector { + res := make(ValidatedSetSelector, len(s)) + for k, v := range s { + res[k] = v + } + return res +} + +func (s ValidatedSetSelector) RequiresExactMatch(label string) (value string, found bool) { + v, f := s[label] + return v, f +} + +func (s ValidatedSetSelector) toFullSelector() Selector { + return SelectorFromValidatedSet(Set(s)) +} + +var _ Selector = ValidatedSetSelector{} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go index f05c7cb36e..1f3ad53517 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels/zz_generated.deepequal.go @@ -177,3 +177,27 @@ func (in *Set) DeepEqual(other *Set) bool { return true } + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. 
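ValidatedSetSelector, added above, implements Selector directly over an already-validated Set without copying it into Requirements. A minimal standalone usage sketch, assuming the slim Set type satisfies the package's Labels interface as it does upstream; the slim_labels import alias is an assumption.

package main

import (
	"fmt"

	slim_labels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels"
)

func main() {
	// Wraps the Set directly instead of converting it to Requirements.
	sel := slim_labels.ValidatedSetSelector{"app": "frontend", "tier": "web"}

	pod := slim_labels.Set{"app": "frontend", "tier": "web", "zone": "a"}
	fmt.Println(sel.Matches(pod)) // true: every selector key/value is present
	fmt.Println(sel.Empty())      // false
	fmt.Println(sel.String())     // deterministic "app=frontend,tier=web"
}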
+func (in *ValidatedSetSelector) DeepEqual(other *ValidatedSetSelector) bool { + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for key, inValue := range *in { + if otherValue, present := (*other)[key]; !present { + return false + } else { + if inValue != otherValue { + return false + } + } + } + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go index c6777d920c..18629faf29 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.pb.go @@ -358,82 +358,83 @@ func init() { } var fileDescriptor_e0f89ca41f751b36 = []byte{ - // 1190 bytes of a gzipped FileDescriptorProto + // 1205 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0x76, 0x13, 0xc7, 0x7e, 0x4e, 0x9b, 0x74, 0xbe, 0xed, 0x97, 0x25, 0x12, 0xb6, 0x6b, + 0x17, 0xf7, 0x66, 0x13, 0xc7, 0x7e, 0x4e, 0x9b, 0x74, 0xbe, 0xed, 0x97, 0x25, 0x12, 0xb6, 0x6b, 0x24, 0x94, 0x4a, 0xb0, 0xa6, 0x39, 0x54, 0x69, 0x85, 0x10, 0xdd, 0xf4, 0x87, 0x42, 0x9b, 0xb4, - 0x9a, 0xb4, 0x3d, 0x00, 0x07, 0x26, 0xeb, 0xa9, 0x33, 0x78, 0x77, 0xd6, 0xec, 0x8c, 0x03, 0xbe, - 0x15, 0x89, 0x43, 0x01, 0x21, 0x55, 0x1c, 0x10, 0xc7, 0x56, 0xe2, 0x3f, 0xe1, 0xd2, 0x63, 0x8f, - 0x3d, 0x20, 0x8b, 0x1a, 0xfe, 0x08, 0xd4, 0x0b, 0x68, 0x66, 0x67, 0xbd, 0xeb, 0x1f, 0x55, 0x55, - 0x93, 0x93, 0x77, 0xdf, 0x7b, 0xf3, 0xf9, 0x7c, 0x66, 0xf7, 0xbd, 0xcf, 0xac, 0xe1, 0x6a, 0x9b, - 0xc9, 0x83, 0xde, 0xbe, 0xeb, 0x47, 0x61, 0xd3, 0x67, 0x01, 0xeb, 0x8d, 0x7e, 0xba, 0x9d, 0x76, - 0xb3, 0xb3, 0x29, 0x9a, 0x22, 0x60, 0xa1, 0xbe, 0x20, 0x5d, 0x26, 0x9a, 0x21, 0x95, 0xa4, 0x79, - 0x78, 0xae, 0xd9, 0xa6, 0x9c, 0xc6, 0x44, 0xd2, 0x96, 0xdb, 0x8d, 0x23, 0x19, 0xa1, 0xf3, 0x19, - 0x8e, 0x9b, 0x00, 0xa4, 0x3f, 0xdd, 0x4e, 0xdb, 0xed, 0x6c, 0x0a, 0x57, 0xe1, 0xe8, 0x0b, 0x85, - 0xe3, 0x2a, 0x1c, 0xf7, 0xf0, 0xdc, 0xda, 0x7b, 0x39, 0xfe, 0x76, 0xd4, 0x8e, 0x9a, 0x1a, 0x6e, - 0xbf, 0x77, 0x4f, 0xdf, 0xe9, 0x1b, 0x7d, 0x95, 0xd0, 0xac, 0x29, 0x21, 0x2e, 0x8b, 0x94, 0x96, - 0x90, 0xf8, 0x07, 0x8c, 0xd3, 0xb8, 0xaf, 0x95, 0xc6, 0x3d, 0x2e, 0x59, 0x48, 0x27, 0x75, 0xad, - 0x9d, 0x7f, 0xd5, 0x02, 0xe1, 0x1f, 0xd0, 0x90, 0x4c, 0xae, 0x6b, 0xfc, 0x64, 0x43, 0x79, 0x2b, - 0xe2, 0x2d, 0x26, 0x59, 0xc4, 0x51, 0x1d, 0x16, 0x64, 0xbf, 0x4b, 0x1d, 0xab, 0x6e, 0xad, 0x97, - 0xbd, 0xe5, 0x27, 0x83, 0x5a, 0x61, 0x38, 0xa8, 0x2d, 0xdc, 0xee, 0x77, 0x29, 0xd6, 0x19, 0x74, - 0x01, 0x8a, 0x42, 0x12, 0xd9, 0x13, 0xce, 0x31, 0x5d, 0x73, 0xc6, 0xd4, 0x14, 0xf7, 0x74, 0xf4, - 0xc5, 0xa0, 0xb6, 0x32, 0x82, 0x4b, 0x42, 0xd8, 0x2c, 0x40, 0x1f, 0x03, 0x8a, 0xf6, 0x05, 0x8d, - 0x0f, 0x69, 0xeb, 0x5a, 0xa2, 0x82, 0x45, 0xdc, 0xb1, 0xeb, 0xd6, 0xba, 0xed, 0xad, 0x19, 0x18, - 0x74, 0x73, 0xaa, 0x02, 0xcf, 0x58, 0x85, 0x1e, 0x58, 0x80, 0x02, 0x22, 0xe4, 0xed, 0x98, 0x70, - 0xa1, 0xc9, 0x6e, 0xb3, 0x90, 0x3a, 0x0b, 0x75, 0x6b, 0xbd, 0xb2, 0xf1, 0x81, 0x3b, 0xdf, 0x4b, - 0x72, 0x15, 0x46, 0x26, 0xe5, 0xc6, 0x14, 0x3e, 0x9e, 0xc1, 0x89, 0xde, 0x81, 0x62, 0x4c, 0x89, - 0x88, 0xb8, 0xb3, 0xa8, 0x9f, 0xc8, 0x89, 0xf4, 0x89, 0x60, 0x1d, 0xc5, 0x26, 0x8b, 0xce, 0xc2, - 0x52, 0x48, 0x85, 0x20, 0x6d, 0xea, 0x14, 0x75, 0xe1, 0x8a, 0x29, 0x5c, 0xda, 0x49, 0xc2, 0x38, - 0xcd, 0x37, 0xfe, 0x3e, 0x06, 0xc7, 0x6f, 0x90, 0x7d, 0x1a, 0xec, 0xd1, 0x80, 
0xfa, 0x32, 0x8a, - 0xd1, 0x8f, 0x16, 0x54, 0x42, 0x22, 0xfd, 0x03, 0x1d, 0x16, 0x8e, 0x55, 0xb7, 0xd7, 0x2b, 0x1b, - 0x77, 0xe7, 0xdd, 0xe8, 0x18, 0xb8, 0xbb, 0x93, 0x01, 0x5f, 0xe1, 0x32, 0xee, 0x7b, 0xff, 0x33, - 0xca, 0x2a, 0xb9, 0x0c, 0xce, 0xf3, 0xa3, 0x9f, 0x2d, 0x58, 0xd5, 0xf7, 0x57, 0xbe, 0xee, 0xc6, - 0x54, 0x08, 0x16, 0x71, 0xd5, 0x11, 0x4a, 0xd4, 0xad, 0x23, 0x11, 0x85, 0xe9, 0x97, 0x3d, 0x16, - 0xd3, 0x90, 0x72, 0xe9, 0x39, 0x46, 0xce, 0xea, 0xce, 0x04, 0x23, 0x9e, 0xd2, 0xb0, 0xf6, 0x21, - 0xac, 0x4e, 0x6e, 0x07, 0xad, 0x82, 0xdd, 0xa1, 0xfd, 0xa4, 0xa9, 0xb1, 0xba, 0x44, 0xa7, 0x60, - 0xf1, 0x90, 0x04, 0x3d, 0x9a, 0x34, 0x31, 0x4e, 0x6e, 0x2e, 0x1e, 0xdb, 0xb4, 0x1a, 0xbf, 0x5a, - 0xe0, 0xbc, 0x4c, 0x08, 0x7a, 0x2b, 0x07, 0xe4, 0x55, 0x8c, 0x2a, 0xfb, 0x3a, 0xed, 0x27, 0xa8, - 0x57, 0xa0, 0x14, 0x75, 0x55, 0x87, 0x46, 0xb1, 0x99, 0x8e, 0xb3, 0xa6, 0xa6, 0x74, 0xd3, 0xc4, - 0x5f, 0x0c, 0x6a, 0xa7, 0xc7, 0xe0, 0xd3, 0x04, 0x1e, 0x2d, 0x45, 0x0d, 0x28, 0x6a, 0x3d, 0xc2, - 0xb1, 0xeb, 0xf6, 0x7a, 0xd9, 0x03, 0xd5, 0x4c, 0x77, 0x75, 0x04, 0x9b, 0x4c, 0xe3, 0x37, 0x0b, - 0x4a, 0x37, 0x98, 0x90, 0x3b, 0x54, 0x12, 0x74, 0x09, 0x56, 0x62, 0x2a, 0xa2, 0x5e, 0xec, 0xd3, - 0xbb, 0x34, 0x56, 0xcf, 0xc1, 0xd0, 0xbf, 0x61, 0xe8, 0x57, 0xf0, 0x78, 0x1a, 0x4f, 0xd6, 0xa3, - 0x77, 0xa1, 0xe4, 0x47, 0x5c, 0x32, 0xde, 0xa3, 0x7a, 0x22, 0xcb, 0xde, 0x6a, 0x2a, 0x7d, 0xcb, - 0xc4, 0xf1, 0xa8, 0x02, 0x5d, 0x05, 0x14, 0xd3, 0x90, 0x30, 0xce, 0x78, 0x7b, 0x5b, 0xd2, 0x70, - 0x2b, 0xea, 0x71, 0xa9, 0x87, 0xcf, 0xf6, 0xfe, 0xaf, 0x46, 0x07, 0x4f, 0x65, 0xf1, 0x8c, 0x15, - 0x8d, 0xbf, 0x96, 0x00, 0x6e, 0xee, 0x7f, 0x41, 0xfd, 0x64, 0x1f, 0x75, 0x58, 0xe0, 0x24, 0x9c, - 0x72, 0x9f, 0x5d, 0x12, 0x52, 0xac, 0x33, 0x68, 0x13, 0x96, 0x53, 0x03, 0x53, 0x51, 0xb3, 0xcd, - 0x53, 0xa6, 0x72, 0xf9, 0x5a, 0x2e, 0x87, 0xc7, 0x2a, 0x51, 0x13, 0xca, 0x0a, 0x41, 0x74, 0x89, - 0x9f, 0xee, 0xf0, 0xa4, 0x59, 0x56, 0xde, 0x4d, 0x13, 0x38, 0xab, 0x41, 0x1e, 0xd8, 0x3d, 0xd6, - 0x32, 0x33, 0xfd, 0x7e, 0xfa, 0xae, 0xef, 0x6c, 0x5f, 0x7e, 0x31, 0xa8, 0x9d, 0x79, 0x99, 0xd9, - 0x2a, 0x8b, 0x14, 0xee, 0x9d, 0xed, 0xcb, 0x58, 0x2d, 0x9e, 0xf5, 0x62, 0x8a, 0xaf, 0xf9, 0x62, - 0x36, 0x00, 0xda, 0x99, 0x59, 0x2e, 0xe9, 0x47, 0x8c, 0xcc, 0x6a, 0xc8, 0x99, 0x64, 0xae, 0x0a, - 0x7d, 0x63, 0xc1, 0xc9, 0x16, 0x0d, 0x68, 0x6a, 0x51, 0x42, 0x92, 0xb0, 0xeb, 0x94, 0x8f, 0xc0, - 0x1b, 0x4f, 0x0f, 0x07, 0xb5, 0x93, 0x97, 0x27, 0xa1, 0xf1, 0x34, 0x1b, 0x3a, 0x84, 0x62, 0x90, - 0x58, 0x55, 0x45, 0xbb, 0xc2, 0xee, 0xbc, 0xbc, 0x59, 0x7f, 0xb8, 0x79, 0x8b, 0x1a, 0xb9, 0xac, - 0x71, 0x27, 0xc3, 0x86, 0xbe, 0xb7, 0xa0, 0x42, 0x38, 0x8f, 0xa4, 0x7e, 0x14, 0xc2, 0x59, 0xd6, - 0xec, 0x7b, 0x47, 0xc0, 0x7e, 0x29, 0x43, 0x9d, 0x70, 0xc9, 0x5c, 0x06, 0xe7, 0xc9, 0xd1, 0x77, - 0x16, 0xac, 0x44, 0x5f, 0x71, 0x1a, 0x63, 0x7a, 0x8f, 0xc6, 0x94, 0xfb, 0x54, 0x38, 0xc7, 0xb5, - 0xa0, 0xab, 0x73, 0x0b, 0x1a, 0x83, 0xcb, 0x1a, 0x69, 0x3c, 0x2e, 0xf0, 0x24, 0xef, 0xda, 0x05, - 0xa8, 0xcc, 0xe9, 0x89, 0xca, 0x53, 0x27, 0x37, 0xff, 0x5a, 0x9e, 0xfa, 0xad, 0x05, 0x27, 0xc6, - 0xf5, 0xa9, 0x51, 0xef, 0x30, 0xde, 0x9a, 0x1c, 0xf5, 0xeb, 0x8c, 0xb7, 0xb0, 0xce, 0x8c, 0xcc, - 0xc0, 0x7e, 0xa9, 0x19, 0xb8, 0x00, 0xca, 0x91, 0xe2, 0x28, 0x08, 0x68, 0xac, 0x07, 0xab, 0xe4, - 0x9d, 0x50, 0x63, 0xb1, 0x35, 0x8a, 0xe2, 0x5c, 0x45, 0xe3, 0x07, 0x0b, 0x4e, 0xdf, 0x22, 0xb1, - 0x64, 0x24, 0xc8, 0x5e, 0x6b, 0x8b, 0x48, 0x82, 0x62, 0x28, 0x85, 0xe6, 0x5a, 0x2b, 0xaa, 0x6c, - 0x78, 0xff, 0xbd, 0x61, 0xb2, 0x31, 0xcd, 0x62, 0x78, 0xc4, 0xd3, 0xf8, 0xc7, 0x82, 0x37, 0x67, - 0xaa, 
0x51, 0xb6, 0x8e, 0xf8, 0x94, 0xa2, 0x8f, 0xe6, 0x3e, 0x56, 0xcd, 0x31, 0x91, 0x39, 0x7a, - 0x1a, 0xc9, 0xd4, 0xa0, 0x18, 0x16, 0x99, 0xa4, 0x61, 0x7a, 0x86, 0xef, 0xcc, 0x4b, 0x36, 0x73, - 0x47, 0xde, 0x71, 0xc3, 0xbc, 0xa8, 0xac, 0x5f, 0xe0, 0x84, 0xaa, 0x11, 0xc0, 0x82, 0xfe, 0x80, - 0x3a, 0x0b, 0x4b, 0x82, 0xfa, 0x11, 0x6f, 0x09, 0xbd, 0x55, 0x3b, 0xfb, 0x30, 0xda, 0x4b, 0xc2, - 0x38, 0xcd, 0xa3, 0xb7, 0x61, 0x91, 0x13, 0x1e, 0x25, 0x1f, 0x9f, 0x8b, 0x19, 0xee, 0xae, 0x0a, - 0xe2, 0x24, 0x77, 0xf1, 0xd4, 0x2f, 0x8f, 0x6a, 0x85, 0x07, 0x8f, 0x6b, 0x85, 0x87, 0x8f, 0x6b, - 0x85, 0x47, 0x8f, 0x6b, 0x85, 0xfb, 0xbf, 0xd7, 0x0b, 0x8d, 0x4f, 0xa1, 0x9c, 0xb9, 0xd3, 0x11, - 0x53, 0x36, 0x3e, 0x87, 0x92, 0xfa, 0x46, 0x4e, 0x4f, 0xb1, 0x57, 0xb4, 0xf6, 0x06, 0x00, 0xe9, - 0xb2, 0xf1, 0xa3, 0x7a, 0xd4, 0x2c, 0x97, 0x6e, 0x6d, 0xa7, 0x87, 0x41, 0xae, 0xca, 0xfb, 0xec, - 0xc9, 0xf3, 0x6a, 0xe1, 0xe9, 0xf3, 0x6a, 0xe1, 0xd9, 0xf3, 0x6a, 0xe1, 0xfe, 0xb0, 0x6a, 0x3d, - 0x19, 0x56, 0xad, 0xa7, 0xc3, 0xaa, 0xf5, 0x6c, 0x58, 0xb5, 0xfe, 0x18, 0x56, 0xad, 0x87, 0x7f, - 0x56, 0x0b, 0x9f, 0x9c, 0x9f, 0xef, 0x5f, 0xce, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x96, 0xad, - 0x00, 0xb3, 0x1e, 0x0d, 0x00, 0x00, + 0x9a, 0xb4, 0x3d, 0x00, 0x07, 0x26, 0xeb, 0xa9, 0x33, 0x64, 0x77, 0xd6, 0xec, 0x8c, 0x03, 0xbe, + 0x95, 0x5b, 0x01, 0x21, 0x55, 0x1c, 0x10, 0xc7, 0x56, 0xe2, 0x3f, 0xe1, 0xd2, 0x63, 0x8f, 0x3d, + 0x20, 0x8b, 0x1a, 0xfe, 0x08, 0x54, 0x21, 0x81, 0x66, 0x76, 0xd6, 0xbb, 0xfe, 0x51, 0x95, 0xba, + 0x39, 0x79, 0xf7, 0xbd, 0x37, 0x9f, 0xcf, 0x67, 0x76, 0xdf, 0xfb, 0xcc, 0x1a, 0xae, 0xb4, 0x99, + 0xdc, 0xef, 0xee, 0xb9, 0x7e, 0x14, 0x36, 0x7d, 0x16, 0xb0, 0xee, 0xf0, 0xa7, 0x73, 0xd0, 0x6e, + 0x1e, 0x6c, 0x88, 0xa6, 0x08, 0x58, 0xa8, 0x2f, 0x48, 0x87, 0x89, 0x66, 0x48, 0x25, 0x69, 0x1e, + 0x9e, 0x6d, 0xb6, 0x29, 0xa7, 0x31, 0x91, 0xb4, 0xe5, 0x76, 0xe2, 0x48, 0x46, 0xe8, 0x5c, 0x86, + 0xe3, 0x26, 0x00, 0xe9, 0x4f, 0xe7, 0xa0, 0xed, 0x1e, 0x6c, 0x08, 0x57, 0xe1, 0xe8, 0x0b, 0x85, + 0xe3, 0x2a, 0x1c, 0xf7, 0xf0, 0xec, 0xea, 0x7b, 0x39, 0xfe, 0x76, 0xd4, 0x8e, 0x9a, 0x1a, 0x6e, + 0xaf, 0x7b, 0x57, 0xdf, 0xe9, 0x1b, 0x7d, 0x95, 0xd0, 0xac, 0x2a, 0x21, 0x2e, 0x8b, 0x94, 0x96, + 0x90, 0xf8, 0xfb, 0x8c, 0xd3, 0xb8, 0xa7, 0x95, 0xc6, 0x5d, 0x2e, 0x59, 0x48, 0xc7, 0x75, 0xad, + 0x9e, 0x7b, 0xd9, 0x02, 0xe1, 0xef, 0xd3, 0x90, 0x8c, 0xaf, 0x6b, 0xfc, 0x68, 0x43, 0x79, 0x33, + 0xe2, 0x2d, 0x26, 0x59, 0xc4, 0x51, 0x1d, 0xe6, 0x65, 0xaf, 0x43, 0x1d, 0xab, 0x6e, 0xad, 0x95, + 0xbd, 0xa5, 0xc7, 0xfd, 0x5a, 0x61, 0xd0, 0xaf, 0xcd, 0xdf, 0xea, 0x75, 0x28, 0xd6, 0x19, 0x74, + 0x1e, 0x8a, 0x42, 0x12, 0xd9, 0x15, 0xce, 0x9c, 0xae, 0x39, 0x6d, 0x6a, 0x8a, 0xbb, 0x3a, 0xfa, + 0xbc, 0x5f, 0x5b, 0x1e, 0xc2, 0x25, 0x21, 0x6c, 0x16, 0xa0, 0x8f, 0x01, 0x45, 0x7b, 0x82, 0xc6, + 0x87, 0xb4, 0x75, 0x35, 0x51, 0xc1, 0x22, 0xee, 0xd8, 0x75, 0x6b, 0xcd, 0xf6, 0x56, 0x0d, 0x0c, + 0xba, 0x31, 0x51, 0x81, 0xa7, 0xac, 0x42, 0xf7, 0x2d, 0x40, 0x01, 0x11, 0xf2, 0x56, 0x4c, 0xb8, + 0xd0, 0x64, 0xb7, 0x58, 0x48, 0x9d, 0xf9, 0xba, 0xb5, 0x56, 0x59, 0xff, 0xc0, 0x9d, 0xed, 0x25, + 0xb9, 0x0a, 0x23, 0x93, 0x72, 0x7d, 0x02, 0x1f, 0x4f, 0xe1, 0x44, 0xef, 0x40, 0x31, 0xa6, 0x44, + 0x44, 0xdc, 0x59, 0xd0, 0x4f, 0xe4, 0x78, 0xfa, 0x44, 0xb0, 0x8e, 0x62, 0x93, 0x45, 0x67, 0x60, + 0x31, 0xa4, 0x42, 0x90, 0x36, 0x75, 0x8a, 0xba, 0x70, 0xd9, 0x14, 0x2e, 0x6e, 0x27, 0x61, 0x9c, + 0xe6, 0x1b, 0x7f, 0xcd, 0xc1, 0xb1, 0xeb, 0x64, 0x8f, 0x06, 0xbb, 0x34, 0xa0, 0xbe, 0x8c, 0x62, + 0xf4, 0x83, 0x05, 0x95, 0x90, 0x48, 0x7f, 0x5f, 0x87, 0x85, 0x63, 0xd5, 0xed, 0xb5, 0xca, 
0xfa, + 0x9d, 0x59, 0x37, 0x3a, 0x02, 0xee, 0x6e, 0x67, 0xc0, 0x97, 0xb9, 0x8c, 0x7b, 0xde, 0xff, 0x8c, + 0xb2, 0x4a, 0x2e, 0x83, 0xf3, 0xfc, 0xe8, 0x27, 0x0b, 0x56, 0xf4, 0xfd, 0xe5, 0xaf, 0x3b, 0x31, + 0x15, 0x82, 0x45, 0x5c, 0x75, 0x84, 0x12, 0x75, 0xf3, 0x48, 0x44, 0x61, 0xfa, 0x65, 0x97, 0xc5, + 0x34, 0xa4, 0x5c, 0x7a, 0x8e, 0x91, 0xb3, 0xb2, 0x3d, 0xc6, 0x88, 0x27, 0x34, 0xac, 0x7e, 0x08, + 0x2b, 0xe3, 0xdb, 0x41, 0x2b, 0x60, 0x1f, 0xd0, 0x5e, 0xd2, 0xd4, 0x58, 0x5d, 0xa2, 0x93, 0xb0, + 0x70, 0x48, 0x82, 0x2e, 0x4d, 0x9a, 0x18, 0x27, 0x37, 0x17, 0xe6, 0x36, 0xac, 0xc6, 0x2f, 0x16, + 0x38, 0x2f, 0x12, 0x82, 0xde, 0xca, 0x01, 0x79, 0x15, 0xa3, 0xca, 0xbe, 0x46, 0x7b, 0x09, 0xea, + 0x65, 0x28, 0x45, 0x1d, 0xd5, 0xa1, 0x51, 0x6c, 0xa6, 0xe3, 0x8c, 0xa9, 0x29, 0xdd, 0x30, 0xf1, + 0xe7, 0xfd, 0xda, 0xa9, 0x11, 0xf8, 0x34, 0x81, 0x87, 0x4b, 0x51, 0x03, 0x8a, 0x5a, 0x8f, 0x70, + 0xec, 0xba, 0xbd, 0x56, 0xf6, 0x40, 0x35, 0xd3, 0x1d, 0x1d, 0xc1, 0x26, 0xd3, 0xf8, 0xd5, 0x82, + 0xd2, 0x75, 0x26, 0xe4, 0x36, 0x95, 0x04, 0x5d, 0x84, 0xe5, 0x98, 0x8a, 0xa8, 0x1b, 0xfb, 0xf4, + 0x0e, 0x8d, 0xd5, 0x73, 0x30, 0xf4, 0x6f, 0x18, 0xfa, 0x65, 0x3c, 0x9a, 0xc6, 0xe3, 0xf5, 0xe8, + 0x5d, 0x28, 0xf9, 0x11, 0x97, 0x8c, 0x77, 0xa9, 0x9e, 0xc8, 0xb2, 0xb7, 0x92, 0x4a, 0xdf, 0x34, + 0x71, 0x3c, 0xac, 0x40, 0x57, 0x00, 0xc5, 0x34, 0x24, 0x8c, 0x33, 0xde, 0xde, 0x92, 0x34, 0xdc, + 0x8c, 0xba, 0x5c, 0xea, 0xe1, 0xb3, 0xbd, 0xff, 0xab, 0xd1, 0xc1, 0x13, 0x59, 0x3c, 0x65, 0x45, + 0xe3, 0xcf, 0x45, 0x80, 0x1b, 0x7b, 0x5f, 0x50, 0x3f, 0xd9, 0x47, 0x1d, 0xe6, 0x39, 0x09, 0x27, + 0xdc, 0x67, 0x87, 0x84, 0x14, 0xeb, 0x0c, 0xda, 0x80, 0xa5, 0xd4, 0xc0, 0x54, 0xd4, 0x6c, 0xf3, + 0xa4, 0xa9, 0x5c, 0xba, 0x9a, 0xcb, 0xe1, 0x91, 0x4a, 0xd4, 0x84, 0xb2, 0x42, 0x10, 0x1d, 0xe2, + 0xa7, 0x3b, 0x3c, 0x61, 0x96, 0x95, 0x77, 0xd2, 0x04, 0xce, 0x6a, 0x90, 0x07, 0x76, 0x97, 0xb5, + 0xcc, 0x4c, 0xbf, 0x9f, 0xbe, 0xeb, 0xdb, 0x5b, 0x97, 0x9e, 0xf7, 0x6b, 0xa7, 0x5f, 0x64, 0xb6, + 0xca, 0x22, 0x85, 0x7b, 0x7b, 0xeb, 0x12, 0x56, 0x8b, 0xa7, 0xbd, 0x98, 0xe2, 0x2b, 0xbe, 0x98, + 0x75, 0x80, 0x76, 0x66, 0x96, 0x8b, 0xfa, 0x11, 0x23, 0xb3, 0x1a, 0x72, 0x26, 0x99, 0xab, 0x42, + 0xdf, 0x58, 0x70, 0xa2, 0x45, 0x03, 0x9a, 0x5a, 0x94, 0x90, 0x24, 0xec, 0x38, 0xe5, 0x23, 0xf0, + 0xc6, 0x53, 0x83, 0x7e, 0xed, 0xc4, 0xa5, 0x71, 0x68, 0x3c, 0xc9, 0x86, 0x0e, 0xa1, 0x18, 0x24, + 0x56, 0x55, 0xd1, 0xae, 0xb0, 0x33, 0x2b, 0x6f, 0xd6, 0x1f, 0x6e, 0xde, 0xa2, 0x86, 0x2e, 0x6b, + 0xdc, 0xc9, 0xb0, 0xa1, 0xef, 0x2c, 0xa8, 0x10, 0xce, 0x23, 0xa9, 0x1f, 0x85, 0x70, 0x96, 0x34, + 0xfb, 0xee, 0x11, 0xb0, 0x5f, 0xcc, 0x50, 0xc7, 0x5c, 0x32, 0x97, 0xc1, 0x79, 0x72, 0xf4, 0xad, + 0x05, 0xcb, 0xd1, 0x57, 0x9c, 0xc6, 0x98, 0xde, 0xa5, 0x31, 0xe5, 0x3e, 0x15, 0xce, 0x31, 0x2d, + 0xe8, 0xca, 0xcc, 0x82, 0x46, 0xe0, 0xb2, 0x46, 0x1a, 0x8d, 0x0b, 0x3c, 0xce, 0xbb, 0x7a, 0x1e, + 0x2a, 0x33, 0x7a, 0xa2, 0xf2, 0xd4, 0xf1, 0xcd, 0xbf, 0x92, 0xa7, 0xfe, 0x6d, 0xc1, 0xf1, 0x51, + 0x7d, 0xaa, 0xad, 0x49, 0x87, 0xa5, 0x43, 0x91, 0x0c, 0xd9, 0xb0, 0xad, 0x2f, 0xde, 0xdc, 0x4a, + 0xe7, 0x21, 0x57, 0xa5, 0xec, 0xe1, 0x80, 0xf1, 0xd6, 0xb8, 0x3d, 0x5c, 0x63, 0xbc, 0x85, 0x75, + 0x66, 0x68, 0x20, 0xf6, 0x0b, 0x0d, 0xc4, 0x4c, 0xf5, 0xfc, 0xeb, 0x4c, 0xb5, 0x0b, 0xa0, 0x9c, + 0x30, 0x8e, 0x82, 0x80, 0xc6, 0x7a, 0xa0, 0x4b, 0xde, 0x71, 0xa5, 0x7b, 0x73, 0x18, 0xc5, 0xb9, + 0x8a, 0xc6, 0xf7, 0x16, 0x9c, 0xba, 0x49, 0x62, 0xc9, 0x48, 0x90, 0xb5, 0x53, 0x8b, 0x48, 0x82, + 0x62, 0x28, 0x85, 0xe6, 0x5a, 0xef, 0xaa, 0xb2, 0xee, 0xbd, 0x7e, 0xa3, 0x66, 0xcf, 0x31, 0x8b, + 0xe1, 0x21, 0x4f, 
0xe3, 0x1f, 0x0b, 0xde, 0x9c, 0xaa, 0x46, 0x1d, 0x27, 0x88, 0x4f, 0x28, 0xfa, + 0x68, 0xe6, 0xe3, 0xdc, 0x1c, 0x4f, 0xd9, 0x49, 0x92, 0x46, 0x32, 0x35, 0x28, 0x86, 0x05, 0x26, + 0x69, 0x98, 0x7e, 0x3b, 0x6c, 0xcf, 0x4a, 0x36, 0x75, 0x47, 0xde, 0x31, 0xc3, 0xbc, 0xa0, 0x8e, + 0x1c, 0x81, 0x13, 0xaa, 0x46, 0x00, 0xf3, 0xfa, 0xc3, 0xed, 0x0c, 0x2c, 0x0a, 0xea, 0x47, 0xbc, + 0x25, 0xf4, 0x56, 0xed, 0xec, 0x83, 0x6c, 0x37, 0x09, 0xe3, 0x34, 0x8f, 0xde, 0x86, 0x05, 0x4e, + 0x78, 0x94, 0x7c, 0xf4, 0x2e, 0x64, 0xb8, 0x3b, 0x2a, 0x88, 0x93, 0xdc, 0x85, 0x93, 0x3f, 0x3f, + 0xac, 0x15, 0xee, 0x3f, 0xaa, 0x15, 0x1e, 0x3c, 0xaa, 0x15, 0x1e, 0x3e, 0xaa, 0x15, 0xee, 0xfd, + 0x56, 0x2f, 0x34, 0x3e, 0x85, 0x72, 0xe6, 0x8a, 0x47, 0x4c, 0xd9, 0xf8, 0x1c, 0x4a, 0xea, 0xdb, + 0x3c, 0x3d, 0x3d, 0x5f, 0x32, 0x1e, 0xa3, 0x43, 0x37, 0xf7, 0x5f, 0x86, 0xce, 0xfb, 0xec, 0xf1, + 0xb3, 0x6a, 0xe1, 0xc9, 0xb3, 0x6a, 0xe1, 0xe9, 0xb3, 0x6a, 0xe1, 0xde, 0xa0, 0x6a, 0x3d, 0x1e, + 0x54, 0xad, 0x27, 0x83, 0xaa, 0xf5, 0x74, 0x50, 0xb5, 0x7e, 0x1f, 0x54, 0xad, 0x07, 0x7f, 0x54, + 0x0b, 0x9f, 0x9c, 0x9b, 0xed, 0xdf, 0xd5, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x8c, 0x32, + 0xb1, 0x96, 0x0d, 0x00, 0x00, } func (m *Condition) Marshal() (dAtA []byte, err error) { @@ -788,6 +789,16 @@ func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) @@ -1088,6 +1099,10 @@ func (m *OwnerReference) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) if m.Controller != nil { n += 2 } @@ -1268,6 +1283,8 @@ func (this *OwnerReference) String() string { s := strings.Join([]string{`&OwnerReference{`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, `Controller:` + valueToStringGenerated(this.Controller) + `,`, `}`, }, "") @@ -2696,6 +2713,70 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto index c63d3425f2..f237c9313b 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/generated.proto @@ -96,6 +96,7 @@ message LabelSelector { // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +kubebuilder:validation:Optional + // +listType=atomic repeated LabelSelectorRequirement matchExpressions = 2; } @@ -103,8 +104,6 @@ message LabelSelector { // relates the key and values. message LabelSelectorRequirement { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge optional string key = 1; // operator represents a key's relationship to a set of values. @@ -117,8 +116,8 @@ message LabelSelectorRequirement { // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. This array is replaced during a strategic // merge patch. - // // +kubebuilder:validation:Optional + // +listType=atomic repeated string values = 3; } @@ -265,6 +264,8 @@ message ObjectMeta { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid repeated OwnerReference ownerReferences = 13; } @@ -273,6 +274,9 @@ message ObjectMeta { // be cluster-scoped, so there is no namespace field. // +structType=atomic message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + // Kind of the referent. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds optional string kind = 1; @@ -281,6 +285,10 @@ message OwnerReference { // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names optional string name = 3; + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + optional string uid = 4; + // If true, this reference points to the managing controller. // +optional optional bool controller = 6; diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go index a3f6761275..d2d08077a0 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/helpers.go @@ -9,6 +9,7 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection" @@ -57,6 +58,100 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { return selector, nil } +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. 
the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. +// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %w", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: sets.List(req.Values()), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. 
+func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string, len(ls)), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + // FormatLabelSelector convert labelSelector into plain string func FormatLabelSelector(labelSelector *LabelSelector) string { selector, err := LabelSelectorAsSelector(labelSelector) @@ -73,10 +168,11 @@ func FormatLabelSelector(labelSelector *LabelSelector) string { // FullOwnerReferences converts slim OwnerReferences to original OwnerReferences func FullOwnerReferences(references []OwnerReference) []metav1.OwnerReference { - var fullRefs []metav1.OwnerReference for _, ref := range references { full := metav1.OwnerReference{ + APIVersion: ref.APIVersion, + UID: ref.UID, Name: ref.Name, Kind: ref.Kind, Controller: ref.Controller, @@ -88,11 +184,12 @@ func FullOwnerReferences(references []OwnerReference) []metav1.OwnerReference { // SlimOwnerReferences converts original OwnerReferences to slim OwnerReferences func SlimOwnerReferences(references []metav1.OwnerReference) []OwnerReference { - var slimRefs []OwnerReference for _, ref := range references { slim := OwnerReference{ + APIVersion: ref.APIVersion, Name: ref.Name, + UID: ref.UID, Kind: ref.Kind, Controller: ref.Controller, } @@ -100,3 +197,31 @@ func SlimOwnerReferences(references []metav1.OwnerReference) []OwnerReference { } return slimRefs } + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// HasLabel returns a bool if passed in label exists +func HasLabel(obj ObjectMeta, label string) bool { + _, found := obj.Labels[label] + return found +} + +// SetMetaDataLabel sets the label and value +func SetMetaDataLabel(obj *ObjectMeta, label string, value string) { + if obj.Labels == nil { + obj.Labels = make(map[string]string) + } + obj.Labels[label] = value +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go index f5b506010f..3ad9831830 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go @@ -38,24 +38,22 @@ func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj } // Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows // fast, direct access to metadata fields for API objects. 
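As a rough usage sketch of the helpers added in the helpers.go hunk above (ParseToLabelSelector, LabelSelectorAsMap, SetMetaDataLabel, HasLabel), assuming the package is imported under the slim_metav1 alias used elsewhere in this patch; the selector string and label key are arbitrary example values:

import (
	"fmt"

	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)

func exampleSelectorHelpers() error {
	// Parse a selector string; equality terms land in MatchLabels,
	// set-based terms in MatchExpressions.
	sel, err := slim_metav1.ParseToLabelSelector("app=guestbook,tier in (frontend)")
	if err != nil {
		return err
	}

	// Convert back to a plain map; single-value In expressions are folded in,
	// while Exists, DoesNotExist and NotIn would return an error.
	m, err := slim_metav1.LabelSelectorAsMap(sel)
	if err != nil {
		return err
	}
	fmt.Println(m) // map[app:guestbook tier:frontend]

	// The metadata helpers lazily initialize the Labels map.
	var obj slim_metav1.ObjectMeta
	slim_metav1.SetMetaDataLabel(&obj, "example.io/managed", "true")
	fmt.Println(slim_metav1.HasLabel(obj, "example.io/managed")) // true
	return nil
}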
-func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { - panic("ObjectMeta - GetGenerateName() not implemented") +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { panic("ObjectMeta - GetGeneration() not implemented") } +func (meta *ObjectMeta) SetGeneration(_ int64) { panic("ObjectMeta - SetGeneration() not implemented") } +func (meta *ObjectMeta) GetSelfLink() string { panic("ObjectMeta - GetSelfLink() not implemented") } +func (meta *ObjectMeta) SetSelfLink(_ string) { + panic("ObjectMeta - SetSelfLink() not implemented") } -func (meta *ObjectMeta) SetGenerateName(string) { - panic("ObjectMeta - SetGenerateName() not implemented") -} -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(ver string) { meta.ResourceVersion = ver } -func (meta *ObjectMeta) GetGeneration() int64 { panic("ObjectMeta - GetGeneration() not implemented") } -func (meta *ObjectMeta) SetGeneration(_ int64) { panic("ObjectMeta - SetGeneration() not implemented") } -func (meta *ObjectMeta) GetSelfLink() string { panic("ObjectMeta - GetSelfLink() not implemented") } -func (meta *ObjectMeta) SetSelfLink(_ string) { panic("ObjectMeta - SetSelfLink() not implemented") } func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { panic("ObjectMeta - GetCreationTimestamp() not implemented") } @@ -95,9 +93,6 @@ func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { meta.OwnerReferences = SlimOwnerReferences(references) } -func (meta *ObjectMeta) GetZZZ_DeprecatedClusterName() string { panic("not implemented") } -func (meta *ObjectMeta) SetZZZ_DeprecatedClusterName(_ string) { panic("not implemented") } - func (meta *ObjectMeta) GetManagedFields() []metav1.ManagedFieldsEntry { panic("ObjectMeta - GetManagedFields() not implemented") } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/register.go index 6df49bca43..d323d5714c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/register.go @@ -14,6 +14,13 @@ import ( // GroupName is the group name for this API. 
const GroupName = "meta.k8s.io" +var ( + // localSchemeBuilder is used to make compiler happy for autogenerated + // conversions. However, it's not used. + schemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &schemeBuilder +) + // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} @@ -21,6 +28,9 @@ var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} // TODO: this should be v1 probably var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + // Kind takes an unqualified kind and returns a Group qualified GroupKind func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() @@ -32,6 +42,20 @@ var scheme = runtime.NewScheme() // ParameterCodec knows about query parameters used with the meta v1 API spec. var ParameterCodec = runtime.NewParameterCodec(scheme) +var optionsTypes = []runtime.Object{} + +// AddToGroupVersion registers common meta types into schemas. +func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, optionsTypes...) + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterConversions(scheme)) + utilruntime.Must(RegisterDefaults(scheme)) +} + // AddMetaToScheme registers base meta types into schemas. func AddMetaToScheme(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, @@ -43,8 +67,10 @@ func AddMetaToScheme(scheme *runtime.Scheme) error { } func init() { - scheme.AddUnversionedTypes(SchemeGroupVersion) + scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) utilruntime.Must(AddMetaToScheme(scheme)) + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterDefaults(scheme)) } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go index 27d9338234..d3dea84ccd 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/types.go @@ -75,6 +75,18 @@ type ListMeta struct { RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } +// Field path constants that are specific to the internal API +// representation. +const ( + ObjectNameField = "metadata.name" +) + +// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here +const ( + FinalizerOrphanDependents = "orphan" + FinalizerDeleteDependents = "foregroundDeletion" +) + // ObjectMeta is metadata that all persisted resources must have, which includes all objects // users must create. 
type ObjectMeta struct { @@ -184,6 +196,8 @@ type ObjectMeta struct { // +optional // +patchMergeKey=uid // +patchStrategy=merge + // +listType=map + // +listMapKey=uid OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` } @@ -205,17 +219,41 @@ const ( // be cluster-scoped, so there is no namespace field. // +structType=atomic type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` // If true, this reference points to the managing controller. // +optional Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` } +const ( + // FieldValidationIgnore ignores unknown/duplicate fields + FieldValidationIgnore = "Ignore" + // FieldValidationWarn responds with a warning, but successfully serve the request + FieldValidationWarn = "Warn" + // FieldValidationStrict fails the request on unknown/duplicate fields + FieldValidationStrict = "Strict" +) + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + // Note: // There are two different styles of label selectors used in versioned types: // an older style which is represented as just a string in versioned types, and a @@ -235,6 +273,7 @@ type LabelSelector struct { MatchLabels map[string]MatchLabelsValue `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` // matchExpressions is a list of label selector requirements. The requirements are ANDed. // +kubebuilder:validation:Optional + // +listType=atomic MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` } @@ -248,9 +287,7 @@ type MatchLabelsValue = string // relates the key and values. type LabelSelectorRequirement struct { // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` // operator represents a key's relationship to a set of values. // Valid operators are In, NotIn, Exists and DoesNotExist. // @@ -260,8 +297,8 @@ type LabelSelectorRequirement struct { // the values array must be non-empty. If the operator is Exists or DoesNotExist, // the values array must be empty. This array is replaced during a strategic // merge patch. 
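With apiVersion and uid added to the slim OwnerReference above, a round trip through the FullOwnerReferences/SlimOwnerReferences helpers from helpers.go now preserves the full identity of the referent. A minimal sketch; the example values are arbitrary:

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
)

func exampleOwnerReferenceRoundTrip() {
	full := []metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       "frontend",
		UID:        types.UID("d9607e19-f88f-11e6-a518-42010a800195"),
	}}

	// Slim and full references now carry APIVersion and UID in both directions.
	slim := slim_metav1.SlimOwnerReferences(full)
	back := slim_metav1.FullOwnerReferences(slim)
	fmt.Println(back[0].APIVersion, back[0].UID == full[0].UID) // apps/v1 true
}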
- // // +kubebuilder:validation:Optional + // +listType=atomic Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` } diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go index 0e31e67b82..2f9f12b677 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation/validation.go @@ -6,27 +6,57 @@ package validation import ( + "fmt" + "regexp" + "unicode" + + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" ) -func ValidateLabelSelector(ps *slim_metav1.LabelSelector, fldPath *field.Path) field.ErrorList { +// LabelSelectorValidationOptions is a struct that can be passed to ValidateLabelSelector to record the validate options +type LabelSelectorValidationOptions struct { + // Allow invalid label value in selector + AllowInvalidLabelValueInSelector bool +} + +// LabelSelectorHasInvalidLabelValue returns true if the given selector contains an invalid label value in a match expression. +// This is useful for determining whether AllowInvalidLabelValueInSelector should be set to true when validating an update +// based on existing persisted invalid values. +func LabelSelectorHasInvalidLabelValue(ps *slim_metav1.LabelSelector) bool { + if ps == nil { + return false + } + for _, e := range ps.MatchExpressions { + for _, v := range e.Values { + if len(validation.IsValidLabelValue(v)) > 0 { + return true + } + } + } + return false +} + +// ValidateLabelSelector validate the LabelSelector according to the opts and returns any validation errors. +// opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. +func ValidateLabelSelector(ps *slim_metav1.LabelSelector, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ps == nil { return allErrs } allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) for i, expr := range ps.MatchExpressions { - allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) + allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, opts, fldPath.Child("matchExpressions").Index(i))...) } return allErrs } // ValidateLabelSelectorRequirement validate the requirement according to the opts and returns any validation errors. // opts.AllowInvalidLabelValueInSelector is only expected to be set to true when required for backwards compatibility with existing invalid data. 
-func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { +func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, opts LabelSelectorValidationOptions, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch sr.Operator { case slim_metav1.LabelSelectorOpIn, slim_metav1.LabelSelectorOpNotIn: @@ -41,6 +71,13 @@ func ValidateLabelSelectorRequirement(sr slim_metav1.LabelSelectorRequirement, f allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) } allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) + if !opts.AllowInvalidLabelValueInSelector { + for valueIndex, value := range sr.Values { + for _, msg := range validation.IsValidLabelValue(value) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("values").Index(valueIndex), value, msg)) + } + } + } return allErrs } @@ -64,3 +101,114 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi } return allErrs } + +var FieldManagerMaxLength = 128 + +// ValidateFieldManager valides that the fieldManager is the proper length and +// only has printable characters. +func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + // the field can not be set as a `*string`, so a empty string ("") is + // considered as not set and is defaulted by the rest of the process + // (unless apply is used, in which case it is required). + if len(fieldManager) > FieldManagerMaxLength { + allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) + } + // Verify that all characters are printable. + for i, r := range fieldManager { + if !unicode.IsPrint(r) { + allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) + } + } + + return allErrs +} + +var allowedFieldValidationValues = sets.NewString("", slim_metav1.FieldValidationIgnore, slim_metav1.FieldValidationWarn, slim_metav1.FieldValidationStrict) + +// ValidateFieldValidation validates that a fieldValidation query param only contains allowed values. +func ValidateFieldValidation(fldPath *field.Path, fieldValidation string) field.ErrorList { + allErrs := field.ErrorList{} + if !allowedFieldValidationValues.Has(fieldValidation) { + allErrs = append(allErrs, field.NotSupported(fldPath, fieldValidation, allowedFieldValidationValues.List())) + } + return allErrs + +} + +const MaxSubresourceNameLength = 256 + +func ValidateConditions(conditions []slim_metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + conditionTypeToFirstIndex := map[string]int{} + for i, condition := range conditions { + if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { + allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) + } else { + conditionTypeToFirstIndex[condition.Type] = i + } + + allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) 
+ } + + return allErrs +} + +// validConditionStatuses is used internally to check validity and provide a good message +var validConditionStatuses = sets.NewString(string(slim_metav1.ConditionTrue), string(slim_metav1.ConditionFalse), string(slim_metav1.ConditionUnknown)) + +const ( + maxReasonLen = 1 * 1024 + maxMessageLen = 32 * 1024 +) + +func ValidateCondition(condition slim_metav1.Condition, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // type is set and is a valid format + allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) + + // status is set and is an accepted value + if !validConditionStatuses.Has(string(condition.Status)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) + } + + if condition.ObservedGeneration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) + } + + if condition.LastTransitionTime.IsZero() { + allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) + } + + if len(condition.Reason) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) + } else { + for _, currErr := range isValidConditionReason(condition.Reason) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) + } + if len(condition.Reason) > maxReasonLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) + } + } + + if len(condition.Message) > maxMessageLen { + allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) + } + + return allErrs +} + +const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" +const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" + +var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") + +// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. +func isValidConditionReason(value string) []string { + if !conditionReasonRegexp.MatchString(value) { + return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} + } + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.conversion.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.conversion.go new file mode 100644 index 0000000000..e475040a27 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.conversion.go @@ -0,0 +1,23 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
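The validation.go changes above thread a LabelSelectorValidationOptions value through the selector validators. A hedged sketch of the update-style call pattern, where invalid label values are tolerated only if the previously stored selector already contained one; the field path is an arbitrary example:

import (
	"k8s.io/apimachinery/pkg/util/validation/field"

	slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
	"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/validation"
)

func exampleValidateSelectorUpdate(newSel, oldSel *slim_metav1.LabelSelector) field.ErrorList {
	opts := validation.LabelSelectorValidationOptions{
		// Tolerate invalid values only when they were already persisted.
		AllowInvalidLabelValueInSelector: validation.LabelSelectorHasInvalidLabelValue(oldSel),
	}
	return validation.ValidateLabelSelector(newSel, opts, field.NewPath("spec", "selector"))
}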
+func RegisterConversions(s *runtime.Scheme) error { + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go index 140545c0e6..21c8be665c 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.deepequal.go @@ -242,12 +242,18 @@ func (in *OwnerReference) DeepEqual(other *OwnerReference) bool { return false } + if in.APIVersion != other.APIVersion { + return false + } if in.Kind != other.Kind { return false } if in.Name != other.Name { return false } + if in.UID != other.UID { + return false + } if (in.Controller == nil) != (other.Controller == nil) { return false } else if in.Controller != nil { diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.defaults.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..cd8143c7f9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,20 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go new file mode 100644 index 0000000000..3955e271b0 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/doc.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// +deepequal-gen=package + +// Package types contains slimmer versions of k8s types. +// +groupName=util +package intstr diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go new file mode 100644 index 0000000000..376a6c0f87 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.pb.go @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto + +package intstr + +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_8984be45904ea297, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo + +func init() { + proto.RegisterType((*IntOrString)(nil), "github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr.IntOrString") +} + +func init() { + proto.RegisterFile("github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto", fileDescriptor_8984be45904ea297) +} + +var fileDescriptor_8984be45904ea297 = []byte{ + // 293 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xf2, 0x4a, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcc, 0xc9, 0x2c, 0x85, 0x53, 0x05, 0xd9, + 0xe9, 0xfa, 0xd9, 0x16, 0xc5, 0xfa, 0xc5, 0x39, 0x99, 0xb9, 0x60, 0x46, 0x62, 0x41, 0x66, 0xb1, + 0x7e, 0x69, 0x49, 0x66, 0x8e, 0x7e, 0x66, 0x5e, 0x49, 0x71, 0x49, 0x91, 0x7e, 0x7a, 0x6a, 0x5e, + 0x6a, 0x51, 0x62, 0x49, 0x6a, 0x8a, 0x5e, 0x41, 0x51, 0x7e, 0x49, 0xbe, 0x90, 0x15, 0xc2, 0x2c, + 0x3d, 0x88, 0x21, 0x30, 0xaa, 0x20, 0x3b, 0x5d, 0x2f, 0xdb, 0xa2, 0x58, 0x0f, 0x64, 0x16, 0x98, + 0x01, 0x32, 0x4b, 0x0f, 0x64, 0x96, 0x1e, 0xc4, 0x2c, 0x29, 0x5d, 0x24, 0x77, 0xa4, 0xe7, 0xa7, + 0xe7, 0xeb, 0x83, 0x8d, 0x4c, 0x2a, 0x4d, 0x03, 0xf3, 0xc0, 0x1c, 0x30, 0x0b, 0x62, 0x95, 0xd2, + 0x44, 0x46, 0x2e, 0x6e, 0xcf, 0xbc, 0x12, 0xff, 0xa2, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x21, + 0x0d, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x66, 0x27, 0x91, 0x13, + 0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0x67, 0x09, 0xa9, 0x2c, 0x48, 0xfd, 0x05, 0xa5, 0x83, 0xc0, + 0x2a, 0x84, 0xd4, 0xb8, 0xd8, 0x32, 0xf3, 0x4a, 0xc2, 0x12, 0x73, 0x24, 0x98, 0x14, 0x18, 0x35, + 0x58, 0x9d, 0xf8, 0xa0, 0x6a, 0xd9, 0x3c, 0xc1, 0xa2, 0x41, 0x50, 0x59, 0x90, 0xba, 0xe2, 0x92, + 0x22, 0x90, 0x3a, 0x66, 0x05, 0x46, 0x0d, 0x4e, 0x84, 0xba, 0x60, 0xb0, 0x68, 0x10, 0x54, 0xd6, + 0x8a, 0x63, 0xc6, 0x02, 0x79, 0x86, 0x86, 0x3b, 0x0a, 0x0c, 0x4e, 0x09, 0x27, 0x1e, 0xca, 0x31, + 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, + 0x18, 0x2f, 0x3c, 0x92, 0x63, 0xbc, 0xf1, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, + 0xe5, 0x18, 0xa2, 0xac, 0xc8, 0x0f, 0x70, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x17, 0x78, + 0x08, 0xad, 0x01, 0x00, 0x00, +} + +func (m *IntOrString) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= 
len(m.StrVal) + copy(dAtA[i:], m.StrVal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntVal |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto new file mode 100644 index 0000000000..2d929fd185 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/generated.proto @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.cilium.cilium.pkg.k8s.slim.k8s.apis.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go new file mode 100644 index 0000000000..2f76aa3777 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/intstr.go @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Copyright 2014 The Kubernetes Authors. + +package intstr + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "runtime/debug" + "strconv" + "strings" + + "k8s.io/klog/v2" +) + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. 
This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type IntOrString struct { + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` +} + +// Type represents the stored type of IntOrString. +type Type int64 + +const ( + Int Type = iota // The IntOrString holds an int. + String // The IntOrString holds a string. +) + +// FromInt creates an IntOrString object with an int32 value. It is +// your responsibility not to call this method with a value greater +// than int32. +// Deprecated: use FromInt32 instead. +func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } + return IntOrString{Type: Int, IntVal: int32(val)} +} + +// FromInt32 creates an IntOrString object with an int32 value. +func FromInt32(val int32) IntOrString { + return IntOrString{Type: Int, IntVal: val} +} + +// FromString creates an IntOrString object with a string value. +func FromString(val string) IntOrString { + return IntOrString{Type: String, StrVal: val} +} + +// Parse the given string and try to convert it to an int32 integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.ParseInt(val, 10, 32) + if err != nil { + return FromString(val) + } + return FromInt32(int32(i)) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (intstr *IntOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + intstr.Type = String + return json.Unmarshal(value, &intstr.StrVal) + } + intstr.Type = Int + return json.Unmarshal(value, &intstr.IntVal) +} + +// String returns the string value, or the Itoa of the int value. +func (intstr *IntOrString) String() string { + if intstr == nil { + return "" + } + if intstr.Type == String { + return intstr.StrVal + } + return strconv.Itoa(intstr.IntValue()) +} + +// IntValue returns the IntVal if type Int, or if +// it is a String, will attempt a conversion to int, +// returning 0 if a parsing error occurs. +func (intstr *IntOrString) IntValue() int { + if intstr.Type == String { + i, _ := strconv.Atoi(intstr.StrVal) + return i + } + return int(intstr.IntVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (intstr IntOrString) MarshalJSON() ([]byte, error) { + switch intstr.Type { + case Int: + return json.Marshal(intstr.IntVal) + case String: + return json.Marshal(intstr.StrVal) + default: + return []byte{}, fmt.Errorf("impossible IntOrString.Type") + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } + +// OpenAPIV3OneOfTypes is used by the kube-openapi generator when constructing +// the OpenAPI v3 spec of this type. 
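A small usage sketch of the IntOrString constructors and accessors defined above; the port number and string value are arbitrary:

import (
	"encoding/json"
	"fmt"

	"github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr"
)

func exampleIntOrString() {
	port := intstr.FromInt32(8080)
	name := intstr.Parse("http") // non-numeric input is kept as a string

	fmt.Println(port.IntValue(), name.String()) // 8080 http

	// JSON marshalling emits the inner type: a bare number or a quoted string.
	b, _ := json.Marshal([]intstr.IntOrString{port, name})
	fmt.Println(string(b)) // [8080,"http"]
}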
+func (IntOrString) OpenAPIV3OneOfTypes() []string { return []string{"integer", "string"} } + +func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { + if intOrPercent == nil { + return &defaultValue + } + return intOrPercent +} + +// GetScaledValueFromIntOrPercent is meant to replace GetValueFromIntOrPercent. +// This method returns a scaled value from an IntOrString type. If the IntOrString +// is a percentage string value it's treated as a percentage and scaled appropriately +// in accordance to the total, if it's an int value it's treated as a simple value and +// if it is a string value which is either non-numeric or numeric but lacking a trailing '%' it returns an error. +func GetScaledValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValueSafely(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %w", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// GetValueFromIntOrPercent was deprecated in favor of +// GetScaledValueFromIntOrPercent. This method was treating all int as a numeric value and all +// strings with or without a percent symbol as a percentage value. +// Deprecated +func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValue(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %w", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +// getIntOrPercentValue is a legacy function and only meant to be called by GetValueFromIntOrPercent +// For a more correct implementation call getIntOrPercentSafely +func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + s := strings.Replace(intOrStr.StrVal, "%", "", -1) + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %w", intOrStr.StrVal, err) + } + return int(v), true, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} + +func getIntOrPercentValueSafely(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + isPercent := false + s := intOrStr.StrVal + if strings.HasSuffix(s, "%") { + isPercent = true + s = strings.TrimSuffix(intOrStr.StrVal, "%") + } else { + return 0, false, fmt.Errorf("invalid type: string is not a percentage") + } + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %w", intOrStr.StrVal, err) + } + return int(v), isPercent, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go new file mode 100644 index 0000000000..c9cb4ad583 
--- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/util/intstr/zz_generated.deepequal.go @@ -0,0 +1,29 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Code generated by deepequal-gen. DO NOT EDIT. + +package intstr + +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *IntOrString) DeepEqual(other *IntOrString) bool { + if other == nil { + return false + } + + if in.Type != other.Type { + return false + } + if in.IntVal != other.IntVal { + return false + } + if in.StrVal != other.StrVal { + return false + } + + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go index 930c32d0dd..ad20042c01 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/utils/utils.go @@ -5,21 +5,28 @@ package utils import ( "net" - "sort" + "strings" v1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" v1meta "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/cilium/cilium/pkg/ip" + k8sconst "github.com/cilium/cilium/pkg/k8s/apis/cilium.io" slim_corev1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/selection" + labelsPkg "github.com/cilium/cilium/pkg/labels" + "github.com/cilium/cilium/pkg/slices" ) const ( // ServiceProxyNameLabel is the label for service proxy name in k8s service related // objects. serviceProxyNameLabel = "service.kubernetes.io/service-proxy-name" + // EndpointSliceMeshControllerName is a unique value used with LabelManagedBy to indicate + // the component managing an EndpointSlice. + EndpointSliceMeshControllerName = "endpointslice-mesh-controller.cilium.io" ) type NamespaceNameGetter interface { @@ -59,47 +66,25 @@ func GetObjNamespaceName(obj NamespaceNameGetter) string { return ns + "/" + obj.GetName() } -// ServiceConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier -type ServiceConfiguration interface { - // K8sServiceProxyNameValue must return the value of the proxy name - // annotation. If set, only services with this label will be handled. - K8sServiceProxyNameValue() string -} - -// IngressConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier -type IngressConfiguration interface { - // K8sIngressControllerEnabled returns true if ingress controller feature is enabled in Cilium - K8sIngressControllerEnabled() bool -} - -// GatewayAPIConfiguration is the required configuration for GetServiceAndEndpointListOptionsModifier -type GatewayAPIConfiguration interface { - // K8sGatewayAPIEnabled returns true if gateway API is enabled in Cilium - K8sGatewayAPIEnabled() bool -} - -// PolicyConfiguration is the required configuration for K8s NetworkPolicy -type PolicyConfiguration interface { - // K8sNetworkPolicyEnabled returns true if cilium agent needs to support K8s NetworkPolicy - K8sNetworkPolicyEnabled() bool -} - // GetEndpointSliceListOptionsModifier returns the options modifier for endpointSlice object list. // This methods returns a ListOptions modifier which adds a label selector to -// select all endpointSlice objects that do not contain the k8s headless service label. -// This is the same behavior as kube-proxy. 
+// select all endpointSlice objects they are not from remote clusters in Cilium cluster mesh. +// This is mostly the same behavior as kube-proxy except the cluster mesh behavior which is +// tied to how Cilium internally works with clustermesh endpoints and that this function also doesn't ignore headless Services. // Given label mirroring from the service objects to endpoint slice objects were introduced in Kubernetes PR 94443, // and released as part of Kubernetes v1.20; we can start using GetServiceAndEndpointListOptionsModifier for // endpoint slices when dropping support for Kubernetes v1.19 and older. We can do that since the // serviceProxyNameLabel label will then be mirrored to endpoint slices for services with that label. +// We also ignore Kubernetes endpoints coming from other clusters in the Cilium clustermesh here as +// Cilium does not rely on mirrored Kubernetes EndpointSlice for any of its functionalities. func GetEndpointSliceListOptionsModifier() (func(options *v1meta.ListOptions), error) { - nonHeadlessServiceSelector, err := labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil) + nonRemoteEndpointSelector, err := labels.NewRequirement(discoveryv1.LabelManagedBy, selection.NotEquals, []string{EndpointSliceMeshControllerName}) if err != nil { return nil, err } labelSelector := labels.NewSelector() - labelSelector = labelSelector.Add(*nonHeadlessServiceSelector) + labelSelector = labelSelector.Add(*nonRemoteEndpointSelector) return func(options *v1meta.ListOptions) { options.LabelSelector = labelSelector.String() @@ -109,28 +94,23 @@ func GetEndpointSliceListOptionsModifier() (func(options *v1meta.ListOptions), e // GetServiceAndEndpointListOptionsModifier returns the options modifier for service and endpoint object lists. // This methods returns a ListOptions modifier which adds a label selector to only // select services that are in context of Cilium. -// Like kube-proxy Cilium does not select services/endpoints containing k8s headless service label. +// Unlike kube-proxy Cilium does not select services/endpoints containing k8s headless service label. // We honor service.kubernetes.io/service-proxy-name label in the service object and only // handle services that match our service proxy name. If the service proxy name for Cilium // is an empty string, we assume that Cilium is the default service handler in which case // we select all services that don't have the above mentioned label. 
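Editor's note: the new EndpointSlice modifier filters on the managed-by label instead of the headless-service label. The sketch below reproduces the resulting list selector with the upstream k8s.io/apimachinery and k8s.io/api packages standing in for the slim copies used in the hunk; it is illustrative only:

package main

import (
	"fmt"

	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Exclude EndpointSlices managed by the cluster-mesh controller, mirroring
	// the requirement built by GetEndpointSliceListOptionsModifier above.
	req, err := labels.NewRequirement(
		discoveryv1.LabelManagedBy, selection.NotEquals,
		[]string{"endpointslice-mesh-controller.cilium.io"})
	if err != nil {
		panic(err)
	}
	sel := labels.NewSelector().Add(*req)

	opts := metav1.ListOptions{LabelSelector: sel.String()}
	fmt.Println(opts.LabelSelector)
	// endpointslice.kubernetes.io/managed-by!=endpointslice-mesh-controller.cilium.io
}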
-func GetServiceAndEndpointListOptionsModifier(cfg ServiceConfiguration) (func(options *v1meta.ListOptions), error) { +func GetServiceAndEndpointListOptionsModifier(k8sServiceProxy string) (func(options *v1meta.ListOptions), error) { var ( - serviceNameSelector, nonHeadlessServiceSelector *labels.Requirement - err error + serviceNameSelector *labels.Requirement + err error ) - nonHeadlessServiceSelector, err = labels.NewRequirement(v1.IsHeadlessService, selection.DoesNotExist, nil) - if err != nil { - return nil, err - } - - if cfg.K8sServiceProxyNameValue() == "" { + if k8sServiceProxy == "" { serviceNameSelector, err = labels.NewRequirement( serviceProxyNameLabel, selection.DoesNotExist, nil) } else { serviceNameSelector, err = labels.NewRequirement( - serviceProxyNameLabel, selection.DoubleEquals, []string{cfg.K8sServiceProxyNameValue()}) + serviceProxyNameLabel, selection.DoubleEquals, []string{k8sServiceProxy}) } if err != nil { @@ -138,7 +118,7 @@ func GetServiceAndEndpointListOptionsModifier(cfg ServiceConfiguration) (func(op } labelSelector := labels.NewSelector() - labelSelector = labelSelector.Add(*serviceNameSelector, *nonHeadlessServiceSelector) + labelSelector = labelSelector.Add(*serviceNameSelector) return func(options *v1meta.ListOptions) { options.LabelSelector = labelSelector.String() @@ -163,22 +143,18 @@ func ValidIPs(podStatus slim_corev1.PodStatus) []string { } // make it a set first to avoid repeated IP addresses - ipsMap := make(map[string]struct{}, 1+len(podStatus.PodIPs)) + ips := []string{} if podStatus.PodIP != "" { - ipsMap[podStatus.PodIP] = struct{}{} + ips = append(ips, podStatus.PodIP) } + for _, podIP := range podStatus.PodIPs { if podIP.IP != "" { - ipsMap[podIP.IP] = struct{}{} + ips = append(ips, podIP.IP) } } - ips := make([]string, 0, len(ipsMap)) - for ipStr := range ipsMap { - ips = append(ips, ipStr) - } - sort.Strings(ips) - return ips + return slices.SortedUnique(ips) } // IsPodRunning returns true if the pod is considered to be in running state. @@ -219,3 +195,68 @@ func GetClusterIPByFamily(ipFamily slim_corev1.IPFamily, service *slim_corev1.Se return "" } + +// nameLabelsGetter is an interface that returns the name and the labels for +// the namespace. +type nameLabelsGetter interface { + GetName() string + GetLabels() map[string]string +} + +// RemoveCiliumLabels returns a copy of the given labels map, without the labels owned by Cilium. +func RemoveCiliumLabels(labels map[string]string) map[string]string { + res := map[string]string{} + for k, v := range labels { + if strings.HasPrefix(k, k8sconst.LabelPrefix) { + continue + } + res[k] = v + } + return res +} + +// SanitizePodLabels makes sure that no important pod labels were overridden manually on k8s pod +// object creation. 
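Editor's note: the rewritten ValidIPs above collects PodIP and PodIPs in order and leaves deduplication to the internal pkg/slices SortedUnique helper. A minimal stand-in with the standard library slices package (Go 1.21+; the sortedUnique name is illustrative) behaves like this:

package main

import (
	"fmt"
	"slices"
)

// sortedUnique sorts the input and drops adjacent duplicates, which is what
// ValidIPs needs after merging PodIP and PodIPs into one slice.
func sortedUnique(ips []string) []string {
	slices.Sort(ips)
	return slices.Compact(ips)
}

func main() {
	ips := []string{"10.0.0.2", "10.0.0.1", "10.0.0.2", "fd00::1"}
	fmt.Println(sortedUnique(ips)) // [10.0.0.1 10.0.0.2 fd00::1]
}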
+func SanitizePodLabels(podLabels map[string]string, namespace nameLabelsGetter, serviceAccount, clusterName string) map[string]string { + sanitizedLabels := RemoveCiliumLabels(podLabels) + + // Sanitize namespace labels + for k, v := range namespace.GetLabels() { + sanitizedLabels[joinPath(k8sconst.PodNamespaceMetaLabels, k)] = v + } + // Sanitize namespace name label + sanitizedLabels[k8sconst.PodNamespaceLabel] = namespace.GetName() + // Sanitize service account name + if serviceAccount != "" { + sanitizedLabels[k8sconst.PolicyLabelServiceAccount] = serviceAccount + } else { + delete(sanitizedLabels, k8sconst.PolicyLabelServiceAccount) + } + // Sanitize cluster name + sanitizedLabels[k8sconst.PolicyLabelCluster] = clusterName + + return sanitizedLabels +} + +// StripPodSpecialLabels strips labels that are not supposed to be coming from a k8s pod object update. +func StripPodSpecialLabels(labels map[string]string) map[string]string { + sanitizedLabels := make(map[string]string) + for k, v := range RemoveCiliumLabels(labels) { + // If the key contains the prefix for namespace labels then we will + // ignore it. + if strings.HasPrefix(k, k8sconst.PodNamespaceMetaLabels) { + continue + } + // Also ignore it if the key is a kubernetes namespace label. + if k == k8sconst.PodNamespaceLabel { + continue + } + sanitizedLabels[k] = v + } + return sanitizedLabels +} + +// joinPath mimics JoinPath from pkg/policy/utils, which could not be imported here due to circular dependency +func joinPath(a, b string) string { + return a + labelsPkg.PathDelimiter + b +} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/array.go b/vendor/github.com/cilium/cilium/pkg/labels/array.go index 0582adb0fa..e13b055d73 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/array.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/array.go @@ -85,7 +85,7 @@ func (ls LabelArray) Contains(needed LabelArray) bool { nextLabel: for i := range needed { for l := range ls { - if needed[i].matches(&ls[l]) { + if ls[l].Has(&needed[i]) { continue nextLabel } } @@ -96,13 +96,29 @@ nextLabel: return true } +// Intersects returns true if ls contains at least one label in needed. +// +// This has the same matching semantics as Has, namely, +// ["k8s:foo=bar"].Intersects(["any:foo=bar"]) == true +// ["any:foo=bar"].Intersects(["k8s:foo=bar"]) == false +func (ls LabelArray) Intersects(needed LabelArray) bool { + for _, l := range ls { + for _, n := range needed { + if l.Has(&n) { + return true + } + } + } + return false +} + // Lacks is identical to Contains but returns all missing labels func (ls LabelArray) Lacks(needed LabelArray) LabelArray { missing := LabelArray{} nextLabel: for i := range needed { for l := range ls { - if needed[i].matches(&ls[l]) { + if ls[l].Has(&needed[l]) { continue nextLabel } } @@ -113,24 +129,25 @@ nextLabel: return missing } -// Has returns whether the provided key exists. +// Has returns whether the provided key exists in the label array. // Implementation of the // github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface. +// +// The key can be of source "any", in which case the source is +// ignored. The inverse, however, is not true. +// ["k8s.foo=bar"].Has("any.foo") => true +// ["any.foo=bar"].Has("k8s.foo") => false +// +// If the key is of source "cidr", this will also match +// broader keys. 
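Editor's note: the Contains/Intersects/Has comments above all rely on the same source-matching rule. The following is a simplified standalone model of that rule (the label type and has function here are not the vendored API): a concrete source only satisfies the same source, while "any" on the needed side matches every source.

package main

import "fmt"

type label struct{ source, key, value string }

// has reports whether l satisfies target: key and value must match, and the
// sources must match unless target's source is "any" (the "looser" side).
func has(l, target label) bool {
	if target.source != "any" && l.source != target.source {
		return false
	}
	return l.key == target.key && l.value == target.value
}

func main() {
	k8sLbl := label{"k8s", "foo", "bar"}
	anyLbl := label{"any", "foo", "bar"}
	fmt.Println(has(k8sLbl, anyLbl)) // true:  "k8s:foo=bar" satisfies "any:foo=bar"
	fmt.Println(has(anyLbl, k8sLbl)) // false: "any:foo=bar" does not satisfy "k8s:foo=bar"
}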
+// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true +// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false func (ls LabelArray) Has(key string) bool { // The key is submitted in the form of `source.key=value` keyLabel := parseSelectLabel(key, '.') - if keyLabel.IsAnySource() { - for l := range ls { - if ls[l].Key == keyLabel.Key { - return true - } - } - } else { - for _, lsl := range ls { - // Note that if '=value' is part of 'key' it is ignored here - if lsl.Source == keyLabel.Source && lsl.Key == keyLabel.Key { - return true - } + for _, l := range ls { + if l.HasKey(&keyLabel) { + return true } } return false @@ -139,19 +156,21 @@ func (ls LabelArray) Has(key string) bool { // Get returns the value for the provided key. // Implementation of the // github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels.Labels interface. +// +// The key can be of source "any", in which case the source is +// ignored. The inverse, however, is not true. +// ["k8s.foo=bar"].Get("any.foo") => "bar" +// ["any.foo=bar"].Get("k8s.foo") => "" +// +// If the key is of source "cidr", this will also match +// broader keys. +// ["cidr:1.1.1.1/32"].Has("cidr.1.0.0.0/8") => true +// ["cidr:1.0.0.0/8"].Has("cidr.1.1.1.1/32") => false func (ls LabelArray) Get(key string) string { keyLabel := parseSelectLabel(key, '.') - if keyLabel.IsAnySource() { - for l := range ls { - if ls[l].Key == keyLabel.Key { - return ls[l].Value - } - } - } else { - for _, lsl := range ls { - if lsl.Source == keyLabel.Source && lsl.Key == keyLabel.Key { - return lsl.Value - } + for _, l := range ls { + if l.HasKey(&keyLabel) { + return l.Value } } return "" diff --git a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go index 11e5669ec1..a96810ea1a 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/arraylist.go @@ -3,7 +3,9 @@ package labels -import "sort" +import ( + "sort" +) // LabelArrayList is an array of LabelArrays. It is primarily intended as a // simple collection @@ -45,6 +47,57 @@ func (ls LabelArrayList) Equals(b LabelArrayList) bool { return true } +// Diff returns the string of differences between 'ls' and 'expected' LabelArrayList with +// '+ ' or '- ' for obtaining something unexpected, or not obtaining the expected, respectively. +// For use in debugging. Assumes sorted LabelArrayLists. +func (ls LabelArrayList) Diff(expected LabelArrayList) (res string) { + res += "" + i := 0 + j := 0 + for i < len(ls) && j < len(expected) { + if ls[i].Equals(expected[j]) { + i++ + j++ + continue + } + if ls[i].Less(expected[j]) { + // obtained has an unexpected labelArray + res += " + " + ls[i].String() + "\n" + i++ + } + for j < len(expected) && expected[j].Less(ls[i]) { + // expected has a missing labelArray + res += " - " + expected[j].String() + "\n" + j++ + } + } + for i < len(ls) { + // obtained has an unexpected labelArray + res += " + " + ls[i].String() + "\n" + i++ + } + for j < len(expected) { + // expected has a missing labelArray + res += " - " + expected[j].String() + "\n" + j++ + } + + return res +} + +// GetModel returns the LabelArrayList as a [][]string. Each member LabelArray +// becomes a []string. +func (ls LabelArrayList) String() string { + res := "" + for _, v := range ls { + if res != "" { + res += ", " + } + res += v.String() + } + return res +} + // Sort sorts the LabelArrayList in-place, but also returns the sorted list // for convenience. 
The LabelArrays themselves must already be sorted. This is // true for all constructors of LabelArray. diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go new file mode 100644 index 0000000000..b2eb45f333 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/labels/cidr.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package labels + +import ( + "fmt" + "net/netip" + "strconv" + "strings" + + "github.com/cilium/cilium/pkg/option" +) + +var ( + worldLabelNonDualStack = Label{Source: LabelSourceReserved, Key: IDNameWorld} + worldLabelV4 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv4} + worldLabelV6 = Label{Source: LabelSourceReserved, Key: IDNameWorldIPv6} +) + +// maskedIPToLabelString is the base method for serializing an IP + prefix into +// a string that can be used for creating Labels and EndpointSelector objects. +// +// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't +// support colons inside the name section of a label. +func maskedIPToLabel(ipStr string, prefix int) Label { + var str strings.Builder + str.Grow( + 1 /* preZero */ + + len(ipStr) + + 1 /* postZero */ + + 2 /*len of prefix*/ + + 1, /* '/' */ + ) + + for i := 0; i < len(ipStr); i++ { + if ipStr[i] == ':' { + // EndpointSelector keys can't start or end with a "-", so insert a + // zero at the start or end if it would otherwise have a "-" at that + // position. + if i == 0 { + str.WriteByte('0') + str.WriteByte('-') + continue + } + if i == len(ipStr)-1 { + str.WriteByte('-') + str.WriteByte('0') + continue + } + str.WriteByte('-') + } else { + str.WriteByte(ipStr[i]) + } + } + str.WriteRune('/') + str.WriteString(strconv.Itoa(prefix)) + return Label{Key: str.String(), Source: LabelSourceCIDR} +} + +// IPStringToLabel parses a string and returns it as a CIDR label. +// +// If ip is not a valid IP address or CIDR Prefix, returns an error. +func IPStringToLabel(ip string) (Label, error) { + // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's + // an IP and not a CIDR. + i := strings.LastIndexByte(ip, '/') + if i < 0 { + parsedIP, err := netip.ParseAddr(ip) + if err != nil { + return Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) + } + return maskedIPToLabel(ip, parsedIP.BitLen()), nil + } else { + parsedPrefix, err := netip.ParsePrefix(ip) + if err != nil { + return Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) + } + return maskedIPToLabel(parsedPrefix.Masked().Addr().String(), parsedPrefix.Bits()), nil + } +} + +// GetCIDRLabels turns a CIDR in to a specially formatted label, and returns +// a Labels including the CIDR-specific label and the appropriate world label. +// e.g. "10.0.0.0/8" => ["cidr:10.0.0.0/8", "reserved:world-ipv4"] +// +// IPv6 requires some special treatment, since ":" is special in the label selector +// grammar. 
For example, "::/0" becomes "cidr:0--0/0", +func GetCIDRLabels(prefix netip.Prefix) Labels { + lbls := make(Labels, 2) + if prefix.Bits() > 0 { + l := maskedIPToLabel(prefix.Addr().String(), prefix.Bits()) + l.cidr = &prefix + lbls[l.Key] = l + } + lbls.AddWorldLabel(prefix.Addr()) + + return lbls +} + +func (lbls Labels) AddWorldLabel(addr netip.Addr) { + switch { + case !option.Config.IsDualStack(): + lbls[worldLabelNonDualStack.Key] = worldLabelNonDualStack + case addr.Is4(): + lbls[worldLabelV4.Key] = worldLabelV4 + default: + lbls[worldLabelV6.Key] = worldLabelV6 + } +} + +func LabelToPrefix(key string) (netip.Prefix, error) { + prefixStr := strings.Replace(key, "-", ":", -1) + pfx, err := netip.ParsePrefix(prefixStr) + if err != nil { + return netip.Prefix{}, fmt.Errorf("failed to parse label prefix %s: %w", key, err) + } + return pfx, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go deleted file mode 100644 index c05cf985da..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr/cidr.go +++ /dev/null @@ -1,107 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package cidr - -import ( - "fmt" - "net/netip" - "strconv" - "strings" - - "github.com/cilium/cilium/pkg/labels" -) - -// maskedIPToLabelString is the base method for serializing an IP + prefix into -// a string that can be used for creating Labels and EndpointSelector objects. -// -// For IPv6 addresses, it converts ":" into "-" as EndpointSelectors don't -// support colons inside the name section of a label. -func maskedIPToLabelString(ip netip.Addr, prefix int) string { - ipStr := ip.String() - ipNoColons := strings.Replace(ipStr, ":", "-", -1) - - // EndpointSelector keys can't start or end with a "-", so insert a - // zero at the start or end if it would otherwise have a "-" at that - // position. - preZero := "" - postZero := "" - if ipNoColons[0] == '-' { - preZero = "0" - } - if ipNoColons[len(ipNoColons)-1] == '-' { - postZero = "0" - } - var str strings.Builder - str.Grow( - len(labels.LabelSourceCIDR) + - len(preZero) + - len(ipNoColons) + - len(postZero) + - 2 /*len of prefix*/ + - 2, /* ':' '/' */ - ) - str.WriteString(labels.LabelSourceCIDR) - str.WriteRune(':') - str.WriteString(preZero) - str.WriteString(ipNoColons) - str.WriteString(postZero) - str.WriteRune('/') - str.WriteString(strconv.Itoa(prefix)) - return str.String() -} - -// IPStringToLabel parses a string and returns it as a CIDR label. -// -// If ip is not a valid IP address or CIDR Prefix, returns an error. -func IPStringToLabel(ip string) (labels.Label, error) { - var lblString string - // factored out of netip.ParsePrefix to avoid allocating an empty netip.Prefix in case it's - // an IP and not a CIDR. - i := strings.LastIndexByte(ip, '/') - if i < 0 { - parsedIP, err := netip.ParseAddr(ip) - if err != nil { - return labels.Label{}, fmt.Errorf("%q is not an IP address: %w", ip, err) - } - lblString = maskedIPToLabelString(parsedIP, parsedIP.BitLen()) - } else { - parsedPrefix, err := netip.ParsePrefix(ip) - if err != nil { - return labels.Label{}, fmt.Errorf("%q is not a CIDR: %w", ip, err) - } - lblString = maskedIPToLabelString(parsedPrefix.Masked().Addr(), parsedPrefix.Bits()) - } - return labels.ParseLabel(lblString), nil -} - -// GetCIDRLabels turns a CIDR into a set of labels representing the cidr itself -// and all broader CIDRS which include the specified CIDR in them. 
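Editor's note: the colon-to-dash rewrite performed by maskedIPToLabel, and the reverse substitution in LabelToPrefix, can be reproduced standalone with net/netip. The helper names below (cidrLabelKey, labelKeyToPrefix) are illustrative, not the vendored functions:

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// cidrLabelKey renders a prefix the way the cidr label source stores it:
// IPv6 colons become dashes, and a dash at the start or end is padded with
// a zero so the key stays a valid selector name.
func cidrLabelKey(p netip.Prefix) string {
	s := strings.ReplaceAll(p.Addr().String(), ":", "-")
	if strings.HasPrefix(s, "-") {
		s = "0" + s
	}
	if strings.HasSuffix(s, "-") {
		s += "0"
	}
	return fmt.Sprintf("%s/%d", s, p.Bits())
}

// labelKeyToPrefix applies the reverse substitution, as LabelToPrefix does.
func labelKeyToPrefix(key string) (netip.Prefix, error) {
	return netip.ParsePrefix(strings.ReplaceAll(key, "-", ":"))
}

func main() {
	fmt.Println(cidrLabelKey(netip.MustParsePrefix("10.0.0.0/8"))) // 10.0.0.0/8
	fmt.Println(cidrLabelKey(netip.MustParsePrefix("::/0")))       // 0--0/0
	fmt.Println(cidrLabelKey(netip.MustParsePrefix("f00d::/64")))  // f00d--0/64

	pfx, _ := labelKeyToPrefix("0--0/0")
	fmt.Println(pfx) // ::/0
}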
For example: -// CIDR: 10.0.0.0/8 => -// -// "cidr:10.0.0.0/8", "cidr:10.0.0.0/7", "cidr:8.0.0.0/6", -// "cidr:8.0.0.0/5", "cidr:0.0.0.0/4, "cidr:0.0.0.0/3", -// "cidr:0.0.0.0/2", "cidr:0.0.0.0/1", "cidr:0.0.0.0/0" -// -// The identity reserved:world is always added as it includes any CIDR. -func GetCIDRLabels(prefix netip.Prefix) labels.Labels { - ones := prefix.Bits() - result := make([]string, 0, ones+1) - - // If ones is zero, then it's the default CIDR prefix /0 which should - // just be regarded as reserved:world. In all other cases, we need - // to generate the set of prefixes starting from the /0 up to the - // specified prefix length. - if ones > 0 { - ip := prefix.Addr() - for i := 0; i <= ones; i++ { - p := netip.PrefixFrom(ip, i) - label := maskedIPToLabelString(p.Masked().Addr(), i) - result = append(result, label) - } - } - - result = append(result, labels.LabelSourceReserved+":"+labels.IDNameWorld) - - return labels.NewLabelsFromModel(result) -} diff --git a/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go b/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go deleted file mode 100644 index f97bd9a51f..0000000000 --- a/vendor/github.com/cilium/cilium/pkg/labels/cidr/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package cidr provides helper methods for generating labels for CIDRs which -// are partially derived from node state. -package cidr diff --git a/vendor/github.com/cilium/cilium/pkg/labels/labels.go b/vendor/github.com/cilium/cilium/pkg/labels/labels.go index 1eaf46e47e..40c714ee0f 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/labels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/labels.go @@ -7,9 +7,14 @@ import ( "bytes" "encoding/json" "fmt" - "net" - "sort" + "net/netip" + "slices" "strings" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/container/cache" + "github.com/cilium/cilium/pkg/logging/logfields" ) const ( @@ -26,6 +31,14 @@ const ( // IDNameWorld is the label used for the world ID. IDNameWorld = "world" + // IDNameWorldIPv4 is the label used for the world-ipv4 ID, to distinguish + // it from world-ipv6 in dual-stack mode. + IDNameWorldIPv4 = "world-ipv4" + + // IDNameWorldIPv6 is the label used for the world-ipv6 ID, to distinguish + // it from world-ipv4 in dual-stack mode. + IDNameWorldIPv6 = "world-ipv6" + // IDNameCluster is the label used to identify an unspecified endpoint // inside the cluster IDNameCluster = "cluster" @@ -42,6 +55,16 @@ const ( // with IDNameHost if the kube-apiserver is running on the local host. IDNameKubeAPIServer = "kube-apiserver" + // IDNameEncryptedOverlay is the label used to identify encrypted overlay + // traffic. + // + // It is part of the reserved identity 11 and signals that overlay traffic + // with this identity must be IPSec encrypted before leaving the host. + // + // This identity should never be seen on the wire and is used only on the + // local host. + IDNameEncryptedOverlay = "overlay-to-encrypt" + // IDNameIngress is the label used to identify Ingress proxies. It is part // of the reserved identity 8. IDNameIngress = "ingress" @@ -69,6 +92,12 @@ var ( // LabelWorld is the label used for world. LabelWorld = Labels{IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved)} + // LabelWorldIPv4 is the label used for world-ipv4. + LabelWorldIPv4 = Labels{IDNameWorldIPv4: NewLabel(IDNameWorldIPv4, "", LabelSourceReserved)} + + // LabelWorldIPv6 is the label used for world-ipv6. 
+ LabelWorldIPv6 = Labels{IDNameWorldIPv6: NewLabel(IDNameWorldIPv6, "", LabelSourceReserved)} + // LabelRemoteNode is the label used for remote nodes. LabelRemoteNode = Labels{IDNameRemoteNode: NewLabel(IDNameRemoteNode, "", LabelSourceReserved)} @@ -76,9 +105,18 @@ var ( // on IDNameKubeAPIServer. LabelKubeAPIServer = Labels{IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved)} + LabelKubeAPIServerExt = Labels{ + IDNameKubeAPIServer: NewLabel(IDNameKubeAPIServer, "", LabelSourceReserved), + IDNameWorld: NewLabel(IDNameWorld, "", LabelSourceReserved), + } + // LabelIngress is the label used for Ingress proxies. See comment // on IDNameIngress. LabelIngress = Labels{IDNameIngress: NewLabel(IDNameIngress, "", LabelSourceReserved)} + + // LabelKeyFixedIdentity is the label that can be used to define a fixed + // identity. + LabelKeyFixedIdentity = "io.cilium.fixed-identity" ) const ( @@ -100,18 +138,32 @@ const ( // LabelSourceContainer is a label imported from the container runtime LabelSourceContainer = "container" + // LabelSourceCNI is a label imported from the CNI plugin + LabelSourceCNI = "cni" + // LabelSourceReserved is the label source for reserved types. LabelSourceReserved = "reserved" // LabelSourceCIDR is the label source for generated CIDRs. LabelSourceCIDR = "cidr" + // LabelSourceCIDRGroup is the label source used for labels from CIDRGroups + LabelSourceCIDRGroup = "cidrgroup" + + // LabelSourceCIDRGroupKeyPrefix is the source as a k8s selector key prefix + LabelSourceCIDRGroupKeyPrefix = LabelSourceCIDRGroup + "." + + // LabelSourceNode is the label source for remote-nodes. + LabelSourceNode = "node" + + // LabelSourceFQDN is the label source for IPs resolved by fqdn lookups + LabelSourceFQDN = "fqdn" + // LabelSourceReservedKeyPrefix is the prefix of a reserved label LabelSourceReservedKeyPrefix = LabelSourceReserved + "." - // LabelKeyFixedIdentity is the label that can be used to define a fixed - // identity. - LabelKeyFixedIdentity = "io.cilium.fixed-identity" + // LabelSourceDirectory is the label source for policies read from files + LabelSourceDirectory = "directory" ) // Label is the Cilium's representation of a container label. @@ -122,36 +174,88 @@ type Label struct { // // +kubebuilder:validation:Optional Source string `json:"source"` + + // optimization for CIDR prefixes + // +deepequal-gen=false + cidr *netip.Prefix `json:"-"` } // Labels is a map of labels where the map's key is the same as the label's key. 
type Labels map[string]Label +// +// Convenience functions to use instead of Has(), which iterates through the labels +// + +// HasLabelWithKey returns true if lbls has a label with 'key' +func (l Labels) HasLabelWithKey(key string) bool { + _, ok := l[key] + return ok +} + +func (l Labels) HasFixedIdentityLabel() bool { + return l.HasLabelWithKey(LabelKeyFixedIdentity) +} + +func (l Labels) HasInitLabel() bool { + return l.HasLabelWithKey(IDNameInit) +} + +func (l Labels) HasHealthLabel() bool { + return l.HasLabelWithKey(IDNameHealth) +} + +func (l Labels) HasIngressLabel() bool { + return l.HasLabelWithKey(IDNameIngress) +} + +func (l Labels) HasHostLabel() bool { + return l.HasLabelWithKey(IDNameHost) +} + +func (l Labels) HasKubeAPIServerLabel() bool { + return l.HasLabelWithKey(IDNameKubeAPIServer) +} + +func (l Labels) HasRemoteNodeLabel() bool { + return l.HasLabelWithKey(IDNameRemoteNode) +} + +func (l Labels) HasWorldIPv6Label() bool { + return l.HasLabelWithKey(IDNameWorldIPv6) +} + +func (l Labels) HasWorldIPv4Label() bool { + return l.HasLabelWithKey(IDNameWorldIPv4) +} + +func (l Labels) HasNonDualstackWorldLabel() bool { + return l.HasLabelWithKey(IDNameWorld) +} + +func (l Labels) HasWorldLabel() bool { + return l.HasNonDualstackWorldLabel() || l.HasWorldIPv4Label() || l.HasWorldIPv6Label() +} + // GetPrintableModel turns the Labels into a sorted list of strings -// representing the labels, with CIDRs deduplicated (ie, only provide the most -// specific CIDR). +// representing the labels. func (l Labels) GetPrintableModel() (res []string) { - cidr := "" - prefixLength := 0 + res = make([]string, 0, len(l)) for _, v := range l { if v.Source == LabelSourceCIDR { - vStr := strings.Replace(v.String(), "-", ":", -1) - prefix := strings.Replace(v.Key, "-", ":", -1) - _, ipnet, _ := net.ParseCIDR(prefix) - ones, _ := ipnet.Mask.Size() - if ones > prefixLength { - cidr = vStr - prefixLength = ones + prefix, err := LabelToPrefix(v.Key) + if err != nil { + res = append(res, v.String()) + } else { + res = append(res, LabelSourceCIDR+":"+prefix.String()) } - continue + } else { + // not a CIDR label, no magic needed + res = append(res, v.String()) } - res = append(res, v.String()) - } - if cidr != "" { - res = append(res, cidr) } - sort.Strings(res) + slices.Sort(res) return res } @@ -160,20 +264,6 @@ func (l Labels) String() string { return strings.Join(l.GetPrintableModel(), ",") } -// AppendPrefixInKey appends the given prefix to all the Key's of the map and the -// respective Labels' Key. -func (l Labels) AppendPrefixInKey(prefix string) Labels { - newLabels := Labels{} - for k, v := range l { - newLabels[prefix+k] = Label{ - Key: prefix + v.Key, - Value: v.Value, - Source: v.Source, - } - } - return newLabels -} - // Equals returns true if the two Labels contain the same set of labels. func (l Labels) Equals(other Labels) bool { if len(l) != len(other) { @@ -202,6 +292,17 @@ func (l Labels) GetFromSource(source string) Labels { return lbls } +// RemoveFromSource removes all labels that are from the given source +func (l Labels) RemoveFromSource(source string) Labels { + lbls := Labels{} + for k, v := range l { + if v.Source != source { + lbls[k] = v + } + } + return lbls +} + // NewLabel returns a new label from the given key, value and source. If source is empty, // the default value will be LabelSourceUnspec. If key starts with '$', the source // will be overwritten with LabelSourceReserved. 
If key contains ':', the value @@ -222,11 +323,21 @@ func NewLabel(key string, value string, source string) Label { value = "" } - return Label{ - Key: key, - Value: value, - Source: source, + l := Label{ + Key: cache.Strings.Get(key), + Value: cache.Strings.Get(value), + Source: cache.Strings.Get(source), } + if l.Source == LabelSourceCIDR { + c, err := LabelToPrefix(l.Key) + if err != nil { + logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.") + } else { + l.cidr = &c + } + } + + return l } // Equals returns true if source, Key and Value are equal and false otherwise. @@ -247,9 +358,48 @@ func (l *Label) IsReservedSource() bool { return l.Source == LabelSourceReserved } -// matches returns true if l matches the target -func (l *Label) matches(target *Label) bool { - return l.Equals(target) +// Has returns true label L contains target. +// target may be "looser" w.r.t source or cidr, i.e. +// "k8s:foo=bar".Has("any:foo=bar") is true +// "any:foo=bar".Has("k8s:foo=bar") is false +// "cidr:10.0.0.1/32".Has("cidr:10.0.0.0/24") is true +func (l *Label) Has(target *Label) bool { + return l.HasKey(target) && l.Value == target.Value +} + +// HasKey returns true if l has target's key. +// target may be "looser" w.r.t source or cidr, i.e. +// "k8s:foo=bar".HasKey("any:foo") is true +// "any:foo=bar".HasKey("k8s:foo") is false +// "cidr:10.0.0.1/32".HasKey("cidr:10.0.0.0/24") is true +// "cidr:10.0.0.0/24".HasKey("cidr:10.0.0.1/32") is false +func (l *Label) HasKey(target *Label) bool { + if !target.IsAnySource() && l.Source != target.Source { + return false + } + + // Do cidr-aware matching if both sources are "cidr". + if target.Source == LabelSourceCIDR && l.Source == LabelSourceCIDR { + tc := target.cidr + if tc == nil { + v, err := LabelToPrefix(target.Key) + if err != nil { + tc = &v + } + } + lc := l.cidr + if lc == nil { + v, err := LabelToPrefix(l.Key) + if err != nil { + lc = &v + } + } + if tc != nil && lc != nil && tc.Bits() <= lc.Bits() && tc.Contains(lc.Addr()) { + return true + } + } + + return l.Key == target.Key } // String returns the string representation of Label in the for of Source:Key=Value or @@ -291,7 +441,7 @@ func (l *Label) UnmarshalJSON(data []byte) error { var aux string if err := json.Unmarshal(data, &aux); err != nil { - return fmt.Errorf("decode of Label as string failed: %+v", err) + return fmt.Errorf("decode of Label as string failed: %w", err) } if aux == "" { @@ -309,6 +459,15 @@ func (l *Label) UnmarshalJSON(data []byte) error { l.Value = aux.Value } + if l.Source == LabelSourceCIDR { + c, err := LabelToPrefix(l.Key) + if err == nil { + l.cidr = &c + } else { + logrus.WithField("key", l.Key).WithError(err).Error("Failed to parse CIDR label: invalid prefix.") + } + } + return nil } @@ -395,6 +554,15 @@ func NewLabelsFromModel(base []string) Labels { return lbls } +// FromSlice creates labels from a slice of labels. +func FromSlice(labels []Label) Labels { + lbls := make(Labels, len(labels)) + for _, lbl := range labels { + lbls[lbl.Key] = lbl + } + return lbls +} + // NewLabelsFromSortedList returns labels based on the output of SortedList() func NewLabelsFromSortedList(list string) Labels { return NewLabelsFromModel(strings.Split(list, ";")) @@ -413,7 +581,7 @@ func NewSelectLabelArrayFromModel(base []string) LabelArray { // NewFrom creates a new Labels from the given labels by creating a copy. 
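Editor's note: the cidr-aware branch of HasKey treats a broader prefix on the target side as matching a narrower prefix on the receiver. The containment test it relies on can be sketched with net/netip; broaderContains is an illustrative name, not the vendored implementation:

package main

import (
	"fmt"
	"net/netip"
)

// broaderContains reports whether target (the "needed" key) covers l's prefix,
// i.e. target is at most as specific as l and contains l's address.
func broaderContains(l, target netip.Prefix) bool {
	return target.Bits() <= l.Bits() && target.Contains(l.Addr())
}

func main() {
	host := netip.MustParsePrefix("10.0.0.1/32")
	net24 := netip.MustParsePrefix("10.0.0.0/24")
	fmt.Println(broaderContains(host, net24)) // true:  cidr:10.0.0.1/32 has key cidr:10.0.0.0/24
	fmt.Println(broaderContains(net24, host)) // false: the broader prefix lacks the narrower key
}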
func NewFrom(l Labels) Labels { - nl := NewLabelsFromModel(nil) + nl := make(Labels, len(l)) nl.MergeLabels(l) return nl } @@ -469,13 +637,24 @@ func (l Label) FormatForKVStore() []byte { // kvstore.prefixMatchesKey()) b := make([]byte, 0, len(l.Source)+len(l.Key)+len(l.Value)+3) buf := bytes.NewBuffer(b) + l.formatForKVStoreInto(buf) + return buf.Bytes() +} + +// formatForKVStoreInto writes the label as a formatted string, ending in +// a semicolon into buf. +// +// DO NOT BREAK THE FORMAT OF THIS. THE RETURNED STRING IS USED AS +// PART OF THE KEY IN THE KEY-VALUE STORE. +// +// Non-pointer receiver allows this to be called on a value in a map. +func (l Label) formatForKVStoreInto(buf *bytes.Buffer) { buf.WriteString(l.Source) buf.WriteRune(':') buf.WriteString(l.Key) buf.WriteRune('=') buf.WriteString(l.Value) buf.WriteRune(';') - return buf.Bytes() } // SortedList returns the labels as a sorted list, separated by semicolon @@ -487,12 +666,23 @@ func (l Labels) SortedList() []byte { for k := range l { keys = append(keys, k) } - sort.Strings(keys) + slices.Sort(keys) - b := make([]byte, 0, len(keys)*2) + // Labels can have arbitrary size. However, when many CIDR identities are in + // the system, for example due to a FQDN policy matching S3, CIDR labels + // dominate in number. IPv4 CIDR labels in serialized form are max 25 bytes + // long. Allocate slightly more to avoid having a realloc if there's some + // other labels which may longer, since the cost of allocating a few bytes + // more is dominated by a second allocation, especially since these + // allocations are short-lived. + // + // cidr:123.123.123.123/32=; + // 0 1 2 + // 1234567890123456789012345 + b := make([]byte, 0, len(keys)*30) buf := bytes.NewBuffer(b) for _, k := range keys { - buf.Write(l[k].FormatForKVStore()) + l[k].formatForKVStoreInto(buf) } return buf.Bytes() @@ -533,24 +723,38 @@ func (l Labels) FindReserved() Labels { // IsReserved returns true if any of the labels has a reserved source. func (l Labels) IsReserved() bool { + return l.HasSource(LabelSourceReserved) +} + +// Has returns true if l contains the given label. +func (l Labels) Has(label Label) bool { for _, lbl := range l { - if lbl.Source == LabelSourceReserved { + if lbl.Has(&label) { return true } } return false } -// Has returns true if l contains the given label. -func (l Labels) Has(label Label) bool { +// HasSource returns true if l contains the given label source. +func (l Labels) HasSource(source string) bool { for _, lbl := range l { - if lbl.matches(&label) { + if lbl.Source == source { return true } } return false } +// CollectSources returns all distinct label sources found in l +func (l Labels) CollectSources() map[string]struct{} { + sources := make(map[string]struct{}) + for _, lbl := range l { + sources[lbl.Source] = struct{}{} + } + return sources +} + // parseSource returns the parsed source of the given str. It also returns the next piece // of text that is after the source. 
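Editor's note: SortedList still emits "source:key=value;" records, now written through a shared buffer with a capacity estimate tuned for CIDR-heavy label sets; the wire format itself is unchanged. A small sketch of that format (label data here is made up for illustration):

package main

import (
	"bytes"
	"fmt"
	"slices"
)

func main() {
	kv := map[string]string{"app": "web", "tier": "db"} // key -> value, all k8s-sourced here
	keys := make([]string, 0, len(kv))
	for k := range kv {
		keys = append(keys, k)
	}
	slices.Sort(keys)

	var buf bytes.Buffer
	for _, k := range keys {
		// Mirrors formatForKVStoreInto: source ':' key '=' value ';'
		buf.WriteString("k8s:" + k + "=" + kv[k] + ";")
	}
	fmt.Println(buf.String()) // k8s:app=web;k8s:tier=db;
}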
// Example: @@ -607,6 +811,18 @@ func parseLabel(str string, delim byte) (lbl Label) { lbl.Value = next[i+1:] } } + + if lbl.Source == LabelSourceCIDR { + if lbl.Value != "" { + logrus.WithField(logfields.Label, lbl.String()).Error("Invalid CIDR label: labels with source cidr cannot have values.") + } + c, err := LabelToPrefix(lbl.Key) + if err != nil { + logrus.WithField(logfields.Label, str).WithError(err).Error("Failed to parse CIDR label: invalid prefix.") + } else { + lbl.cidr = &c + } + } return lbl } diff --git a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go index f05f9f0455..4dad8e7ab8 100644 --- a/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go +++ b/vendor/github.com/cilium/cilium/pkg/labels/oplabels.go @@ -18,7 +18,7 @@ func (k keepMarks) set(key string) { k[key] = struct{}{} // marked for keeping } -// OpLabels represents the the possible types. +// OpLabels represents the possible types. type OpLabels struct { // Active labels that are enabled and disabled but not deleted Custom Labels @@ -116,22 +116,22 @@ func (o *OpLabels) AllLabels() Labels { return all } -func (o *OpLabels) ReplaceInformationLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceInformationLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) for _, v := range l { keepers.set(v.Key) - if o.OrchestrationInfo.upsertLabel(v) { + if o.OrchestrationInfo.upsertLabel(sourceFilter, v) { changed = true logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning information label") } } - o.OrchestrationInfo.deleteUnMarked(keepers) + o.OrchestrationInfo.deleteUnMarked(sourceFilter, keepers) return changed } -func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { +func (o *OpLabels) ReplaceIdentityLabels(sourceFilter string, l Labels, logger *logrus.Entry) bool { changed := false keepers := make(keepMarks) @@ -141,13 +141,13 @@ func (o *OpLabels) ReplaceIdentityLabels(l Labels, logger *logrus.Entry) bool { // A disabled identity label stays disabled without value updates if _, found := o.Disabled[k]; found { disabledKeepers.set(k) - } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(v) { + } else if keepers.set(v.Key); o.OrchestrationIdentity.upsertLabel(sourceFilter, v) { logger.WithField(logfields.Object, logfields.Repr(v)).Debug("Assigning security relevant label") changed = true } } - if o.OrchestrationIdentity.deleteUnMarked(keepers) || o.Disabled.deleteUnMarked(disabledKeepers) { + if o.OrchestrationIdentity.deleteUnMarked(sourceFilter, keepers) || o.Disabled.deleteUnMarked(sourceFilter, disabledKeepers) { changed = true } @@ -201,25 +201,40 @@ func (o *OpLabels) ModifyIdentityLabels(addLabels, delLabels Labels) (changed bo // upsertLabel updates or inserts 'label' in 'l', but only if exactly the same label // was not already in 'l'. Returns 'true' if a label was added, or an old label was // updated, 'false' otherwise. -func (l Labels) upsertLabel(label Label) bool { +// The label is only updated if its source matches the provided 'sourceFilter' +// or in case the provided sourceFilter is 'LabelSourceAny'. The new label must +// also match the old label 'source' in order for it to be replaced. 
+func (l Labels) upsertLabel(sourceFilter string, label Label) bool { oldLabel, found := l[label.Key] if found { + if sourceFilter != LabelSourceAny && sourceFilter != oldLabel.Source { + return false + } + // Key is the same, check if Value and Source are also the same if label.Value == oldLabel.Value && label.Source == oldLabel.Source { return false // No change } + + // If the label is not from the same source, then don't replace it. + if oldLabel.Source != label.Source { + return false + } } + // Insert or replace old label l[label.Key] = label return true } // deleteUnMarked deletes the labels which have not been marked for keeping. +// The labels are only deleted if their source matches the provided sourceFilter +// or in case the provided sourceFilter is 'LabelSourceAny'. // Returns true if any of them were deleted. -func (l Labels) deleteUnMarked(marks keepMarks) bool { +func (l Labels) deleteUnMarked(sourceFilter string, marks keepMarks) bool { deleted := false - for k := range l { - if _, keep := marks[k]; !keep { + for k, v := range l { + if _, keep := marks[k]; !keep && (sourceFilter == LabelSourceAny || sourceFilter == v.Source) { delete(l, k) deleted = true } diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go index ac172cdea4..d6f2588624 100644 --- a/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go +++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/loadbalancer.go @@ -4,15 +4,22 @@ package loadbalancer import ( + "encoding/binary" + "errors" "fmt" "net" "sort" + "strconv" "strings" + "github.com/cilium/statedb/index" + "github.com/cilium/statedb/part" + "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/cidr" cmtypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/option" + "github.com/cilium/cilium/pkg/u8proto" ) // SVCType is a type of a service. @@ -46,6 +53,49 @@ const ( SVCNatPolicyNat64 = SVCNatPolicy("Nat64") ) +type SVCForwardingMode string + +const ( + SVCForwardingModeUndef = SVCForwardingMode("undef") + SVCForwardingModeDSR = SVCForwardingMode("dsr") + SVCForwardingModeSNAT = SVCForwardingMode("snat") +) + +func ToSVCForwardingMode(s string) SVCForwardingMode { + if s == option.NodePortModeDSR { + return SVCForwardingModeDSR + } + if s == option.NodePortModeSNAT { + return SVCForwardingModeSNAT + } + return SVCForwardingModeUndef +} + +type SVCLoadBalancingAlgorithm uint8 + +const ( + SVCLoadBalancingAlgorithmUndef = 0 + SVCLoadBalancingAlgorithmRandom = 1 + SVCLoadBalancingAlgorithmMaglev = 2 +) + +func ToSVCLoadBalancingAlgorithm(s string) SVCLoadBalancingAlgorithm { + if s == option.NodePortAlgMaglev { + return SVCLoadBalancingAlgorithmMaglev + } + if s == option.NodePortAlgRandom { + return SVCLoadBalancingAlgorithmRandom + } + return SVCLoadBalancingAlgorithmUndef +} + +type SVCSourceRangesPolicy string + +const ( + SVCSourceRangesPolicyAllow = SVCSourceRangesPolicy("allow") + SVCSourceRangesPolicyDeny = SVCSourceRangesPolicy("deny") +) + // ServiceFlags is the datapath representation of the service flags that can be // used (lb{4,6}_service.flags) type ServiceFlags uint16 @@ -66,18 +116,27 @@ const ( serviceFlagLoopback = 1 << 11 serviceFlagIntLocalScope = 1 << 12 serviceFlagTwoScopes = 1 << 13 + serviceFlagQuarantined = 1 << 14 + // serviceFlagSrcRangesDeny is set on master + // svc entry, serviceFlagQuarantined is only + // set on backend svc entries. 
+ serviceFlagSourceRangeDeny = 1 << 14 + serviceFlagFwdModeDSR = 1 << 15 ) type SvcFlagParam struct { SvcType SVCType SvcNatPolicy SVCNatPolicy + SvcFwdModeDSR bool SvcExtLocal bool SvcIntLocal bool SessionAffinity bool IsRoutable bool CheckSourceRange bool + SourceRangeDeny bool L7LoadBalancer bool LoopbackHostport bool + Quarantined bool } // NewSvcFlag creates service flag @@ -119,6 +178,9 @@ func NewSvcFlag(p *SvcFlagParam) ServiceFlags { if p.IsRoutable { flags |= serviceFlagRoutable } + if p.SourceRangeDeny { + flags |= serviceFlagSourceRangeDeny + } if p.CheckSourceRange { flags |= serviceFlagSourceRange } @@ -128,6 +190,12 @@ func NewSvcFlag(p *SvcFlagParam) ServiceFlags { if p.SvcExtLocal != p.SvcIntLocal && p.SvcType != SVCTypeClusterIP { flags |= serviceFlagTwoScopes } + if p.Quarantined { + flags |= serviceFlagQuarantined + } + if p.SvcFwdModeDSR { + flags |= serviceFlagFwdModeDSR + } return flags } @@ -150,6 +218,10 @@ func (s ServiceFlags) SVCType() SVCType { } } +func (s ServiceFlags) IsL7LB() bool { + return s&serviceFlagL7LoadBalancer != 0 +} + // SVCExtTrafficPolicy returns a service traffic policy from the flags func (s ServiceFlags) SVCExtTrafficPolicy() SVCTrafficPolicy { switch { @@ -183,9 +255,19 @@ func (s ServiceFlags) SVCNatPolicy(fe L3n4Addr) SVCNatPolicy { } } +// SVCSlotQuarantined +func (s ServiceFlags) SVCSlotQuarantined() bool { + if s&serviceFlagQuarantined == 0 { + return false + } else { + return true + } +} + // String returns the string implementation of ServiceFlags. func (s ServiceFlags) String() string { var str []string + seenDeny := false str = append(str, string(s.SVCType())) if s&serviceFlagExtLocalScope != 0 { @@ -205,6 +287,10 @@ func (s ServiceFlags) String() string { } if s&serviceFlagSourceRange != 0 { str = append(str, "check source-range") + if s&serviceFlagSourceRangeDeny != 0 { + seenDeny = true + str = append(str, "deny") + } } if s&serviceFlagNat46x64 != 0 { str = append(str, "46x64") @@ -215,7 +301,12 @@ func (s ServiceFlags) String() string { if s&serviceFlagLoopback != 0 { str = append(str, "loopback") } - + if !seenDeny && s&serviceFlagQuarantined != 0 { + str = append(str, "quarantined") + } + if s&serviceFlagFwdModeDSR != 0 { + str = append(str, "dsr") + } return strings.Join(str, ", ") } @@ -225,7 +316,10 @@ func (s ServiceFlags) UInt16() uint16 { } const ( + // NONE type. NONE = L4Type("NONE") + // ANY type. + ANY = L4Type("ANY") // TCP type. TCP = L4Type("TCP") // UDP type. @@ -320,14 +414,25 @@ func GetBackendStateFromFlags(flags uint8) BackendState { // DefaultBackendWeight is used when backend weight is not set in ServiceSpec const DefaultBackendWeight = 100 -var ( - // AllProtocols is the list of all supported L4 protocols - AllProtocols = []L4Type{TCP, UDP, SCTP} -) +// AllProtocols is the list of all supported L4 protocols +var AllProtocols = []L4Type{TCP, UDP, SCTP} // L4Type name. type L4Type = string +func L4TypeAsByte(l4 L4Type) byte { + switch l4 { + case TCP: + return 'T' + case UDP: + return 'U' + case SCTP: + return 'S' + default: + return '?' + } +} + // FEPortName is the name of the frontend's port. 
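Editor's note: per the hunk above, serviceFlagSourceRangeDeny and serviceFlagQuarantined share bit 14 and are disambiguated by where they appear (master vs. backend svc entry), while bit 15 carries the DSR forwarding mode. A stripped-down sketch of that bit packing (the constant values are copied from the hunk; the local type and names are illustrative):

package main

import "fmt"

type serviceFlags uint16

const (
	flagSourceRangeDeny serviceFlags = 1 << 14 // master svc entry only
	flagQuarantined     serviceFlags = 1 << 14 // backend svc entries only (same bit)
	flagFwdModeDSR      serviceFlags = 1 << 15
)

func main() {
	// A master entry for a DSR service whose LoadBalancerSourceRanges act as a deny list.
	master := flagFwdModeDSR | flagSourceRangeDeny
	fmt.Printf("master=%#x dsr=%v deny=%v\n",
		uint16(master), master&flagFwdModeDSR != 0, master&flagSourceRangeDeny != 0)

	// A quarantined backend entry reuses bit 14 with a different meaning.
	backend := flagFwdModeDSR | flagQuarantined
	fmt.Printf("backend=%#x quarantined=%v\n",
		uint16(backend), backend&flagQuarantined != 0)
}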
type FEPortName string @@ -342,6 +447,31 @@ type ServiceName struct { Cluster string } +func (n *ServiceName) Equal(other ServiceName) bool { + return n.Namespace == other.Namespace && + n.Name == other.Name && + n.Cluster == other.Cluster +} + +func (n ServiceName) Compare(other ServiceName) int { + switch { + case n.Namespace < other.Namespace: + return -1 + case n.Namespace > other.Namespace: + return 1 + case n.Name < other.Name: + return -1 + case n.Name > other.Name: + return 1 + case n.Cluster < other.Cluster: + return -1 + case n.Cluster > other.Cluster: + return 1 + default: + return 0 + } +} + func (n ServiceName) String() string { if n.Cluster != "" { return n.Cluster + "/" + n.Namespace + "/" + n.Name @@ -373,6 +503,8 @@ type Backend struct { // Node hosting this backend. This is used to determine backends local to // a node. NodeName string + // Zone where backend is located. + ZoneID uint8 L3n4Addr // State of the backend for load-balancing service traffic State BackendState @@ -381,25 +513,29 @@ type Backend struct { } func (b *Backend) String() string { - return b.L3n4Addr.String() + state, _ := b.State.String() + return "[" + b.L3n4Addr.String() + "," + "State:" + state + "]" } // SVC is a structure for storing service details. type SVC struct { - Frontend L3n4AddrID // SVC frontend addr and an allocated ID - Backends []*Backend // List of service backends - Type SVCType // Service type - ExtTrafficPolicy SVCTrafficPolicy // Service external traffic policy - IntTrafficPolicy SVCTrafficPolicy // Service internal traffic policy - NatPolicy SVCNatPolicy // Service NAT 46/64 policy + Frontend L3n4AddrID // SVC frontend addr and an allocated ID + Backends []*Backend // List of service backends + Type SVCType // Service type + ForwardingMode SVCForwardingMode // Service mode (DSR vs SNAT) + ExtTrafficPolicy SVCTrafficPolicy // Service external traffic policy + IntTrafficPolicy SVCTrafficPolicy // Service internal traffic policy + NatPolicy SVCNatPolicy // Service NAT 46/64 policy + SourceRangesPolicy SVCSourceRangesPolicy SessionAffinity bool SessionAffinityTimeoutSec uint32 - HealthCheckNodePort uint16 // Service health check node port - Name ServiceName // Fully qualified service name + HealthCheckNodePort uint16 // Service health check node port + Name ServiceName // Fully qualified service name + LoadBalancerAlgorithm SVCLoadBalancingAlgorithm // Service LB algorithm (random or maglev) LoadBalancerSourceRanges []*cidr.CIDR - L7LBProxyPort uint16 // Non-zero for L7 LB services - L7LBFrontendPorts []string // Non-zero for L7 LB frontend service ports + L7LBProxyPort uint16 // Non-zero for L7 LB services LoopbackHostport bool + Annotations map[string]string } func (s *SVC) GetModel() *models.Service { @@ -520,6 +656,10 @@ func IsValidBackendState(state string) bool { func NewL4Type(name string) (L4Type, error) { switch strings.ToLower(name) { + case "none": + return NONE, nil + case "any": + return ANY, nil case "tcp": return TCP, nil case "udp": @@ -531,6 +671,19 @@ func NewL4Type(name string) (L4Type, error) { } } +func NewL4TypeFromNumber(proto uint8) L4Type { + switch proto { + case 6: + return TCP + case 17: + return UDP + case 132: + return SCTP + default: + return ANY + } +} + // L4Addr is an abstraction for the backend port with a L4Type, usually tcp or udp, and // the Port number. 
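Editor's note: NewL4TypeFromNumber above maps IANA protocol numbers onto L4 types and falls back to ANY. The mapping is small enough to sanity-check with a standalone stand-in (the function below mirrors the switch in the hunk, it is not the vendored API):

package main

import "fmt"

// l4TypeFromNumber mirrors NewL4TypeFromNumber using the IANA protocol
// numbers for TCP, UDP and SCTP.
func l4TypeFromNumber(proto uint8) string {
	switch proto {
	case 6:
		return "TCP"
	case 17:
		return "UDP"
	case 132:
		return "SCTP"
	default:
		return "ANY"
	}
}

func main() {
	for _, p := range []uint8{6, 17, 132, 1} {
		fmt.Println(p, "->", l4TypeFromNumber(p)) // 1 (ICMP) falls back to ANY
	}
}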
// @@ -554,6 +707,22 @@ func NewL4Addr(protocol L4Type, number uint16) *L4Addr { return &L4Addr{Protocol: protocol, Port: number} } +// Equals returns true if both L4Addr are considered equal. +func (l *L4Addr) Equals(o *L4Addr) bool { + switch { + case (l == nil) != (o == nil): + return false + case (l == nil) && (o == nil): + return true + } + return l.Port == o.Port && l.Protocol == o.Protocol +} + +// String returns a string representation of an L4Addr +func (l *L4Addr) String() string { + return fmt.Sprintf("%d/%s", l.Port, l.Protocol) +} + // L3n4Addr is used to store, as an unique L3+L4 address in the KVStore. It also // includes the lookup scope for frontend addresses which is used in service // handling for externalTrafficPolicy=Local and internalTrafficPolicy=Local, @@ -621,23 +790,84 @@ func NewL3n4AddrFromModel(base *models.FrontendAddress) (*L3n4Addr, error) { return &L3n4Addr{AddrCluster: addrCluster, L4Addr: *l4addr, Scope: scope}, nil } +// L3n4AddrFromString constructs a StateDB key by parsing the input in the form of +// L3n4Addr.String(), e.g. :/protocol. The input can be partial to construct +// keys for prefix searches, e.g. "1.2.3.4". +// This must be kept in sync with Bytes(). +func L3n4AddrFromString(key string) (index.Key, error) { + keyErr := errors.New("bad key, expected \":/(/i)\", e.g. \"1.2.3.4:80/TCP\"") + var out []byte + + if len(key) == 0 { + return index.Key{}, keyErr + } + + // Parse address + var addr string + if strings.HasPrefix(key, "[") { + addr, key, _ = strings.Cut(key[1:], "]") + switch { + case strings.HasPrefix(key, ":"): + key = key[1:] + case len(key) > 0: + return index.Key{}, keyErr + } + } else { + addr, key, _ = strings.Cut(key, ":") + } + + addrCluster, err := cmtypes.ParseAddrCluster(addr) + if err != nil { + return index.Key{}, fmt.Errorf("%w: %w", keyErr, err) + } + addr20 := addrCluster.As20() + out = append(out, addr20[:]...) + + // Parse port + if len(key) > 0 { + var s string + s, key, _ = strings.Cut(key, "/") + port, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return index.Key{}, fmt.Errorf("%w: %w", keyErr, err) + } + out = binary.BigEndian.AppendUint16(out, uint16(port)) + } + + // Parse protocol + hadProto := false + if len(key) > 0 { + var proto string + proto, key, _ = strings.Cut(key, "/") + protoByte := L4TypeAsByte(strings.ToUpper(proto)) + if protoByte == '?' { + return index.Key{}, fmt.Errorf("%w: bad protocol, expected TCP/UDP/SCTP", keyErr) + } + out = append(out, protoByte) + hadProto = true + } + + // Parse scope. + switch { + case key == "i": + out = append(out, ScopeInternal) + case hadProto: + // Since external scope is implicit we add it here if the protocol was + // also provided. This way we can construct partial keys for prefix + // searching and we can construct complete key for 'get'. + out = append(out, ScopeExternal) + } + return index.Key(out), nil +} + // NewBackend creates the Backend struct instance from given params. // The default state for the returned Backend is BackendStateActive. 
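Editor's note: assuming the vendored github.com/cilium/cilium/pkg/loadbalancer package (and the statedb index.Key byte-slice type it returns) is importable, the new L3n4AddrFromString accepts both full and partial keys. Per the code in the hunk, a full key is 24 bytes (20-byte address/cluster, 2-byte big-endian port, protocol byte, implicit external-scope byte) while a bare address yields a shorter prefix-search key. A sketch:

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/loadbalancer"
)

func main() {
	// Full key: address + port + protocol; external scope is appended implicitly.
	full, err := loadbalancer.L3n4AddrFromString("1.2.3.4:80/TCP")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(full)) // 24 = 20 (addr+cluster) + 2 (port) + 1 (proto) + 1 (scope)

	// Partial key, usable for prefix searches over a StateDB index.
	partial, _ := loadbalancer.L3n4AddrFromString("1.2.3.4")
	fmt.Println(len(partial)) // 20
}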
func NewBackend(id BackendID, protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16) *Backend { - lbport := NewL4Addr(protocol, portNumber) - b := Backend{ - ID: id, - L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *lbport}, - State: BackendStateActive, - Preferred: Preferred(false), - Weight: DefaultBackendWeight, - } - - return &b + return NewBackendWithState(id, protocol, addrCluster, portNumber, 0, BackendStateActive) } // NewBackendWithState creates the Backend struct instance from given params. -func NewBackendWithState(id BackendID, protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, +func NewBackendWithState(id BackendID, protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber uint16, zone uint8, state BackendState) *Backend { lbport := NewL4Addr(protocol, portNumber) b := Backend{ @@ -645,6 +875,7 @@ func NewBackendWithState(id BackendID, protocol L4Type, addrCluster cmtypes.Addr L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *lbport}, State: state, Weight: DefaultBackendWeight, + ZoneID: zone, } return &b @@ -655,8 +886,7 @@ func NewBackendFromBackendModel(base *models.BackendAddress) (*Backend, error) { return nil, fmt.Errorf("missing IP address") } - // FIXME: Should this be NONE ? - l4addr := NewL4Addr(NONE, base.Port) + l4addr := NewL4Addr(base.Protocol, base.Port) addrCluster, err := cmtypes.ParseAddrCluster(*base.IP) if err != nil { return nil, err @@ -668,6 +898,7 @@ func NewBackendFromBackendModel(base *models.BackendAddress) (*Backend, error) { b := &Backend{ NodeName: base.NodeName, + ZoneID: option.Config.GetZoneID(base.Zone), L3n4Addr: L3n4Addr{AddrCluster: addrCluster, L4Addr: *l4addr}, State: state, Preferred: Preferred(base.Preferred), @@ -689,8 +920,7 @@ func NewL3n4AddrFromBackendModel(base *models.BackendAddress) (*L3n4Addr, error) return nil, fmt.Errorf("missing IP address") } - // FIXME: Should this be NONE ? - l4addr := NewL4Addr(NONE, base.Port) + l4addr := NewL4Addr(base.Protocol, base.Port) addrCluster, err := cmtypes.ParseAddrCluster(*base.IP) if err != nil { return nil, err @@ -708,9 +938,10 @@ func (a *L3n4Addr) GetModel() *models.FrontendAddress { scope = models.FrontendAddressScopeInternal } return &models.FrontendAddress{ - IP: a.AddrCluster.String(), - Port: a.Port, - Scope: scope, + IP: a.AddrCluster.String(), + Protocol: a.Protocol, + Port: a.Port, + Scope: scope, } } @@ -723,25 +954,20 @@ func (b *Backend) GetBackendModel() *models.BackendAddress { stateStr, _ := b.State.String() return &models.BackendAddress{ IP: &addrClusterStr, + Protocol: b.Protocol, Port: b.Port, NodeName: b.NodeName, + Zone: option.Config.GetZone(b.ZoneID), State: stateStr, Preferred: bool(b.Preferred), Weight: &b.Weight, } } -// String returns the L3n4Addr in the "IPv4:Port[/Scope]" format for IPv4 and -// "[IPv6]:Port[/Scope]" format for IPv6. +// String returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]" format for IPv4 and +// "[IPv6]:Port/Protocol[/Scope]" format for IPv6. 
func (a *L3n4Addr) String() string { - var scope string - if a.Scope == ScopeInternal { - scope = "/i" - } - if a.IsIPv6() { - return fmt.Sprintf("[%s]:%d%s", a.AddrCluster.String(), a.Port, scope) - } - return fmt.Sprintf("%s:%d%s", a.AddrCluster.String(), a.Port, scope) + return a.StringWithProtocol() } // StringWithProtocol returns the L3n4Addr in the "IPv4:Port/Protocol[/Scope]" @@ -752,15 +978,13 @@ func (a *L3n4Addr) StringWithProtocol() string { scope = "/i" } if a.IsIPv6() { - return fmt.Sprintf("[%s]:%d/%s%s", a.AddrCluster.String(), a.Port, a.Protocol, scope) + return "[" + a.AddrCluster.String() + "]:" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope } - return fmt.Sprintf("%s:%d/%s%s", a.AddrCluster.String(), a.Port, a.Protocol, scope) + return a.AddrCluster.String() + ":" + strconv.FormatUint(uint64(a.Port), 10) + "/" + a.Protocol + scope } // StringID returns the L3n4Addr as string to be used for unique identification func (a *L3n4Addr) StringID() string { - // This does not include the protocol right now as the datapath does - // not include the protocol in the lookup of the service IP. return a.String() } @@ -768,14 +992,15 @@ func (a *L3n4Addr) StringID() string { // Note: the resulting string is meant to be used as a key for maps and is not // readable by a human eye when printed out. func (a L3n4Addr) Hash() string { - const lenProto = 0 // proto is omitted for now + const lenProto = 1 // proto is uint8 const lenScope = 1 // scope is uint8 which is an alias for byte const lenPort = 2 // port is uint16 which is 2 bytes b := make([]byte, cmtypes.AddrClusterLen+lenProto+lenScope+lenPort) ac20 := a.AddrCluster.As20() copy(b, ac20[:]) - // FIXME: add Protocol once we care about protocols + u8p, _ := u8proto.ParseProtocol(a.Protocol) + b[net.IPv6len] = byte(u8p) // scope is a uint8 which is an alias for byte so a cast is safe b[net.IPv6len+lenProto] = byte(a.Scope) // port is a uint16, so 2 bytes @@ -789,6 +1014,30 @@ func (a *L3n4Addr) IsIPv6() bool { return a.AddrCluster.Is6() } +// ProtocolsEqual returns true if protocols match for both L3 and L4. +func (l *L3n4Addr) ProtocolsEqual(o *L3n4Addr) bool { + return l.Protocol == o.Protocol && + (l.AddrCluster.Is4() && o.AddrCluster.Is4() || + l.AddrCluster.Is6() && o.AddrCluster.Is6()) +} + +// Bytes returns the address as a byte slice for indexing purposes. +// Similar to Hash() but includes the L4 protocol. +func (l L3n4Addr) Bytes() []byte { + const keySize = cmtypes.AddrClusterLen + + 2 /* Port */ + + 1 /* Protocol */ + + 1 /* Scope */ + + key := make([]byte, 0, keySize) + addr20 := l.AddrCluster.As20() + key = append(key, addr20[:]...) + key = binary.BigEndian.AppendUint16(key, l.Port) + key = append(key, L4TypeAsByte(l.Protocol)) + key = append(key, l.Scope) + return key +} + // L3n4AddrID is used to store, as an unique L3+L4 plus the assigned ID, in the // KVStore. // @@ -817,3 +1066,10 @@ func NewL3n4AddrID(protocol L4Type, addrCluster cmtypes.AddrCluster, portNumber func (l *L3n4AddrID) IsIPv6() bool { return l.L3n4Addr.IsIPv6() } + +func init() { + // Register the types for use with part.Map and part.Set. 
+ part.RegisterKeyType( + func(name ServiceName) []byte { return []byte(name.String()) }) + part.RegisterKeyType(L3n4Addr.Bytes) +} diff --git a/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go index 45d5cd88ce..465db266e8 100644 --- a/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/loadbalancer/zz_generated.deepcopy.go @@ -106,10 +106,12 @@ func (in *SVC) DeepCopyInto(out *SVC) { } } } - if in.L7LBFrontendPorts != nil { - in, out := &in.L7LBFrontendPorts, &out.L7LBFrontendPorts - *out = make([]string, len(*in)) - copy(*out, *in) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } return } diff --git a/vendor/github.com/cilium/cilium/pkg/lock/map.go b/vendor/github.com/cilium/cilium/pkg/lock/map.go new file mode 100644 index 0000000000..04c3b09af4 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/lock/map.go @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package lock + +import "sync" + +// Map is a thin generic wrapper around sync.Map. The sync.Map description from +// the standard library follows (and is also propagated to the corresponding +// methods) for users' convenience: +// +// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// by multiple goroutines without additional locking or coordination. +// Loads, stores, and deletes run in amortized constant time. +// +// The Map type is specialized. Most code should use a plain Go map instead, +// with separate locking or coordination, for better type safety and to make it +// easier to maintain other invariants along with the map content. +// +// The Map type is optimized for two common use cases: (1) when the entry for a given +// key is only ever written once but read many times, as in caches that only grow, +// or (2) when multiple goroutines read, write, and overwrite entries for disjoint +// sets of keys. In these two cases, use of a Map may significantly reduce lock +// contention compared to a Go map paired with a separate Mutex or RWMutex. +// +// The zero Map is empty and ready for use. A Map must not be copied after first use. +type Map[K comparable, V any] sync.Map + +// MapCmpValues is an extension of Map, which additionally wraps the two extra +// methods requiring values to be also of comparable type. +type MapCmpValues[K, V comparable] Map[K, V] + +// Load returns the value stored in the map for a key, or the zero value if no +// value is present. The ok result indicates whether value was found in the map. +func (m *Map[K, V]) Load(key K) (value V, ok bool) { + val, ok := (*sync.Map)(m).Load(key) + return m.convert(val, ok) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadOrStore(key, value) + return val.(V), loaded +} + +// LoadAndDelete deletes the value for a key, returning the previous value if any +// (zero value otherwise). The loaded result reports whether the key was present. 
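Because Map is a typed wrapper over sync.Map, callers get the semantics described above without type assertions. A minimal usage sketch relying only on methods defined in this file:

	var active lock.Map[string, int]
	active.Store("ep-1", 42)
	if v, ok := active.Load("ep-1"); ok {
		_ = v // v is already an int
	}
	active.Range(func(key string, value int) bool {
		return true // keep iterating
	})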
+func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) { + val, loaded := (*sync.Map)(m).LoadAndDelete(key) + return m.convert(val, loaded) +} + +// Store sets the value for a key. +func (m *Map[K, V]) Store(key K, value V) { + (*sync.Map)(m).Store(key, value) +} + +// Swap swaps the value for a key and returns the previous value if any (zero +// value otherwise). The loaded result reports whether the key was present. +func (m *Map[K, V]) Swap(key K, value V) (previous V, loaded bool) { + val, loaded := (*sync.Map)(m).Swap(key, value) + return m.convert(val, loaded) +} + +// Delete deletes the value for a key. +func (m *Map[K, V]) Delete(key K) { + (*sync.Map)(m).Delete(key) +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently (including by f), Range may reflect any +// mapping for that key from any point during the Range call. Range does not +// block other methods on the receiver; even f itself may call any method on m. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map[K, V]) Range(f func(key K, value V) bool) { + (*sync.Map)(m).Range(func(key, value any) bool { + return f(key.(K), value.(V)) + }) +} + +// CompareAndDelete deletes the entry for key if its value is equal to old. +// If there is no current value for key in the map, CompareAndDelete returns false +// (even if the old value is the nil interface value). +func (m *MapCmpValues[K, V]) CompareAndDelete(key K, old V) (deleted bool) { + return (*sync.Map)(m).CompareAndDelete(key, old) +} + +// CompareAndSwap swaps the old and new values for key if the value stored in +// the map is equal to old. +func (m *MapCmpValues[K, V]) CompareAndSwap(key K, old, new V) bool { + return (*sync.Map)(m).CompareAndSwap(key, old, new) +} + +func (m *Map[K, V]) convert(value any, ok bool) (V, bool) { + if !ok { + return *new(V), false + } + + return value.(V), true +} + +func (m *Map[K, V]) IsEmpty() bool { + empty := true + check := func(_ K, _ V) bool { + empty = false + return false // returning false breaks the iteration + } + m.Range(check) + return empty +} diff --git a/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go new file mode 100644 index 0000000000..3b700bdfbc --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/lock/sortable_mutex.go @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package lock + +import ( + "sort" + "sync" + "sync/atomic" + "time" +) + +// sortableMutexSeq is a global sequence counter for the creation of new +// SortableMutex's with unique sequence numbers. +var sortableMutexSeq atomic.Uint64 + +// sortableMutex implements SortableMutex. Not exported as the only way to +// initialize it is via NewSortableMutex(). 
+type sortableMutex struct { + sync.Mutex + seq uint64 + acquireDuration time.Duration +} + +func (s *sortableMutex) Lock() { + start := time.Now() + s.Mutex.Lock() + s.acquireDuration += time.Since(start) +} + +func (s *sortableMutex) Seq() uint64 { return s.seq } + +func (s *sortableMutex) AcquireDuration() time.Duration { return s.acquireDuration } + +// SortableMutex provides a Mutex that can be globally sorted with other +// sortable mutexes. This allows deadlock-safe locking of a set of mutexes +// as it guarantees consistent lock ordering. +type SortableMutex interface { + sync.Locker + Seq() uint64 + AcquireDuration() time.Duration // The amount of time it took to acquire the lock +} + +// SortableMutexes is a set of mutexes that can be locked in a safe order. +// Once Lock() is called it must not be mutated! +type SortableMutexes []SortableMutex + +// Len implements sort.Interface. +func (s SortableMutexes) Len() int { + return len(s) +} + +// Less implements sort.Interface. +func (s SortableMutexes) Less(i int, j int) bool { + return s[i].Seq() < s[j].Seq() +} + +// Swap implements sort.Interface. +func (s SortableMutexes) Swap(i int, j int) { + s[i], s[j] = s[j], s[i] +} + +// Lock sorts the mutexes, and then locks them in order. If any lock cannot be acquired, +// this will block while holding the locks with a lower sequence number. +func (s SortableMutexes) Lock() { + sort.Sort(s) + for _, mu := range s { + mu.Lock() + } +} + +// Unlock locks the sorted set of mutexes locked by prior call to Lock(). +func (s SortableMutexes) Unlock() { + for _, mu := range s { + mu.Unlock() + } +} + +var _ sort.Interface = SortableMutexes{} + +func NewSortableMutex() SortableMutex { + seq := sortableMutexSeq.Add(1) + return &sortableMutex{ + seq: seq, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go index 219b9c83e5..8f5f2d6b0e 100644 --- a/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go +++ b/vendor/github.com/cilium/cilium/pkg/lock/stoppable_waitgroup.go @@ -14,7 +14,7 @@ type StoppableWaitGroup struct { noopAdd chan struct{} // i is the internal counter which can store tolerate negative values // as opposed the golang's library WaitGroup. - i *int64 + i atomic.Int64 doneOnce, stopOnce sync.Once } @@ -24,7 +24,6 @@ func NewStoppableWaitGroup() *StoppableWaitGroup { return &StoppableWaitGroup{ noopDone: make(chan struct{}), noopAdd: make(chan struct{}), - i: func() *int64 { i := int64(0); return &i }(), doneOnce: sync.Once{}, stopOnce: sync.Once{}, } @@ -37,11 +36,11 @@ func (l *StoppableWaitGroup) Stop() { l.stopOnce.Do(func() { // We will do an Add here so we can perform a Done after we close // the l.noopAdd channel. - l.Add() + done := l.Add() close(l.noopAdd) - // Calling Done() here so we know that in case 'l.i' will become zero + // Calling done() here so we know that in case 'l.i' will become zero // it will trigger a close of l.noopDone channel. - l.Done() + done() }) } @@ -58,36 +57,46 @@ func (l *StoppableWaitGroup) WaitChannel() <-chan struct{} { return l.noopDone } +// DoneFunc returned by Add() marks the goroutine as completed. +type DoneFunc func() + // Add adds the goroutine to the list of routines to that Wait() will have // to wait before it returns. // If the StoppableWaitGroup was stopped this will be a no-op. -func (l *StoppableWaitGroup) Add() { +// Returns a "done" function to mark the goroutine as completed. 
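A caller-side sketch of the deadlock-safe locking SortableMutexes provides, using only the constructor and methods defined above (illustrative only):

	muA, muB := lock.NewSortableMutex(), lock.NewSortableMutex()
	set := lock.SortableMutexes{muB, muA} // declaration order does not matter
	set.Lock()                            // locks in ascending sequence order
	defer set.Unlock()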
Wait() is +// unblocked once all done functions obtained before Stop() have been called. +func (l *StoppableWaitGroup) Add() DoneFunc { select { case <-l.noopAdd: + return func() {} default: - atomic.AddInt64(l.i, 1) + l.i.Add(1) + var once sync.Once + return func() { + once.Do(l.done) + } } } -// Done will decrement the number of goroutines the Wait() will have to wait +// done will decrement the number of goroutines the Wait() will have to wait // before it returns. // This function is a no-op once all goroutines that have called 'Add()' have // also called 'Done()' and the StoppableWaitGroup was stopped. -func (l *StoppableWaitGroup) Done() { +func (l *StoppableWaitGroup) done() { select { case <-l.noopDone: return default: select { case <-l.noopAdd: - a := atomic.AddInt64(l.i, -1) + a := l.i.Add(-1) if a <= 0 { l.doneOnce.Do(func() { close(l.noopDone) }) } default: - a := atomic.AddInt64(l.i, -1) + a := l.i.Add(-1) select { // in case the channel was close while we where in this default // case we will need to check if 'a' is less than zero and close diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go index f0bc6e1e61..89d2e46382 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go @@ -11,6 +11,21 @@ const ( // LogSubsys is the field denoting the subsystem when logging LogSubsys = "subsys" + // Version is a field for a generic version number + Version = "version" + + // NewVersion is a field for a new version number + NewVersion = "newVersion" + + // OldVersion is a field for a old version number + OldVersion = "oldVersion" + + // Stacktrace is a field for a stacktrace + Stacktrace = "stacktrace" + + // Changes is a generic field for any relevant changes + Changes = "changes" + // Signal is the field to print os signals on exit etc. Signal = "signal" @@ -32,12 +47,21 @@ const ( // EndpointState is the current endpoint state EndpointState = "endpointState" + // Error is the Go error + Error = "error" + // EventUUID is an event unique identifier EventUUID = "eventID" + // CNIAttachmentID uniquely identifies an endpoint + CNIAttachmentID = "cniAttachmentID" + // ContainerID is the container identifier ContainerID = "containerID" + // ContainerInterface is the name of the interface in the container namespace + ContainerInterface = "containerInterface" + // IdentityLabels are the labels relevant for the security identity IdentityLabels = "identityLabels" @@ -47,8 +71,11 @@ const ( // Labels are any label, they may not be relevant to the security identity. Labels = "labels" - // Source is the label or node information source - Source = "source" + // Label is a singular label, where relevant + Label = "label" + + // SourceFilter is the label or node information source + SourceFilter = "sourceFilter" // Controller is the name of the controller to log it. 
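The StoppableWaitGroup change above replaces the exported Done with a per-Add DoneFunc. A hypothetical calling pattern:

	swg := lock.NewStoppableWaitGroup()
	done := swg.Add()
	go func() {
		defer done() // idempotent; only the first call decrements the counter
	}()
	swg.Stop()
	<-swg.WaitChannel() // closes once every done obtained before Stop has run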
Controller = "controller" @@ -69,10 +96,25 @@ const ( // OldIdentity is a previously used security identity OldIdentity = "oldIdentity" + // PolicyKey is a policy map key + PolicyKey = "policyKey" + + // PolicyEntry is a policy map value + PolicyEntry = "policyEntry" + // PolicyRevision is the revision of the policy in the repository or of // the object in question PolicyRevision = "policyRevision" + // PolicyKeysAdded is a set of added policy map keys + PolicyKeysAdded = "policyKeysAdded" + + // PolicyKeysDeleted is a set of deleted policy map keys + PolicyKeysDeleted = "policyKeysDeleted" + + // PolicyEntriesOld is a set of old policy map keys and values + PolicyEntriesOld = "policyEntriesOld" + // DatapathPolicyRevision is the policy revision currently running in // the datapath DatapathPolicyRevision = "datapathPolicyRevision" @@ -178,6 +220,9 @@ const ( // Port is a L4 port Port = "port" + // Ports is a list of L4 ports + Ports = "ports" + // PortName is a k8s ContainerPort Name PortName = "portName" @@ -214,7 +259,7 @@ const ( // NewCIDR is the new subnet/CIDR NewCIDR = "newCIDR" - // IPAddrs is a lsit of IP addrs + // IPAddrs is a list of IP addrs IPAddrs = "ipAddrs" // MTU is the maximum transmission unit of one interface @@ -229,6 +274,12 @@ const ( // VethPair is a tuple of Veth that are paired VethPair = "vethPair" + // Netkit is a netkit object or ID + Netkit = "netkit" + + // NetkitPair is a tuple of Netkit that are paired + NetkitPair = "netkitPair" + // NetNSName is a name of a network namespace NetNSName = "netNSName" @@ -254,6 +305,12 @@ const ( // SessionAffinityTimeout is a timeout for the session affinity SessionAffinityTimeout = "sessionAffinityTimeout" + // LoadBalancerAlgorithm is algorithm for backend selection + LoadBalancerAlgorithm = "LoadBalancerAlgorithm" + + // LoadBalancerSourceRangesPolicy is the LB SVC source ranges policy + LoadBalancerSourceRangesPolicy = "loadBalancerSourceRangesPolicy" + // LoadBalancerSourceRanges is the LB SVC source ranges LoadBalancerSourceRanges = "loadBalancerSourceRanges" @@ -281,6 +338,9 @@ const ( // ServiceType is the type of the service ServiceType = "svcType" + // ServiceForwardingMode is the mode of the service (SNAT, DSR) + ServiceForwardingMode = "svcForwardingMode" + // ServiceHealthCheckNodePort is the port on which we serve health checks ServiceHealthCheckNodePort = "svcHealthCheckNodePort" @@ -308,6 +368,12 @@ const ( // BackendSlot is the backend slot number in a service BPF map BackendSlot = "backendSlot" + // ProxyName is the name of a proxy (e.g., "Envoy") + ProxyName = "proxyName" + + // ProxyPort is the port number of an L7 proxy listener. + ProxyPort = "ProxyPort" + // L7LBProxyPort is the port number of the Envoy listener a L7 LB service redirects traffic to for load balancing. 
L7LBProxyPort = "l7LBProxyPort" @@ -332,9 +398,6 @@ const ( // BPFClockSource denotes the internal clock source (ktime vs jiffies) BPFClockSource = "bpfClockSource" - // BPFInsnSet denotes the instruction set version - BPFInsnSet = "bpfInsnSet" - // CiliumLocalRedirectPolicyName is the name of a CiliumLocalRedirectPolicy CiliumLocalRedirectName = "ciliumLocalRedirectPolicyName" @@ -350,6 +413,9 @@ const ( // Listener is the name of an Envoy Listener defined in CEC or CCEC Listener = "listener" + // ListenerPriority is the priority of an Envoy Listener defined in CEC or CCEC + ListenerPriority = "listenerPriority" + // BPFMapKey is a key from a BPF map BPFMapKey = "bpfMapKey" @@ -384,6 +450,9 @@ const ( // Selector is a selector of any sort: endpoint, CIDR, toFQDNs Selector = "Selector" + // SelectorCacgeVersion is the version of the SelectorCache. + SelectorCacheVersion = "selectorCacheVersion" + // EndpointLabelSelector is a selector for Endpoints by label EndpointLabelSelector = "EndpointLabelSelector" @@ -638,6 +707,9 @@ const ( // CEPUID is the UID of the CiliumEndpoint. CEPUID = "ciliumEndpointUID" + // CIDName is the name of the CiliumIdentity. + CIDName = "ciliumIdentityName" + // CESName is the name of the CiliumEndpointSlice. CESName = "ciliumEndpointSliceName" @@ -648,16 +720,18 @@ const ( WorkQueueBurstLimit = "workQueueBurstLimit" // WorkQueueSyncBackoff is the backoff time used by workqueues before an attempt to retry sync with k8s-apiserver. - WorkQueueSyncBackOff = "workQueueSyncBackOff" - - // CESSliceMode indicates the name of algorithm used to batch CEPs in a CES. - CESSliceMode = "ciliumEndpointSliceMode" + WorkQueueSyncBackOff = "workQueueSyncBackOff" + WorkQueueMaxSyncBackOff = "workQueueMaxSyncBackOff" // SourceIP is a source IP SourceIP = "sourceIP" DestinationIP = "destinationIP" + LocalIP = "localIP" + + RemoteIP = "remoteIP" + SourceCIDR = "sourceCIDR" // DestinationCIDR is a destination CIDR @@ -732,4 +806,27 @@ const ( // State is the state of an individual component (apiserver, kvstore etc) State = "state" + + // EtcdQPSLimit is the QPS limit for an etcd client. + EtcdQPSLimit = "etcdQPSLimit" + + // LeaseID identifies a KVStore lease + LeaseID = "leaseID" + + // EventType identifies the type of KVStore events + EventType = "eventType" + + // Entries specifies the number of KVStore entries + Entries = "entries" + // Action is the summarized action from a reconciliation. + Action = "action" + + // EtcdClusterID is the ID of the etcd cluster + EtcdClusterID = "etcdClusterID" + + // NetnsCookie is the Linux kernel netns cookie. + NetnsCookie = "netnsCookie" + + // Target identifies a target value + Target = "target" ) diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logging.go b/vendor/github.com/cilium/cilium/pkg/logging/logging.go index ccb7fb1353..afd927d5cf 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logging.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logging.go @@ -8,6 +8,7 @@ import ( "bytes" "flag" "fmt" + "io" "os" "regexp" "strings" @@ -28,6 +29,7 @@ const ( FormatOpt = "format" LogFormatText LogFormat = "text" + LogFormatTextTimestamp LogFormat = "text-ts" LogFormatJSON LogFormat = "json" LogFormatJSONTimestamp LogFormat = "json-ts" @@ -35,15 +37,31 @@ const ( // we want to use (possible values: text or json) DefaultLogFormat LogFormat = LogFormatText + // DefaultLogFormatTimestamp is the string representation of the default logrus.Formatter + // including timestamps. 
+ // We don't use this for general runtime logs since kubernetes log capture handles those. + // This is only used for applications such as CNI which is written to disk so we have no + // way to correlate with other logs. + DefaultLogFormatTimestamp LogFormat = LogFormatTextTimestamp + // DefaultLogLevel is the default log level we want to use for our logrus.Formatter DefaultLogLevel logrus.Level = logrus.InfoLevel ) // DefaultLogger is the base logrus logger. It is different from the logrus // default to avoid external dependencies from writing out unexpectedly -var DefaultLogger = InitializeDefaultLogger() +var DefaultLogger = initializeDefaultLogger() + +var klogErrorOverrides = []logLevelOverride{ + { + // TODO: We can drop the misspelled case here once client-go version is bumped to include: + // https://github.com/kubernetes/client-go/commit/ae43527480ee9d8750fbcde3d403363873fd3d89 + matcher: regexp.MustCompile("Failed to update lock (optimitically|optimistically).*falling back to slow path"), + targetLevel: logrus.InfoLevel, + }, +} -func initializeKLog() { +func initializeKLog() error { log := DefaultLogger.WithField(logfields.LogSubsys, "klog") //Create a new flag set and set error handler @@ -61,22 +79,130 @@ func initializeKLog() { // necessary. klogFlags.Set("skip_headers", "true") + errWriter, err := severityOverrideWriter(logrus.ErrorLevel, log, klogErrorOverrides) + if err != nil { + return fmt.Errorf("failed to setup klog error writer: %w", err) + } + klog.SetOutputBySeverity("INFO", log.WriterLevel(logrus.InfoLevel)) klog.SetOutputBySeverity("WARNING", log.WriterLevel(logrus.WarnLevel)) - klog.SetOutputBySeverity("ERROR", log.WriterLevel(logrus.ErrorLevel)) + klog.SetOutputBySeverity("ERROR", errWriter) klog.SetOutputBySeverity("FATAL", log.WriterLevel(logrus.FatalLevel)) // Do not repeat log messages on all severities in klog klogFlags.Set("one_output", "true") + + return nil +} + +type logLevelOverride struct { + matcher *regexp.Regexp + targetLevel logrus.Level +} + +func levelToPrintFunc(log *logrus.Entry, level logrus.Level) (func(args ...any), error) { + var printFunc func(args ...any) + switch level { + case logrus.InfoLevel: + printFunc = log.Info + case logrus.WarnLevel: + printFunc = log.Warn + case logrus.ErrorLevel: + printFunc = log.Error + default: + return nil, fmt.Errorf("unsupported log level %q", level) + } + return printFunc, nil +} + +func severityOverrideWriter(level logrus.Level, log *logrus.Entry, overrides []logLevelOverride) (*io.PipeWriter, error) { + printFunc, err := levelToPrintFunc(log, level) + if err != nil { + return nil, err + } + reader, writer := io.Pipe() + + for _, override := range overrides { + _, err := levelToPrintFunc(log, override.targetLevel) + if err != nil { + return nil, fmt.Errorf("failed to validate klog matcher level overrides (%s -> %s): %w", + override.matcher.String(), level, err) + } + } + go writerScanner(log, reader, printFunc, overrides) + return writer, nil +} + +// writerScanner scans the input from the reader and writes it to the appropriate +// log print func. +// In cases where the log message is overridden, that will be emitted via the specified +// target log level logger function. 
+// +// Based on code from logrus WriterLevel implementation [1] +// +// [1] https://github.com/sirupsen/logrus/blob/v1.9.3/writer.go#L66-L97 +func writerScanner( + entry *logrus.Entry, + reader *io.PipeReader, + defaultPrintFunc func(args ...interface{}), + overrides []logLevelOverride) { + + defer reader.Close() + + scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { + line := scanner.Text() + matched := false + for _, override := range overrides { + printFn, err := levelToPrintFunc(entry, override.targetLevel) + if err != nil { + entry.WithError(err).WithField("matcher", override.matcher). + Error("BUG: failed to get printer for klog override matcher") + continue + } + if override.matcher.FindString(line) != "" { + printFn(strings.TrimRight(line, "\r\n")) + matched = true + break + } + } + if !matched { + defaultPrintFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } + } + + if err := scanner.Err(); err != nil { + entry.WithError(err).Error("klog logrus override scanner stopped scanning with an error. " + + "This may mean that k8s client-go logs will no longer be emitted") + } } // LogOptions maps configuration key-value pairs related to logging. type LogOptions map[string]string -// InitializeDefaultLogger returns a logrus Logger with a custom text formatter. -func InitializeDefaultLogger() (logger *logrus.Logger) { +// initializeDefaultLogger returns a logrus Logger with the default logging +// settings. 
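The klog override machinery above demotes matching error lines to a lower level; an additional entry would follow the same shape as klogErrorOverrides (the regex below is a made-up example, usable only within this package since logLevelOverride is unexported):

	var extraOverrides = []logLevelOverride{
		{
			matcher:     regexp.MustCompile("some noisy but harmless client-go error"),
			targetLevel: logrus.InfoLevel,
		},
	}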
+func initializeDefaultLogger() (logger *logrus.Logger) { logger = logrus.New() - logger.SetFormatter(GetFormatter(DefaultLogFormat)) + logger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp)) logger.SetLevel(DefaultLogLevel) return } @@ -103,16 +229,16 @@ func (o LogOptions) GetLogLevel() (level logrus.Level) { func (o LogOptions) GetLogFormat() LogFormat { formatOpt, ok := o[FormatOpt] if !ok { - return DefaultLogFormat + return DefaultLogFormatTimestamp } formatOpt = strings.ToLower(formatOpt) - re := regexp.MustCompile(`^(text|json|json-ts)$`) + re := regexp.MustCompile(`^(text|text-ts|json|json-ts)$`) if !re.MatchString(formatOpt) { logrus.WithError( - fmt.Errorf("incorrect log format configured '%s', expected 'text', 'json' or 'json-ts'", formatOpt), + fmt.Errorf("incorrect log format configured '%s', expected 'text', 'text-ts', 'json' or 'json-ts'", formatOpt), ).Warning("Ignoring user-configured log format") - return DefaultLogFormat + return DefaultLogFormatTimestamp } return LogFormat(formatOpt) @@ -140,7 +266,7 @@ func SetLogFormat(logFormat LogFormat) { // SetDefaultLogFormat updates the DefaultLogger with the DefaultLogFormat func SetDefaultLogFormat() { - DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormat)) + DefaultLogger.SetFormatter(GetFormatter(DefaultLogFormatTimestamp)) } // AddHooks adds additional logrus hook to default logger @@ -157,6 +283,11 @@ func SetupLogging(loggers []string, logOpts LogOptions, tag string, debug bool) // background goroutines that are not cleaned up. initializeKLog() + if debug { + logOpts[LevelOpt] = "debug" + } + initializeSlog(logOpts, len(loggers) == 0) + // Updating the default log format SetLogFormat(logOpts.GetLogFormat()) @@ -201,6 +332,12 @@ func GetFormatter(format LogFormat) logrus.Formatter { DisableTimestamp: true, DisableColors: true, } + case LogFormatTextTimestamp: + return &logrus.TextFormatter{ + DisableTimestamp: false, + TimestampFormat: time.RFC3339Nano, + DisableColors: true, + } case LogFormatJSON: return &logrus.JSONFormatter{ DisableTimestamp: true, diff --git a/vendor/github.com/cilium/cilium/pkg/logging/slog.go b/vendor/github.com/cilium/cilium/pkg/logging/slog.go new file mode 100644 index 0000000000..ca062fe0f3 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/logging/slog.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package logging + +import ( + "context" + "log/slog" + "os" + "strings" + "time" + + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/logging/logfields" +) + +// logrErrorKey is the key used by the logr library for the error parameter. +const logrErrorKey = "err" + +// SlogNopHandler discards all logs. +var SlogNopHandler slog.Handler = nopHandler{} + +type nopHandler struct{} + +func (nopHandler) Enabled(context.Context, slog.Level) bool { return false } +func (nopHandler) Handle(context.Context, slog.Record) error { return nil } +func (n nopHandler) WithAttrs([]slog.Attr) slog.Handler { return n } +func (n nopHandler) WithGroup(string) slog.Handler { return n } + +var slogHandlerOpts = &slog.HandlerOptions{ + AddSource: false, + Level: slog.LevelInfo, + ReplaceAttr: replaceAttrFnWithoutTimestamp, +} + +// Default slog logger. Will be overwritten once initializeSlog is called. 
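With text-ts wired in above, the format option accepts text, text-ts, json and json-ts. A hypothetical configuration sketch using only identifiers defined in this package:

	opts := logging.LogOptions{
		logging.FormatOpt: "json-ts", // one of: text, text-ts, json, json-ts
		logging.LevelOpt:  "debug",
	}
	format := opts.GetLogFormat() // LogFormatJSONTimestamp
	level := opts.GetLogLevel()   // logrus.DebugLevel
	_, _ = format, level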
+var DefaultSlogLogger *slog.Logger = slog.New(slog.NewTextHandler( + os.Stderr, + slogHandlerOpts, +)) + +func slogLevel(l logrus.Level) slog.Level { + switch l { + case logrus.DebugLevel, logrus.TraceLevel: + return slog.LevelDebug + case logrus.InfoLevel: + return slog.LevelInfo + case logrus.WarnLevel: + return slog.LevelWarn + case logrus.ErrorLevel, logrus.PanicLevel, logrus.FatalLevel: + return slog.LevelError + default: + return slog.LevelInfo + } +} + +// Approximates the logrus output via slog for job groups during the transition +// phase. +func initializeSlog(logOpts LogOptions, useStdout bool) { + opts := *slogHandlerOpts + opts.Level = slogLevel(logOpts.GetLogLevel()) + + logFormat := logOpts.GetLogFormat() + switch logFormat { + case LogFormatJSON, LogFormatText: + opts.ReplaceAttr = replaceAttrFnWithoutTimestamp + case LogFormatJSONTimestamp, LogFormatTextTimestamp: + opts.ReplaceAttr = replaceAttrFn + } + + writer := os.Stderr + if useStdout { + writer = os.Stdout + } + + switch logFormat { + case LogFormatJSON, LogFormatJSONTimestamp: + DefaultSlogLogger = slog.New(slog.NewJSONHandler( + writer, + &opts, + )) + case LogFormatText, LogFormatTextTimestamp: + DefaultSlogLogger = slog.New(slog.NewTextHandler( + writer, + &opts, + )) + } +} + +func replaceAttrFn(groups []string, a slog.Attr) slog.Attr { + switch a.Key { + case slog.TimeKey: + // Adjust to timestamp format that logrus uses; except that we can't + // force slog to quote the value like logrus does... + return slog.String(slog.TimeKey, a.Value.Time().Format(time.RFC3339)) + case slog.LevelKey: + // Lower-case the log level + return slog.Attr{ + Key: a.Key, + Value: slog.StringValue(strings.ToLower(a.Value.String())), + } + case logrErrorKey: + // Uniform the attribute identifying the error + return slog.Attr{ + Key: logfields.Error, + Value: a.Value, + } + } + return a +} + +func replaceAttrFnWithoutTimestamp(groups []string, a slog.Attr) slog.Attr { + switch a.Key { + case slog.TimeKey: + // Drop timestamps + return slog.Attr{} + default: + return replaceAttrFn(groups, a) + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/mac/mac.go b/vendor/github.com/cilium/cilium/pkg/mac/mac.go index f846edb4fe..1938964d72 100644 --- a/vendor/github.com/cilium/cilium/pkg/mac/mac.go +++ b/vendor/github.com/cilium/cilium/pkg/mac/mac.go @@ -107,7 +107,7 @@ func (m *MAC) UnmarshalJSON(data []byte) error { func GenerateRandMAC() (MAC, error) { buf := make([]byte, 6) if _, err := rand.Read(buf); err != nil { - return nil, fmt.Errorf("Unable to retrieve 6 rnd bytes: %s", err) + return nil, fmt.Errorf("Unable to retrieve 6 rnd bytes: %w", err) } // Set locally administered addresses bit and reset multicast bit diff --git a/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go b/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go index 772bf01d8c..8f0d415e7b 100644 --- a/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go +++ b/vendor/github.com/cilium/cilium/pkg/mac/mac_linux.go @@ -3,11 +3,18 @@ package mac -import "github.com/vishvananda/netlink" +import ( + "errors" + "net" + + "github.com/vishvananda/netlink" + + "github.com/cilium/cilium/pkg/datapath/linux/safenetlink" +) // HasMacAddr returns true if the given network interface has L2 addr. 
func HasMacAddr(iface string) bool { - link, err := netlink.LinkByName(iface) + link, err := safenetlink.LinkByName(iface) if err != nil { return false } @@ -18,3 +25,19 @@ func HasMacAddr(iface string) bool { func LinkHasMacAddr(link netlink.Link) bool { return len(link.Attrs().HardwareAddr) != 0 } + +// ReplaceMacAddressWithLinkName replaces the MAC address of the given link +func ReplaceMacAddressWithLinkName(ifName, macAddress string) error { + l, err := safenetlink.LinkByName(ifName) + if err != nil { + if errors.As(err, &netlink.LinkNotFoundError{}) { + return nil + } + return err + } + hw, err := net.ParseMAC(macAddress) + if err != nil { + return err + } + return netlink.LinkSetHardwareAddr(l, hw) +} diff --git a/vendor/github.com/cilium/cilium/pkg/mac/mac_unspecified.go b/vendor/github.com/cilium/cilium/pkg/mac/mac_unspecified.go index e59e51bf8a..cff4c267e6 100644 --- a/vendor/github.com/cilium/cilium/pkg/mac/mac_unspecified.go +++ b/vendor/github.com/cilium/cilium/pkg/mac/mac_unspecified.go @@ -5,8 +5,14 @@ package mac +import "fmt" + // HasMacAddr returns true if the given network interface has L2 addr. // This is not supported for non-linux environment func HasMacAddr(iface string) bool { return false } + +func ReplaceMacAddressWithLinkName(ifName, macAddress string) error { + return fmt.Errorf("not implemented") +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go index ec5816542e..a44e151959 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go @@ -8,25 +8,52 @@ import ( "encoding/json" "fmt" "os/exec" - "time" + "slices" + "strings" "github.com/prometheus/client_golang/prometheus" "github.com/sirupsen/logrus" + "golang.org/x/sync/singleflight" + + "github.com/cilium/cilium/pkg/time" ) type bpfCollector struct { - bpfMapsMemory *prometheus.Desc - bpfProgMemory *prometheus.Desc + sfg singleflight.Group + + bpfMapsCount *prometheus.Desc + bpfMapsMemory *prometheus.Desc + bpfProgramsCount *prometheus.Desc + bpfProgramsMemory *prometheus.Desc +} + +type bpfUsage struct { + ids []uint64 + virtualMemoryMaxBytes float64 +} + +func (bu bpfUsage) count() float64 { + return float64(len(bu.ids)) } func newbpfCollector() *bpfCollector { return &bpfCollector{ + bpfMapsCount: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, "", "bpf_maps"), + "Total count of BPF maps.", + nil, nil, + ), bpfMapsMemory: prometheus.NewDesc( prometheus.BuildFQName(Namespace, "", "bpf_maps_virtual_memory_max_bytes"), "BPF maps kernel max memory usage size in bytes.", nil, nil, ), - bpfProgMemory: prometheus.NewDesc( + bpfProgramsCount: prometheus.NewDesc( + prometheus.BuildFQName(Namespace, "", "bpf_progs"), + "Total count of BPF programs.", + nil, nil, + ), + bpfProgramsMemory: prometheus.NewDesc( prometheus.BuildFQName(Namespace, "", "bpf_progs_virtual_memory_max_bytes"), "BPF programs kernel max memory usage size in bytes.", nil, nil, @@ -35,55 +62,111 @@ func newbpfCollector() *bpfCollector { } func (s *bpfCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- s.bpfMapsMemory - ch <- s.bpfProgMemory + prometheus.DescribeByCollect(s, ch) } type memoryEntry struct { + ID uint64 `json:"id"` + Name string `json:"name"` BytesMemlock uint64 `json:"bytes_memlock"` + + // (returned only for programs) + MapIDs []uint64 `json:"map_ids"` } -func getMemoryUsage(typ string) (uint64, error) { +func getBPFUsage(typ string, filter func(memoryEntry) bool) (bpfUsage, error) { ctx, 
cancel := context.WithTimeout(context.Background(), 30*time.Second) defer cancel() cmd := exec.CommandContext(ctx, "bpftool", "-j", typ, "show") out, err := cmd.Output() if err != nil { - return 0, fmt.Errorf("unable to get bpftool output: %w", err) + return bpfUsage{}, fmt.Errorf("unable to get bpftool output: %w", err) } - var memoryEntries []memoryEntry + var ( + memoryEntries []memoryEntry + usage bpfUsage + ) + err = json.Unmarshal(out, &memoryEntries) if err != nil { - return 0, fmt.Errorf("unable to unmarshal bpftool output: %w", err) + return usage, fmt.Errorf("unable to unmarshal bpftool output: %w", err) } - var totalMem uint64 + for _, entry := range memoryEntries { - totalMem += entry.BytesMemlock + if !filter(entry) { + continue + } + + usage.ids = append(usage.ids, entry.ID) + usage.virtualMemoryMaxBytes += float64(entry.BytesMemlock) } - return totalMem, nil + + return usage, nil } func (s *bpfCollector) Collect(ch chan<- prometheus.Metric) { - mapMem, err := getMemoryUsage("map") - if err != nil { - logrus.WithError(err).Error("Error while getting BPF maps memory usage") - } else { - ch <- prometheus.MustNewConstMetric( - s.bpfMapsMemory, - prometheus.GaugeValue, - float64(mapMem), - ) + type bpfUsageResults struct { + maps bpfUsage + programs bpfUsage } - progMem, err := getMemoryUsage("prog") - if err != nil { - logrus.WithError(err).Error("Error while getting BPF progs memory usage") - } else { - ch <- prometheus.MustNewConstMetric( - s.bpfProgMemory, - prometheus.GaugeValue, - float64(progMem), + // Avoid querying BPF multiple times concurrently, if it happens, additional callers will wait for the + // first one to finish and reuse its resulting values. + results, err, _ := s.sfg.Do("collect", func() (interface{}, error) { + var ( + results = bpfUsageResults{} + err error ) + + if results.maps, err = getBPFUsage("map", func(entry memoryEntry) bool { + // Filter on maps prefixed with cilium_ + return strings.HasPrefix(entry.Name, "cilium_") + }); err != nil { + return results, err + } + + if results.programs, err = getBPFUsage("prog", func(entry memoryEntry) bool { + // Filter on programs related to cilium maps + for i := 0; i < len(entry.MapIDs); i++ { + if slices.Contains(results.maps.ids, entry.MapIDs[i]) { + return true + } + } + return false + }); err != nil { + return results, err + } + + return results, nil + }) + + if err != nil { + logrus.WithError(err).Error("retrieving BPF maps & programs usage") + return } + + ch <- prometheus.MustNewConstMetric( + s.bpfMapsCount, + prometheus.GaugeValue, + results.(bpfUsageResults).maps.count(), + ) + + ch <- prometheus.MustNewConstMetric( + s.bpfMapsMemory, + prometheus.GaugeValue, + results.(bpfUsageResults).maps.virtualMemoryMaxBytes, + ) + + ch <- prometheus.MustNewConstMetric( + s.bpfProgramsCount, + prometheus.GaugeValue, + results.(bpfUsageResults).programs.count(), + ) + + ch <- prometheus.MustNewConstMetric( + s.bpfProgramsMemory, + prometheus.GaugeValue, + results.(bpfUsageResults).programs.virtualMemoryMaxBytes, + ) } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/cell.go b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go index e48aebe005..fcfa7cf0fc 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/cell.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/cell.go @@ -3,10 +3,127 @@ package metrics -import "github.com/cilium/cilium/pkg/hive/cell" +import ( + "fmt" + "reflect" + + "github.com/cilium/hive/cell" + "github.com/prometheus/client_golang/prometheus" + + pkgmetric 
"github.com/cilium/cilium/pkg/metrics/metric" +) var Cell = cell.Module("metrics", "Metrics", - cell.Invoke(NewRegistry), - cell.Metric(NewLegacyMetrics), + // Provide registry to hive, but also invoke if case no cells decide to use as dependency + cell.Provide(NewRegistry), + Metric(NewLegacyMetrics), cell.Config(defaultRegistryConfig), + cell.Invoke(func(_ *Registry) { + // This is a hack to ensure that errors/warnings collected in the pre hive initialization + // phase are emitted as metrics. + FlushLoggingMetrics() + }), + cell.Provide( + metricsCommands, + newSampler, + ), ) + +// Metric constructs a new metric cell. +// +// This cell type provides `S` to the hive as returned by `ctor`, it also makes each individual field +// value available via the `hive-metrics` value group. Infrastructure components such as a registry, +// inspection tool, or documentation generator can collect all metrics in the hive via this value group. +// +// The `ctor` constructor must return a struct or pointer to a struct of type `S`. The returned struct +// must only contain public fields. All field types should implement the +// `github.com/cilium/cilium/pkg/metrics/metric.WithMetadata` +// and `github.com/prometheus/client_golang/prometheus.Collector` interfaces. +func Metric[S any](ctor func() S) cell.Cell { + var ( + withMeta pkgmetric.WithMetadata + collector prometheus.Collector + ) + + var nilOut S + outTyp := reflect.TypeOf(nilOut) + if outTyp.Kind() == reflect.Ptr { + outTyp = outTyp.Elem() + } + + if outTyp.Kind() != reflect.Struct { + panic(fmt.Errorf( + "metrics.Metric must be invoked with a constructor function that returns a struct or pointer to a struct, "+ + "a constructor which returns a %s was supplied", + outTyp.Kind(), + )) + } + + // Let's be strict for now, could lift this in the future if we ever need to + if outTyp.NumField() == 0 { + panic(fmt.Errorf( + "metrics.Metric must be invoked with a constructor function that returns exactly a struct with at least 1 " + + "metric, a constructor which returns a struct with zero fields was supplied", + )) + } + + withMetaTyp := reflect.TypeOf(&withMeta).Elem() + collectorTyp := reflect.TypeOf(&collector).Elem() + for i := 0; i < outTyp.NumField(); i++ { + field := outTyp.Field(i) + if !field.IsExported() { + panic(fmt.Errorf( + "The struct returned by the constructor passed to metrics.Metric has a private field '%s', which "+ + "is not allowed. 
All fields on the returning struct must be exported", + field.Name, + )) + } + + if !field.Type.Implements(withMetaTyp) { + panic(fmt.Errorf( + "The struct returned by the constructor passed to metrics.Metric has a field '%s', which is not metric.WithMetadata.", + field.Name, + )) + } + + if !field.Type.Implements(collectorTyp) { + panic(fmt.Errorf( + "The struct returned by the constructor passed to metrics.Metric has a field '%s', which is not prometheus.Collector.", + field.Name, + )) + } + } + + return cell.Provide(ctor, provideMetrics[S]) +} + +type hiveMetricOut struct { + cell.Out + + Metrics []pkgmetric.WithMetadata `group:"hive-metrics,flatten"` +} + +func provideMetrics[S any](metricSet S) hiveMetricOut { + var metrics []pkgmetric.WithMetadata + + value := reflect.ValueOf(metricSet) + typ := value.Type() + if typ.Kind() == reflect.Pointer { + value = value.Elem() + typ = typ.Elem() + } + + if typ.Kind() != reflect.Struct { + return hiveMetricOut{} + } + + for i := 0; i < typ.NumField(); i++ { + if withMeta, ok := value.Field(i).Interface().(pkgmetric.WithMetadata); ok { + metrics = append(metrics, withMeta) + } + } + + return hiveMetricOut{ + Metrics: metrics, + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/cmd.go b/vendor/github.com/cilium/cilium/pkg/metrics/cmd.go new file mode 100644 index 0000000000..d9b9ffca15 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/cmd.go @@ -0,0 +1,513 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "cmp" + _ "embed" + "encoding/json" + "fmt" + "html/template" + "io" + "maps" + "math" + "os" + "regexp" + "slices" + "sort" + "strings" + "text/tabwriter" + + "github.com/cilium/hive" + "github.com/cilium/hive/script" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/spf13/pflag" + "gopkg.in/yaml.v3" + + "github.com/cilium/cilium/api/v1/models" +) + +func metricsCommands(r *Registry, dc *sampler) hive.ScriptCmdsOut { + return hive.NewScriptCmds(map[string]script.Cmd{ + "metrics": metricsCommand(r, dc), + "metrics/plot": plotCommand(dc), + "metrics/html": htmlCommand(dc), + }) +} + +// metricsCommand implements the "metrics" script command. This can be accessed +// in script tests, via "cilium-dbg shell" or indirectly via 'cilium-dbg metrics list'. +func metricsCommand(r *Registry, dc *sampler) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "List registered metrics", + Args: "[match regex]", + Flags: func(fs *pflag.FlagSet) { + fs.StringP("out", "o", "", "Output file") + fs.BoolP("sampled", "s", false, "Show sampled metrics") + fs.StringP("format", "f", "table", "Output format, one of: table, json or yaml") + }, + RegexpArgs: func(rawArgs ...string) []int { + for i, arg := range rawArgs { + if !strings.HasPrefix(arg, "-") { + return []int{i} + } + if arg == "--" { + return []int{i + 1} + } + } + return nil + }, + Detail: []string{ + "To write the metrics to a file: 'metrics --out=/path/to/file'", + "To show metrics matching a regex: 'metrics foo.*'", + "To show samples from last 60 minutes: 'metrics --sampled'", + "", + "The metric samples can be plotted with 'metrics/plot' command.", + "", + "Run 'metrics -h' for extended help of the flags.", + "", + "Metrics can be filtered with a regexp. 
The match is made", + "against the metric name and its labels.", + "For example 'metrics regen.*scope=total' would match the", + "regenerations metric with one of the labels being scope=total", + "", + "In the sample output the 50th, 90th and 99th quantiles are shown", + "for histograms, e.g. in '15ms / 30ms / 60ms' 50th is 15ms and so on.", + }, + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + file, err := s.Flags.GetString("out") + if err != nil { + return nil, err + } + sampled, err := s.Flags.GetBool("sampled") + if err != nil { + return nil, err + } + format, err := s.Flags.GetString("format") + if err != nil { + return nil, err + } + var re *regexp.Regexp + if len(args) > 0 { + var err error + re, err = regexp.Compile(args[0]) + if err != nil { + return nil, fmt.Errorf("regex: %w", err) + } + } + + var w io.Writer + if file != "" { + f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + w = f + defer f.Close() + } else { + w = s.LogWriter() + } + + if sampled { + return nil, writeMetricsFromSamples(w, format, re, dc) + } + + return nil, writeMetricsFromRegistry(w, format, re, r.inner) + }, + ) +} + +// plotCommand implements the "metrics/plot" script command. This can be accessed +// in script tests, via "cilium-dbg shell" or indirectly via 'cilium-dbg metrics list'. +func plotCommand(dc *sampler) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Plot sampled metrics as a line graph", + Args: "[match regex]", + Flags: func(fs *pflag.FlagSet) { + fs.StringP("out", "o", "", "Output file") + fs.Bool("rate", false, "Plot the rate of change") + }, + RegexpArgs: func(rawArgs ...string) []int { + for i, arg := range rawArgs { + if !strings.HasPrefix(arg, "-") { + return []int{i} + } + if arg == "--" { + return []int{i + 1} + } + } + return nil + }, + Detail: []string{ + "The sampled metric is specified with the regex argument.", + "Both the metric name and its labels are matched against.", + "Use the 'metrics' command to search for the right regex.", + "", + "For example to plot the 'go_sched_latencies_seconds':", + "", + "cilium> metrics/plot go_sched_lat", + "", + "Or to plot the sysctl reconciliation durations:", + "", + "cilium> metrics/plot reconciler_duration.*sysctl", + "", + "Specify '-rate' to show the rate of change for a counter,", + "for example to plot how many bytes are allocated per minute:", + "", + "cilium> metrics/plot -rate go.*heap_alloc_bytes", + }, + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + s.Logf("args: %v\n", args) + + file, err := s.Flags.GetString("out") + if err != nil { + return nil, err + } + rate, err := s.Flags.GetBool("rate") + if err != nil { + return nil, err + } + var re *regexp.Regexp + if len(args) > 0 { + var err error + re, err = regexp.Compile(args[0]) + if err != nil { + return nil, fmt.Errorf("regex: %w", err) + } + } + + var w io.Writer + if file != "" { + f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + w = f + defer f.Close() + } else { + w = s.LogWriter() + } + + dc.mu.Lock() + defer dc.mu.Unlock() + + if re == nil { + fmt.Fprintln(w, "regexp needed to find metric") + return nil, nil + } + + sampledMetrics := slices.Collect(maps.Values(dc.metrics)) + slices.SortFunc(sampledMetrics, func(a, b debugSamples) int { + return cmp.Or( + cmp.Compare(a.getName(), b.getName()), + cmp.Compare(a.getLabels(), b.getLabels()), + ) + }) + + var ds 
debugSamples + matched := true + for _, ds = range sampledMetrics { + matched = re.MatchString(ds.getName() + ds.getLabels()) + if matched { + break + } + } + if !matched { + fmt.Fprintf(w, "no metric found matching regexp %q", re.String()) + return nil, nil + } + + switch ds := ds.(type) { + case *gaugeOrCounterSamples: + PlotSamples(w, rate, ds.getName(), ds.getLabels(), samplingTimeSpan, ds.samples.grab(), ds.bits) + case *histogramSamples: + PlotSamples(w, rate, ds.getName()+" (p50)", ds.getLabels(), samplingTimeSpan, ds.p50.grab(), ds.bits) + fmt.Fprintln(w) + PlotSamples(w, rate, ds.getName()+" (p90)", ds.getLabels(), samplingTimeSpan, ds.p90.grab(), ds.bits) + fmt.Fprintln(w) + PlotSamples(w, rate, ds.getName()+" (p99)", ds.getLabels(), samplingTimeSpan, ds.p99.grab(), ds.bits) + } + + return nil, nil + }, + ) +} + +//go:embed dump.html.tmpl +var htmlTemplate string + +func htmlCommand(dc *sampler) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Produce a HTML file from the sampled metrics", + Args: "", + Flags: func(fs *pflag.FlagSet) { + fs.StringP("out", "o", "", "Output file") + }, + Detail: []string{}, + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + file, err := s.Flags.GetString("out") + if err != nil { + return nil, err + } + var w io.Writer + if file != "" { + f, err := os.OpenFile(s.Path(file), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + w = f + defer f.Close() + } else { + w = s.LogWriter() + } + + dc.mu.Lock() + defer dc.mu.Unlock() + + dump := JSONSampleDump{ + NumSamples: numSamples, + IntervalSeconds: int(samplingInterval.Seconds()), + } + for _, ds := range dc.metrics { + dump.Samples = append(dump.Samples, ds.getJSON()) + } + slices.SortFunc(dump.Samples, func(a, b JSONSamples) int { + return cmp.Or( + cmp.Compare(a.Name, b.Name), + cmp.Compare(a.Labels, b.Labels), + ) + }) + + tmpl, err := template.New("metrics.html").Parse(htmlTemplate) + if err != nil { + return nil, err + } + return nil, tmpl.Execute(w, &dump) + }, + ) +} + +func writeMetricsFromSamples(outw io.Writer, format string, re *regexp.Regexp, dc *sampler) error { + dc.mu.Lock() + defer dc.mu.Unlock() + + sampledMetrics := slices.Collect(maps.Values(dc.metrics)) + slices.SortFunc(sampledMetrics, func(a, b debugSamples) int { + return cmp.Or( + cmp.Compare(a.getName(), b.getName()), + cmp.Compare(a.getLabels(), b.getLabels()), + ) + }) + + switch format { + case "json", "yaml": + dump := JSONSampleDump{ + NumSamples: numSamples, + IntervalSeconds: int(samplingInterval.Seconds()), + } + for _, ds := range sampledMetrics { + if re != nil && !re.MatchString(ds.getName()+ds.getLabels()) { + continue + } + dump.Samples = append(dump.Samples, ds.getJSON()) + } + if format == "json" { + enc := json.NewEncoder(outw) + enc.SetIndent("", " ") + return enc.Encode(dump) + } else { + enc := yaml.NewEncoder(outw) + return enc.Encode(dump) + } + case "table": + w := tabwriter.NewWriter(outw, 5, 0, 3, ' ', 0) + defer w.Flush() + _, err := fmt.Fprintln(w, "Metric\tLabels\t5min\t30min\t60min\t120min") + if err != nil { + return err + } + for _, ds := range sampledMetrics { + if re != nil && !re.MatchString(ds.getName()+ds.getLabels()) { + continue + } + m5, m30, m60, m120 := ds.get() + _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", ds.getName(), ds.getLabels(), m5, m30, m60, m120) + if err != nil { + return err + } + } + return nil + default: + return fmt.Errorf("unknown format %q", format) + } +} + +func writeMetricsFromRegistry(w 
io.Writer, format string, re *regexp.Regexp, reg *prometheus.Registry) error { + metrics, err := reg.Gather() + if err != nil { + return fmt.Errorf("gather: %w", err) + } + + var ( + // Since Gather() collects the metrics in unsorted order, we need + // to collect the lines we want to write and then sort them. + lines []string + + jsonMetrics []models.Metric + ) + + for _, val := range metrics { + metricName := val.GetName() + metricType := val.GetType() + + for _, metric := range val.Metric { + value, valueS := getMetricValue(metricName, metricType, metric) + label := joinLabels(metric.GetLabel()) + if re != nil && !re.MatchString(metricName+label) { + continue + } + if format == "table" { + lines = append(lines, fmt.Sprintf("%s\t%s\t%s\n", metricName, label, valueS)) + } else { + jsonMetrics = append(jsonMetrics, + models.Metric{ + Name: metricName, + Labels: labelsMap(metric.GetLabel()), + Value: value, + }) + } + } + } + + switch format { + case "json": + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + return enc.Encode(jsonMetrics) + case "yaml": + enc := yaml.NewEncoder(w) + return enc.Encode(jsonMetrics) + case "table": + sort.Strings(lines) + + tw := tabwriter.NewWriter(w, 5, 0, 3, ' ', 0) + defer tw.Flush() + if _, err := fmt.Fprintln(tw, "Metric\tLabels\tValue"); err != nil { + return err + } + for _, l := range lines { + _, err := tw.Write([]byte(l)) + if err != nil { + return err + } + } + return nil + default: + return fmt.Errorf("unknown format %q", format) + } +} + +// getMetricValue produces a single representative value out of the metric. +func getMetricValue(name string, typ dto.MetricType, m *dto.Metric) (float64, string) { + suffix := "" + if strings.HasSuffix(name, "seconds") { + suffix = "s" + } + + switch typ { + case dto.MetricType_COUNTER: + v := m.Counter.GetValue() + return v, fmt.Sprintf("%f", v) + case dto.MetricType_GAUGE: + v := m.Gauge.GetValue() + return v, fmt.Sprintf("%f", v) + case dto.MetricType_SUMMARY: + s := m.Summary + x := "" + for i, q := range s.Quantile { + x += fmt.Sprintf("p%d(%s%s)", int(100.0*(*q.Quantile)), prettyValue(*q.Value), suffix) + if i != len(s.Quantile)-1 { + x += " " + } + } + return 0.0, x + + case dto.MetricType_HISTOGRAM: + b := convertHistogram(m.Histogram) + p50 := getHistogramQuantile(b, 0.50) + p90 := getHistogramQuantile(b, 0.90) + p99 := getHistogramQuantile(b, 0.99) + return p90, fmt.Sprintf("%s%s / %s%s / %s%s", + prettyValue(p50), suffix, prettyValue(p90), suffix, prettyValue(p99), suffix) + default: + return -1, fmt.Sprintf("(?%s)", typ) + } +} + +func joinLabels(labels []*dto.LabelPair) string { + var b strings.Builder + for i, lp := range labels { + b.WriteString(lp.GetName()) + b.WriteByte('=') + b.WriteString(lp.GetValue()) + if i < len(labels)-1 { + b.WriteByte(' ') + } + } + return b.String() +} + +func labelsMap(labels []*dto.LabelPair) map[string]string { + m := map[string]string{} + for _, lp := range labels { + m[lp.GetName()] = lp.GetValue() + } + return m +} + +func prettyValue(v float64) string { + unit, multp := chooseUnit(v) + return fmt.Sprintf("%.4g%s", v*multp, unit) +} + +func chooseUnit(v float64) (string, float64) { + unit := "" + multp := 1.0 + v = math.Abs(v) + switch { + case v == 0.0: + case v > 1_000_000_000_000: + unit = "T" + multp = 0.000_000_000_001 + case v > 1_000_000_000: + unit = "G" + multp = 0.000_000_001 + case v > 1_000_000: + unit = "M" + multp = 0.000_001 + case v > 1000: + unit = "k" + multp = 0.001 + case v < 0.000_000_001: + unit = "p" + multp = 1_000_000_000_000 + case 
v < 0.000_001: + unit = "n" + multp = 1_000_000_000 + case v < 0.001: + unit = "µ" + multp = 1_000_000 + case v < 1: + unit = "m" + multp = 1000 + } + return unit, multp +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/dump.html.tmpl b/vendor/github.com/cilium/cilium/pkg/metrics/dump.html.tmpl new file mode 100644 index 0000000000..257b08981e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/dump.html.tmpl @@ -0,0 +1,159 @@ + + + + + + Metrics + + + + +
[dump.html.tmpl: HTML metrics viewer; the generated table has Name, Labels, and Latest columns]
+ + + + + + diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/histogram.go b/vendor/github.com/cilium/cilium/pkg/metrics/histogram.go new file mode 100644 index 0000000000..fb42706d37 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/histogram.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "cmp" + "math" + "slices" + "sort" + + dto "github.com/prometheus/client_model/go" +) + +type histogramBucket struct { + cumulativeCount uint64 + upperBound float64 +} + +func convertHistogram(h *dto.Histogram) []histogramBucket { + histogram := make([]histogramBucket, len(h.GetBucket())) + for i, b := range h.GetBucket() { + histogram[i] = histogramBucket{b.GetCumulativeCount(), b.GetUpperBound()} + } + slices.SortFunc(histogram, + func(a, b histogramBucket) int { + return cmp.Compare(a.upperBound, b.upperBound) + }) + return histogram +} + +// subtractHistogram removes from 'a' the observations from 'b'. +func subtractHistogram(a, b []histogramBucket) { + if len(a) != len(b) { + panic("impossible: histogram bucket sizes do not match") + } + for i := range a { + if a[i].upperBound != b[i].upperBound { + panic("impossible: different upper bounds") + } + a[i].cumulativeCount -= b[i].cumulativeCount + } +} + +func histogramSampleCount(histogram []histogramBucket) uint64 { + if len(histogram) == 0 { + return 0 + } + return histogram[len(histogram)-1].cumulativeCount +} + +// getHistogramQuantile calculates quantile from the Prometheus Histogram message. +// For example: getHistogramQuantile(h, 0.95) returns the 95th quantile. +func getHistogramQuantile(histogram []histogramBucket, quantile float64) float64 { + if len(histogram) < 1 { + return 0.0 + } + if quantile < 0.0 { + return math.Inf(-1) + } else if quantile > 1.0 { + return math.Inf(+1) + } + + totalCount := histogram[len(histogram)-1].cumulativeCount + if totalCount == 0 { + return 0.0 + } + + // Find the bucket onto which the quantile falls + rank := quantile * float64(totalCount) + index := sort.Search( + len(histogram)-1, + func(i int) bool { + return float64(histogram[i].cumulativeCount) >= rank + }) + + if index == 0 { + // Sample in first bucket, interpolate between 0.0..UpperBound within the bucket. + return histogram[0].upperBound * (rank / float64(histogram[0].cumulativeCount)) + } + + // Return the linearly interpolated value between the upper bounds of the + // two buckets in between which the quantile falls. 
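// A hypothetical worked example: for buckets (upperBound, cumulativeCount) of
// (0.1, 2), (0.5, 8) and (1.0, 10), quantile 0.9 gives rank = 9, which lands in
// the last bucket, so the estimate is 0.5 + (1.0-0.5)*((9-8)/(10-8)) = 0.75;
// quantile 0.5 gives rank = 5 in the middle bucket: 0.1 + (0.5-0.1)*((5-2)/(8-2)) = 0.3.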
+ start := histogram[index-1].upperBound + end := histogram[index].upperBound + relativeCount := float64(histogram[index].cumulativeCount - histogram[index-1].cumulativeCount) + relativeRank := rank - float64(histogram[index-1].cumulativeCount) + return start + (end-start)*(relativeRank/relativeCount) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go index 7e32c4c11a..75b67076f1 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go @@ -4,12 +4,12 @@ package metrics import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/cilium/cilium/api/v1/client/daemon" "github.com/cilium/cilium/api/v1/health/client/connectivity" metricpkg "github.com/cilium/cilium/pkg/metrics/metric" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" ) type daemonHealthGetter interface { @@ -24,13 +24,14 @@ var ( NoOpMetric prometheus.Metric = &mockMetric{} NoOpCollector prometheus.Collector = &collector{} - NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} - NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} - NoOpObserver metricpkg.Observer = &observer{} - NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} - NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} - NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} - NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpCounter metricpkg.Counter = &counter{NoOpMetric, NoOpCollector} + NoOpCounterVec metricpkg.Vec[metricpkg.Counter] = &counterVec{NoOpCollector} + NoOpObserver metricpkg.Observer = &observer{} + NoOpHistogram metricpkg.Histogram = &histogram{NoOpCollector} + NoOpObserverVec metricpkg.Vec[metricpkg.Observer] = &observerVec{NoOpCollector} + NoOpGauge metricpkg.Gauge = &gauge{NoOpMetric, NoOpCollector} + NoOpGaugeVec metricpkg.Vec[metricpkg.Gauge] = &gaugeVec{NoOpCollector} + NoOpGaugeDeletableVec metricpkg.DeletableVec[metricpkg.Gauge] = &gaugeDeletableVec{gaugeVec{NoOpCollector}} ) // Metric @@ -156,6 +157,24 @@ func (g *gauge) Opts() metricpkg.Opts { return metricpkg.Opts{} } // GaugeVec +type gaugeDeletableVec struct { + gaugeVec +} + +func (*gaugeDeletableVec) Delete(ll prometheus.Labels) bool { + return false +} + +func (*gaugeDeletableVec) DeleteLabelValues(lvs ...string) bool { + return false +} + +func (*gaugeDeletableVec) DeletePartialMatch(labels prometheus.Labels) int { + return 0 +} + +func (*gaugeDeletableVec) Reset() {} + type gaugeVec struct { prometheus.Collector } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/json.go b/vendor/github.com/cilium/cilium/pkg/metrics/json.go new file mode 100644 index 0000000000..c3ba7e224e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/json.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +type JSONSampleDump struct { + NumSamples int `json:"nsamples" yaml:"nsamples"` + IntervalSeconds int `json:"interval_seconds" yaml:"interval_seconds"` + Samples []JSONSamples `json:"samples" yaml:"samples"` +} + +type JSONGaugeOrCounter struct { + Samples []float32 `json:"samples,omitempty" yaml:"samples,omitempty"` +} + +type JSONHistogram struct { + P50 []float32 `json:"p50,omitempty" yaml:"p50,omitempty"` + P90 []float32 `json:"p90,omitempty" 
yaml:"p90,omitempty"` + P99 []float32 `json:"p99,omitempty" yaml:"p99,omitempty"` +} + +type JSONSamples struct { + Name string `json:"name" yaml:"name"` + Labels string `json:"labels,omitempty" yaml:"labels,omitempty"` + GaugeOrCounter *JSONGaugeOrCounter `json:"gaugeOrCounter,omitempty" yaml:"gaugeOrCounter,omitempty"` + Histogram *JSONHistogram `json:"histogram,omitempty" yaml:"histogram,omitempty"` + Latest string `json:"latest" yaml:"latest"` +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go index 62c368ecea..718ed44195 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go @@ -6,39 +6,54 @@ package metrics import ( "fmt" "reflect" + "sync" + "sync/atomic" "github.com/sirupsen/logrus" - "github.com/cilium/cilium/pkg/components" "github.com/cilium/cilium/pkg/logging/logfields" - "github.com/cilium/cilium/pkg/metrics/metric" ) +var ( + metricsInitialized chan struct{} = make(chan struct{}) + flushMetrics = sync.Once{} +) + +// FlushLoggingMetrics will cause all logging hook metrics accumulated prior +// to the errors_warnings metrics being registered with the Prometheus collector +// to be incremented to their respective errors_warnings metrics tuple. +func FlushLoggingMetrics() { + flushMetrics.Do(func() { + if metricsInitialized != nil { + close(metricsInitialized) + } + }) +} + // LoggingHook is a hook for logrus which counts error and warning messages as a // Prometheus metric. type LoggingHook struct { - metric metric.Vec[metric.Counter] + errs, warn atomic.Uint64 } // NewLoggingHook returns a new instance of LoggingHook for the given Cilium // component. -func NewLoggingHook(component string) *LoggingHook { - // NOTE(mrostecki): For now errors and warning metric exists only for Cilium - // daemon, but support of Prometheus metrics in some other components (i.e. - // cilium-health - GH-4268) is planned. - - // Pick a metric for the component. - var metric metric.Vec[metric.Counter] - switch component { - case components.CiliumAgentName: - metric = ErrorsWarnings - case components.CiliumOperatortName: - metric = ErrorsWarnings - default: - panic(fmt.Sprintf("component %s is unsupported by LoggingHook", component)) - } - - return &LoggingHook{metric: metric} +func NewLoggingHook() *LoggingHook { + lh := &LoggingHook{} + go func() { + // This channel is closed after registry is created. At this point if the errs/warnings metric + // is enabled we flush counts of errors/warnings we collected before the registry was created. + // This is a hack to ensure that errors/warnings collected in the pre hive initialization + // phase are emitted as metrics. + // Because the ErrorsWarnings metric is a counter, this means that the rate of these errors won't be + // accurate, however init errors can only happen during initialization so it probably doesn't make + // a big difference in practice. + <-metricsInitialized + metricsInitialized = nil + ErrorsWarnings.WithLabelValues(logrus.ErrorLevel.String(), "init").Add(float64(lh.errs.Load())) + ErrorsWarnings.WithLabelValues(logrus.WarnLevel.String(), "init").Add(float64(lh.warn.Load())) + }() + return lh } // Levels returns the list of logging levels on which the hook is triggered. 
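A minimal wiring sketch for the reworked hook (hypothetical usage, not taken from this patch; it assumes the metrics and logrus packages are imported):

	hook := metrics.NewLoggingHook()
	logrus.AddHook(hook) // error and warning log entries are counted from here on

	// ... later, once the Prometheus registry (and the ErrorsWarnings metric) exists:
	metrics.FlushLoggingMetrics() // flushes the pre-init counts under the "init" subsystem label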
@@ -66,8 +81,16 @@ func (h *LoggingHook) Fire(entry *logrus.Entry) error { return fmt.Errorf("type of the 'subsystem' log entry field is not string but %s", reflect.TypeOf(iSubsystem)) } + // We count errors/warnings outside of the prometheus metric. + switch entry.Level { + case logrus.ErrorLevel: + h.errs.Add(1) + case logrus.WarnLevel: + h.warn.Add(1) + } + // Increment the metric. - h.metric.WithLabelValues(entry.Level.String(), subsystem).Inc() + ErrorsWarnings.WithLabelValues(entry.Level.String(), subsystem).Inc() return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go new file mode 100644 index 0000000000..28c24f7f24 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/collections/product.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package collections + +// CartesianProduct returns the cartesian product of the input vectors as +// a vector of vectors, each with length the same as the number of input vectors. +func CartesianProduct[T any](vs ...[]T) [][]T { + if len(vs) == 0 { + return [][]T{} + } + + dimension := len(vs) // Each output will be a vector of this length. + // Iterate to find out the number of output vectors. + size := len(vs[0]) + for i := 1; i < len(vs); i++ { + size *= len(vs[i]) + } + + // Allocate the output vectors. + dst := make([][]T, size) + for i := range dst { + dst[i] = make([]T, dimension) + } + + lastm := 1 + for i := 0; i < dimension; i++ { + permuteColumn[T](dst, i, lastm, vs[i]) + lastm = lastm * len(vs[i]) + } + return dst +} + +// permuteColumn fills in the nth column of the output vectors of the cartesian +// product of the input vectors. +// +// leftPermSize is the number of vectors as a result of permuting 0,..,col-1 columns. +// That is, this is the block size upon which we will repeat the values of v0 such that +// every previous permutation is again permuted with each value of v0. +// +// For ex. +// CartesianProduct[string]({"a", "b"}, {"x", "y", "z"}) +// +// Iteration (i.e. col, leftPermSize=1) 1: +// +// dst = [ +// ["a"], +// ["b"], +// ["a"] +// ["b"] +// ["a"] +// ["b"] +// ] +// +// Iteration (leftPermSize=2): +// +// dst = [ +// ["a", "x"], // <- each elem of vec is repeated leftPermSize times. +// ["b", "x"], +// ["a", "y"] +// ["b", "y"] +// ["a", "z"] +// ["b", "z"] +// ] +func permuteColumn[T any](dst [][]T, col int, leftPermSize int, vec []T) { + // Go down the column with the current lhs. + // You want to skip along, lastm elements at a time. + for i := 0; i < len(dst); i += leftPermSize { // So we're skipping n rows at a time, + vi := (i / leftPermSize) % len(vec) + for off := 0; off < leftPermSize; off++ { // this is a repeat + dst[i+off][col] = vec[vi] + } + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go index 4755a468dc..83a84b16c1 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/counter.go @@ -30,12 +30,6 @@ type counter struct { metric } -func (c *counter) Collect(metricChan chan<- prometheus.Metric) { - if c.enabled { - c.Counter.Collect(metricChan) - } -} - func (c *counter) Get() float64 { var pm dto.Metric err := c.Counter.Write(&pm) @@ -45,22 +39,9 @@ func (c *counter) Get() float64 { return 0 } -// Inc increments the counter by 1. 
Use Add to increment it by arbitrary -// non-negative values. -func (c *counter) Inc() { - if c.enabled { - c.Counter.Inc() - } -} - -// Add adds the given value to the counter. It panics if the value is < 0. -func (c *counter) Add(val float64) { - if c.enabled { - c.Counter.Add(val) - } -} - -func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] { +// NewCounterVec creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given label names. +func NewCounterVec(opts CounterOpts, labelNames []string) *counterVec { return &counterVec{ CounterVec: prometheus.NewCounterVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -70,12 +51,47 @@ func NewCounterVec(opts CounterOpts, labelNames []string) DeletableVec[Counter] } } +// NewCounterVecWithLabels creates a new DeletableVec[Counter] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. +// +// For example: +// +// NewCounterVecWithLabels(CounterOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewCounterVecWithLabels(opts CounterOpts, labels Labels) *counterVec { + cv := NewCounterVec(opts, labels.labelNames()) + initLabels[Counter](&cv.metric, labels, cv, opts.Disabled) + return cv +} + type counterVec struct { *prometheus.CounterVec metric } func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) { + cv.checkLabels(labels) vec, err := cv.CounterVec.CurryWith(labels) if err == nil { return &counterVec{CounterVec: vec, metric: cv.metric}, nil @@ -84,12 +100,6 @@ func (cv *counterVec) CurryWith(labels prometheus.Labels) (Vec[Counter], error) } func (cv *counterVec) GetMetricWith(labels prometheus.Labels) (Counter, error) { - if !cv.enabled { - return &counter{ - metric: metric{enabled: false}, - }, nil - } - promCounter, err := cv.CounterVec.GetMetricWith(labels) if err == nil { return &counter{ @@ -101,12 +111,6 @@ func (cv *counterVec) GetMetricWith(labels prometheus.Labels) (Counter, error) { } func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - if !cv.enabled { - return &counter{ - metric: metric{enabled: false}, - }, nil - } - promCounter, err := cv.CounterVec.GetMetricWithLabelValues(lvs...) 
if err == nil { return &counter{ @@ -118,12 +122,7 @@ func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { } func (cv *counterVec) With(labels prometheus.Labels) Counter { - if !cv.enabled { - return &counter{ - metric: metric{enabled: false}, - } - } - + cv.checkLabels(labels) promCounter := cv.CounterVec.With(labels) return &counter{ Counter: promCounter, @@ -132,12 +131,7 @@ func (cv *counterVec) With(labels prometheus.Labels) Counter { } func (cv *counterVec) WithLabelValues(lvs ...string) Counter { - if !cv.enabled { - return &counter{ - metric: metric{enabled: false}, - } - } - + cv.checkLabelValues(lvs...) promCounter := cv.CounterVec.WithLabelValues(lvs...) return &counter{ Counter: promCounter, diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go index 445afde06d..c2636952e5 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/gauge.go @@ -30,17 +30,7 @@ type gauge struct { metric } -func (g *gauge) Collect(metricChan chan<- prometheus.Metric) { - if g.enabled { - g.Gauge.Collect(metricChan) - } -} - func (g *gauge) Get() float64 { - if !g.enabled { - return 0 - } - var pm dto.Metric err := g.Gauge.Write(&pm) if err == nil { @@ -49,60 +39,51 @@ func (g *gauge) Get() float64 { return 0 } -// Set sets the Gauge to an arbitrary value. -func (g *gauge) Set(val float64) { - if g.enabled { - g.Gauge.Set(val) - } -} - -// Inc increments the Gauge by 1. Use Add to increment it by arbitrary -// values. -func (g *gauge) Inc() { - if g.enabled { - g.Gauge.Inc() - } -} - -// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary -// values. -func (g *gauge) Dec() { - if g.enabled { - g.Gauge.Dec() - } -} - -// Add adds the given value to the Gauge. (The value can be negative, -// resulting in a decrease of the Gauge.) -func (g *gauge) Add(val float64) { - if g.enabled { - g.Gauge.Add(val) - } -} - -// Sub subtracts the given value from the Gauge. (The value can be -// negative, resulting in an increase of the Gauge.) -func (g *gauge) Sub(i float64) { - if g.enabled { - g.Gauge.Sub(i) - } -} - -// SetToCurrentTime sets the Gauge to the current Unix time in seconds. -func (g *gauge) SetToCurrentTime() { - if g.enabled { - g.Gauge.SetToCurrentTime() - } -} - -func NewGaugeVec(opts GaugeOpts, labelNames []string) DeletableVec[Gauge] { - return &gaugeVec{ +// NewGaugeVec creates a new DeletableVec[Gauge] based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *gaugeVec { + gv := &gaugeVec{ GaugeVec: prometheus.NewGaugeVec(opts.toPrometheus(), labelNames), metric: metric{ enabled: !opts.Disabled, opts: Opts(opts), }, } + return gv +} + +// NewGaugeVecWithLabels creates a new DeletableVec[Gauge] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. 
+// +// For example: +// +// NewGaugeVecWithLabels(GaugeOpts{ +// Namespace: "cilium", +// Subsystem: "subsystem", +// Name: "cilium_test", +// Disabled: false, +// }, Labels{ +// {Name: "foo", Values: NewValues("0", "1")}, +// {Name: "bar", Values: NewValues("a", "b")}, +// }) +// +// Will initialize the following metrics to: +// +// cilium_subsystem_cilium_test{foo="0", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="0", bar="b"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="a"} 0 +// cilium_subsystem_cilium_test{foo="1", bar="b"} 0 +func NewGaugeVecWithLabels(opts GaugeOpts, labels Labels) *gaugeVec { + gv := NewGaugeVec(opts, labels.labelNames()) + initLabels[Gauge](&gv.metric, labels, gv, opts.Disabled) + return gv } type gaugeVec struct { @@ -111,6 +92,7 @@ type gaugeVec struct { } func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) { + gv.checkLabels(labels) vec, err := gv.GaugeVec.CurryWith(labels) if err == nil { return &gaugeVec{GaugeVec: vec, metric: gv.metric}, nil @@ -119,12 +101,6 @@ func (gv *gaugeVec) CurryWith(labels prometheus.Labels) (Vec[Gauge], error) { } func (gv *gaugeVec) GetMetricWith(labels prometheus.Labels) (Gauge, error) { - if !gv.enabled { - return &gauge{ - metric: metric{enabled: false}, - }, nil - } - promGauge, err := gv.GaugeVec.GetMetricWith(labels) if err == nil { return &gauge{ @@ -136,12 +112,6 @@ func (gv *gaugeVec) GetMetricWith(labels prometheus.Labels) (Gauge, error) { } func (gv *gaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - if !gv.enabled { - return &gauge{ - metric: metric{enabled: false}, - }, nil - } - promGauge, err := gv.GaugeVec.GetMetricWithLabelValues(lvs...) if err == nil { return &gauge{ @@ -153,11 +123,7 @@ func (gv *gaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { } func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { - if !gv.enabled { - return &gauge{ - metric: metric{enabled: false}, - } - } + gv.checkLabels(labels) promGauge := gv.GaugeVec.With(labels) return &gauge{ @@ -167,11 +133,7 @@ func (gv *gaugeVec) With(labels prometheus.Labels) Gauge { } func (gv *gaugeVec) WithLabelValues(lvs ...string) Gauge { - if !gv.enabled { - return &gauge{ - metric: metric{enabled: false}, - } - } + gv.checkLabelValues(lvs...) promGauge := gv.GaugeVec.WithLabelValues(lvs...) return &gauge{ @@ -208,12 +170,6 @@ type gaugeFunc struct { metric } -func (gf *gaugeFunc) Collect(metricChan chan<- prometheus.Metric) { - if gf.enabled { - gf.GaugeFunc.Collect(metricChan) - } -} - type GaugeOpts Opts func (o GaugeOpts) toPrometheus() prometheus.GaugeOpts { diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go index f1ddb526a2..c908cb48b7 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/histogram.go @@ -29,25 +29,6 @@ type histogram struct { metric } -func (h *histogram) Collect(metricChan chan<- prometheus.Metric) { - if h.enabled { - h.Histogram.Collect(metricChan) - } -} - -// Observe adds a single observation to the histogram. Observations are -// usually positive or zero. Negative observations are accepted but -// prevent current versions of Prometheus from properly detecting -// counter resets in the sum of observations. (The experimental Native -// Histograms handle negative observations properly.) See -// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations -// for details. 
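A short usage sketch for the *VecWithLabels constructors introduced above (hypothetical metric and label names, assuming the metric package is imported):

	ready := metric.NewGaugeVecWithLabels(metric.GaugeOpts{
		Namespace: "cilium",
		Subsystem: "example",
		Name:      "ready",
		Help:      "Hypothetical readiness gauge",
	}, metric.Labels{
		{Name: "family", Values: metric.NewValues("ipv4", "ipv6")},
	})

	ready.WithLabelValues("ipv4").Set(1) // in range: updates a pre-initialized series
	ready.WithLabelValues("ipv9").Set(1) // out of range: still collected, but a warning is logged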
-func (h *histogram) Observe(val float64) { - if h.enabled { - h.Histogram.Observe(val) - } -} - type Observer interface { prometheus.Observer WithMetadata @@ -58,20 +39,9 @@ type observer struct { metric } -// Observe adds a single observation to the histogram. Observations are -// usually positive or zero. Negative observations are accepted but -// prevent current versions of Prometheus from properly detecting -// counter resets in the sum of observations. (The experimental Native -// Histograms handle negative observations properly.) See -// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations -// for details. -func (o *observer) Observe(val float64) { - if o.enabled { - o.Observer.Observe(val) - } -} - -func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { +// NewHistogramVec creates a new Vec[Observer] (i.e. Histogram Vec) based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *histogramVec { return &histogramVec{ ObserverVec: prometheus.NewHistogramVec(opts.toPrometheus(), labelNames), metric: metric{ @@ -81,12 +51,28 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) Vec[Observer] { } } +// NewHistogramVec creates a new Vec[Observer] based on the provided CounterOpts and +// partitioned by the given labels. +// This will also initialize the labels with the provided values so that metrics with known label value +// ranges can be pre-initialized to zero upon init. +// +// This should only be used when all label values are known at init, otherwise use of the +// metric vector with uninitialized labels will result in warnings. +// +// Note: Disabled metrics will not have their label values initialized. +func NewHistogramVecWithLabels(opts HistogramOpts, labels Labels) *histogramVec { + hv := NewHistogramVec(opts, labels.labelNames()) + initLabels(&hv.metric, labels, hv, opts.Disabled) + return hv +} + type histogramVec struct { prometheus.ObserverVec metric } func (cv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], error) { + cv.checkLabels(labels) vec, err := cv.ObserverVec.CurryWith(labels) if err == nil { return &histogramVec{ObserverVec: vec, metric: cv.metric}, nil @@ -95,12 +81,6 @@ func (cv *histogramVec) CurryWith(labels prometheus.Labels) (Vec[Observer], erro } func (cv *histogramVec) GetMetricWith(labels prometheus.Labels) (Observer, error) { - if !cv.enabled { - return &observer{ - metric: metric{enabled: false}, - }, nil - } - promObserver, err := cv.ObserverVec.GetMetricWith(labels) if err == nil { return &observer{ @@ -112,12 +92,6 @@ func (cv *histogramVec) GetMetricWith(labels prometheus.Labels) (Observer, error } func (cv *histogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - if !cv.enabled { - return &observer{ - metric: metric{enabled: false}, - }, nil - } - promObserver, err := cv.ObserverVec.GetMetricWithLabelValues(lvs...) 
if err == nil { return &observer{ @@ -129,11 +103,7 @@ func (cv *histogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error } func (cv *histogramVec) With(labels prometheus.Labels) Observer { - if !cv.enabled { - return &observer{ - metric: metric{enabled: false}, - } - } + cv.checkLabels(labels) promObserver := cv.ObserverVec.With(labels) return &observer{ @@ -143,11 +113,7 @@ func (cv *histogramVec) With(labels prometheus.Labels) Observer { } func (cv *histogramVec) WithLabelValues(lvs ...string) Observer { - if !cv.enabled { - return &observer{ - metric: metric{enabled: false}, - } - } + cv.checkLabelValues(lvs...) promObserver := cv.ObserverVec.WithLabelValues(lvs...) return &observer{ diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go index a41d079f72..39e13e9d8f 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metric/metric.go @@ -4,9 +4,19 @@ package metric import ( + "fmt" + "maps" + "slices" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + + "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/metrics/metric/collections" ) +var logger = logrus.WithField(logfields.LogSubsys, "metric") + // WithMetadata is the interface implemented by any metric defined in this package. These typically embed existing // prometheus metric types and add additional metadata. In addition, these metrics have the concept of being enabled // or disabled which is used in place of conditional registration so all metric types can always be registered. @@ -20,6 +30,53 @@ type WithMetadata interface { type metric struct { enabled bool opts Opts + labels *labelSet +} + +// forEachLabelVector performs a product of all possible label value combinations +// and calls the provided function for each combination. +func (b *metric) forEachLabelVector(fn func(lvls []string)) { + if b.labels == nil { + return + } + var labelValues [][]string + for _, label := range b.labels.lbls { + labelValues = append(labelValues, slices.Collect(maps.Keys(label.Values))) + } + for _, labelVector := range collections.CartesianProduct(labelValues...) { + fn(labelVector) + } +} + +// checkLabelValues checks that the provided label values are within the range +// of provided label values, if labels where defined using the Labels type. +// Violations are logged as errors for detection, but metrics should still +// be collected as is. +func (b *metric) checkLabelValues(lvs ...string) { + if b.labels == nil { + return + } + if err := b.labels.checkLabelValues(lvs); err != nil { + logger.WithError(err). + WithFields(logrus.Fields{ + "metric": b.opts.Name, + }). + Warning("metric label constraints violated, metric will still be collected") + } +} + +func (b *metric) checkLabels(labels prometheus.Labels) { + if b.labels == nil { + return + } + + if err := b.labels.checkLabels(labels); err != nil { + logger.WithError(err). + WithFields(logrus.Fields{ + "metric": b.opts.Name, + }). + Warning("metric label constraints violated, metric will still be collected") + } } func (b *metric) IsEnabled() bool { @@ -34,11 +91,35 @@ func (b *metric) Opts() Opts { return b.opts } +type collectorWithMetadata interface { + prometheus.Collector + WithMetadata +} + +// EnabledCollector collects the underlying metric only when it's enabled. 
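// A hypothetical registration sketch: wrapping a collector, e.g.
// registry.MustRegister(EnabledCollector{C: someVec}) with registry being a
// *prometheus.Registry, keeps it registered at all times while its samples are
// suppressed whenever the wrapped metric reports IsEnabled() == false.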
+type EnabledCollector struct { + C prometheus.Collector +} + +// Collect implements prometheus.Collector. +func (e EnabledCollector) Collect(ch chan<- prometheus.Metric) { + if m, ok := e.C.(WithMetadata); ok && !m.IsEnabled() { + return + } + e.C.Collect(ch) +} + +// Describe implements prometheus.Collector. +func (e EnabledCollector) Describe(ch chan<- *prometheus.Desc) { + e.C.Describe(ch) +} + +var _ prometheus.Collector = &EnabledCollector{} + // Vec is a generic type to describe the vectorized version of another metric type, for example Vec[Counter] would be // our version of a prometheus.CounterVec. type Vec[T any] interface { - prometheus.Collector - WithMetadata + collectorWithMetadata // CurryWith returns a vector curried with the provided labels, i.e. the // returned vector has those labels pre-set for all labeled operations performed @@ -189,3 +270,99 @@ type Opts struct { // If true, the metric has to be explicitly enabled via config or flags Disabled bool } + +func (b Opts) GetConfigName() string { + if b.ConfigName == "" { + return prometheus.BuildFQName(b.Namespace, b.Subsystem, b.Name) + } + return b.ConfigName +} + +// Label represents a metric label with a pre-defined range of values. +// This is used with the NewxxxVecWithLabels metrics constructors to initialize +// vector metrics with known label value ranges, avoiding empty metrics. +type Label struct { + Name string + // If defined, only these values are allowed. + Values Values +} + +// Values is a distinct set of possible label values for a particular Label. +type Values map[string]struct{} + +// NewValues constructs a Values type from a set of strings. +func NewValues(vs ...string) Values { + vals := Values{} + for _, v := range vs { + vals[v] = struct{}{} + } + return vals +} + +// Labels is a slice of labels that represents a label set for a vector type +// metric. +type Labels []Label + +func (lbls Labels) labelNames() []string { + lns := make([]string, len(lbls)) + for i, label := range lbls { + lns[i] = label.Name + } + return lns +} + +type labelSet struct { + lbls Labels + m map[string]map[string]struct{} +} + +func (l *labelSet) namesToValues() map[string]map[string]struct{} { + if l.m != nil { + return l.m + } + l.m = make(map[string]map[string]struct{}) + for _, label := range l.lbls { + l.m[label.Name] = label.Values + } + return l.m +} + +func (l *labelSet) checkLabels(labels prometheus.Labels) error { + for name, value := range labels { + if lvs, ok := l.namesToValues()[name]; ok { + if _, ok := lvs[value]; !ok { + return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v", + name, value, maps.Keys(lvs)) + } + } else { + return fmt.Errorf("invalid label name: %s", name) + } + } + return nil +} + +func (l *labelSet) checkLabelValues(lvs []string) error { + if len(l.lbls) != len(lvs) { + return fmt.Errorf("unexpected label vector length: expected %d, got %d", len(l.lbls), len(lvs)) + } + for i, label := range l.lbls { + if _, ok := label.Values[lvs[i]]; !ok { + return fmt.Errorf("unexpected label vector value for label %q: value %q not defined in label range %v", + label.Name, lvs[i], maps.Keys(label.Values)) + } + } + return nil +} + +// initLabels is a helper function to initialize the labels of a metric. +// It is used by xxxVecWithLabels metrics constructors to initialize the +// labels of the metric and the vector (i.e. registering all possible label value combinations). 
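// For instance, with the hypothetical label set Labels{{Name: "outcome",
// Values: NewValues("success", "failure")}}, initLabels calls
// vec.WithLabelValues("success") and vec.WithLabelValues("failure"), so both
// label combinations already exist in the exported output (at zero) before the
// first real update.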
+func initLabels[T any](m *metric, labels Labels, vec Vec[T], disabled bool) { + if disabled { + return + } + m.labels = &labelSet{lbls: labels} + m.forEachLabelVector(func(vs []string) { + vec.WithLabelValues(vs...) + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go index e75db20edb..ac6a77f132 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go @@ -12,15 +12,15 @@ package metrics import ( "context" - "time" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" "github.com/sirupsen/logrus" - "github.com/cilium/cilium/api/v1/models" "github.com/cilium/cilium/pkg/metrics/metric" "github.com/cilium/cilium/pkg/promise" + "github.com/cilium/cilium/pkg/source" + "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) @@ -53,6 +53,9 @@ const ( // SubsystemK8sClient is the subsystem to scope metrics related to the kubernetes client. SubsystemK8sClient = "k8s_client" + // SubsystemWorkQueue is the subsystem to scope metrics related to the workqueue. + SubsystemWorkQueue = "k8s_workqueue" + // SubsystemKVStore is the subsystem to scope metrics related to the kvstore. SubsystemKVStore = "kvstore" @@ -79,6 +82,9 @@ const ( // Cilium KVStoreMesh CiliumKVStoreMeshNamespace = "cilium_kvstoremesh" + // CiliumOperatorNamespace is used to scope metrics from the Cilium Operator + CiliumOperatorNamespace = "cilium_operator" + // LabelError indicates the type of error (string) LabelError = "error" @@ -90,12 +96,25 @@ const ( // Labels + // LabelValueFalse is the string value for true metric label values. + LabelValueTrue = "true" + + // LabelValueFalse is the string value for false metric label values. + LabelValueFalse = "false" + // LabelValueOutcomeSuccess is used as a successful outcome of an operation LabelValueOutcomeSuccess = "success" // LabelValueOutcomeFail is used as an unsuccessful outcome of an operation LabelValueOutcomeFail = "fail" + // LabelValueOutcomeFailure is used as an unsuccessful outcome of an operation. + // NOTE: This should only be used for existing metrics, new metrics should use LabelValueOutcomeFail. + LabelValueOutcomeFailure = "failure" + + // LabelDropReason is used to describe reason for dropping a packets/bytes + LabelDropReason = "reason" + // LabelEventSourceAPI marks event-related metrics that come from the API LabelEventSourceAPI = "api" @@ -136,6 +155,8 @@ const ( // LabelPolicySource is the label used to see the enforcement status LabelPolicySource = "source" + LabelSource = "source" + // LabelScope is the label used to defined multiples scopes in the same // metric. For example, one counter may measure a metric over the scope of // the entire event (scope=global), or just part of an event @@ -179,6 +200,8 @@ const ( // LabelMapName is the label for the BPF map name LabelMapName = "map_name" + LabelMapGroup = "map_group" + // LabelVersion is the label for the version number LabelVersion = "version" @@ -213,6 +236,12 @@ const ( LabelLocationRemoteIntraCluster = "remote_intra_cluster" LabelLocationRemoteInterCluster = "remote_inter_cluster" + // Rule label is a label for a L7 rule name. + LabelL7Rule = "rule" + + // LabelL7ProxyType is the label for denoting a L7 proxy type. + LabelL7ProxyType = "proxy_type" + // LabelType is the label for type in general (e.g. 
endpoint, node) LabelType = "type" LabelPeerEndpoint = "endpoint" @@ -224,9 +253,18 @@ const ( LabelAddressType = "address_type" LabelAddressTypePrimary = "primary" LabelAddressTypeSecondary = "secondary" + + // LabelConnectivityStatus is the label for connectivity statuses + LabelConnectivityStatus = "status" + LabelReachable = "reachable" + LabelUnreachable = "unreachable" + LabelUnknown = "unknown" ) var ( + // LabelValuesBool is metric label value set for boolean type. + LabelValuesBool = metric.NewValues(LabelValueTrue, LabelValueFalse) + // Namespace is used to scope metrics from cilium. It is prepended to metric // names and separated with a '_' Namespace = CiliumAgentNamespace @@ -246,11 +284,19 @@ var ( // NodeConnectivityStatus is the connectivity status between local node to // other node intra or inter cluster. - NodeConnectivityStatus = NoOpGaugeVec + NodeConnectivityStatus = NoOpGaugeDeletableVec // NodeConnectivityLatency is the connectivity latency between local node to // other node intra or inter cluster. - NodeConnectivityLatency = NoOpGaugeVec + NodeConnectivityLatency = NoOpGaugeDeletableVec + + // NodeHealthConnectivityStatus is the number of connections with connectivity status + // between local node to other node intra or inter cluster. + NodeHealthConnectivityStatus = NoOpGaugeVec + + // NodeHealthConnectivityLatency is the histogram connectivity latency between local node to + // other node intra or inter cluster. + NodeHealthConnectivityLatency = NoOpObserverVec // Endpoint @@ -258,6 +304,9 @@ var ( // It must be thread-safe. Endpoint metric.GaugeFunc + // EndpointMaxIfindex is the maximum observed interface index for existing endpoints + EndpointMaxIfindex = NoOpGauge + // EndpointRegenerationTotal is a count of the number of times any endpoint // has been regenerated and success/fail outcome EndpointRegenerationTotal = NoOpCounterVec @@ -280,19 +329,16 @@ var ( // PolicyRegenerationCount is the total number of successful policy // regenerations. + // Deprecated: Use EndpointRegenerationTotal. PolicyRegenerationCount = NoOpCounter - // PolicyRegenerationTimeStats is the total time taken to generate policies + // PolicyRegenerationTimeStats is the total time taken to generate policies. + // Deprecated: Use EndpointRegenerationTimeStats. PolicyRegenerationTimeStats = NoOpObserverVec // PolicyRevision is the current policy revision number for this agent PolicyRevision = NoOpGauge - // PolicyImportErrorsTotal is a count of failed policy imports. - // This metric was deprecated in Cilium 1.14 and is to be removed in 1.15. - // It is replaced by PolicyChangeTotal metric. - PolicyImportErrorsTotal = NoOpCounter - // PolicyChangeTotal is a count of policy changes by outcome ("success" or // "failure") PolicyChangeTotal = NoOpCounterVec @@ -309,26 +355,25 @@ var ( // CIDRGroup - // CIDRGroupTranslationTimeStats is the time taken to translate the policy field `FromCIDRGroupRef` - // after the referenced CIDRGroups have been updated or deleted. - CIDRGroupTranslationTimeStats = NoOpHistogram - - // CIDRGroupPolicies is the number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. - // CNPs with empty or non-existing CIDRGroupRefs are not considered - CIDRGroupPolicies = NoOpGauge + // CIDRGroupsReferenced is the number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. + // CNPs with empty or non-existing CIDRGroupRefs are not considered. 
+ CIDRGroupsReferenced = NoOpGauge // Identity // Identity is the number of identities currently in use on the node by type Identity = NoOpGaugeVec - // Events + // IdentityLabelSources is the number of identities in use on the node with + // have a particular label source. Note that an identity may contain labels + // from multiple sources and thus might be counted in multiple buckets + IdentityLabelSources = NoOpGaugeVec - // EventTS*is the time in seconds since epoch that we last received an - // event that we will handle - // source is one of k8s, docker or apia + // Events - // EventTS is the timestamp of k8s resource events. + // EventTS is the time in seconds since epoch that we last received an + // event that was handled by Cilium. This metric tracks the source of the + // event which can be one of K8s or Cilium's API. EventTS = NoOpGaugeVec // EventLagK8s is the lag calculation for k8s Pod events. @@ -342,22 +387,6 @@ var ( // ProxyPolicyL7Total is a count of all l7 requests handled by proxy ProxyPolicyL7Total = NoOpCounterVec - // ProxyParseErrors is a count of failed parse errors on proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyParseErrors = NoOpCounter - - // ProxyForwarded is a count of all forwarded requests by proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyForwarded = NoOpCounter - - // ProxyDenied is a count of all denied requests by policy by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyDenied = NoOpCounter - - // ProxyReceived is a count of all received requests by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyReceived = NoOpCounter - // ProxyUpstreamTime is how long the upstream server took to reply labeled // by error, protocol and span time ProxyUpstreamTime = NoOpObserverVec @@ -368,22 +397,6 @@ var ( // L3-L4 statistics - // DropCount is the total drop requests, - // tagged by drop reason and direction(ingress/egress) - DropCount = NoOpCounterVec - - // DropBytes is the total dropped bytes, - // tagged by drop reason and direction(ingress/egress) - DropBytes = NoOpCounterVec - - // ForwardCount is the total forwarded packets, - // tagged by ingress/egress direction - ForwardCount = NoOpCounterVec - - // ForwardBytes is the total forwarded bytes, - // tagged by ingress/egress direction - ForwardBytes = NoOpCounterVec - // Datapath statistics // ConntrackGCRuns is the number of times that the conntrack GC @@ -415,6 +428,10 @@ var ( // ServicesEventsCount counts the number of services ServicesEventsCount = NoOpCounterVec + // ServiceImplementationDelay the execution duration of the service handler in milliseconds. + // The metric reflects the time it took to program the service excluding the event queue latency. + ServiceImplementationDelay = NoOpObserverVec + // Errors and warnings // ErrorsWarnings is the number of errors and warnings in cilium-agent instances @@ -461,9 +478,14 @@ var ( // IPAM events - // IpamEvent is the number of IPAM events received labeled by action and + // IPAMEvent is the number of IPAM events received labeled by action and // datapath family type - IpamEvent = NoOpCounterVec + IPAMEvent = NoOpCounterVec + + // IPAMCapacity tracks the total number of IPs that could be allocated. To + // get the current number of available IPs, it would be this metric + // subtracted by IPAMEvent{allocated}. 
+ IPAMCapacity = NoOpGaugeVec // KVstore events @@ -477,14 +499,6 @@ var ( // KVStoreQuorumErrors records the number of kvstore quorum errors KVStoreQuorumErrors = NoOpCounterVec - // KVStoreSyncQueueSize records the number of elements queued for - // synchronization in the kvstore. - KVStoreSyncQueueSize = NoOpGaugeVec - - // KVStoreInitialSyncCompleted records whether the initial synchronization - // from/to the kvstore has completed. - KVStoreInitialSyncCompleted = NoOpGaugeVec - // FQDNGarbageCollectorCleanedTotal is the number of domains cleaned by the // GC job. FQDNGarbageCollectorCleanedTotal = NoOpCounter @@ -503,6 +517,9 @@ var ( // connection (aka zombie), per endpoint. FQDNAliveZombieConnections = NoOpGaugeVec + // FQDNSelectors is the total number of registered ToFQDN selectors + FQDNSelectors = NoOpGauge + // FQDNSemaphoreRejectedTotal is the total number of DNS requests rejected // by the DNS proxy because too many requests were in flight, as enforced by // the admission semaphore. @@ -523,17 +540,8 @@ var ( // bpf map. BPFMapOps = NoOpCounterVec - // TriggerPolicyUpdateTotal is the metric to count total number of - // policy update triggers - TriggerPolicyUpdateTotal = NoOpCounterVec - - // TriggerPolicyUpdateFolds is the current level folding that is - // happening when running policy update triggers - TriggerPolicyUpdateFolds = NoOpGauge - - // TriggerPolicyUpdateCallDuration measures the latency and call - // duration of policy update triggers - TriggerPolicyUpdateCallDuration = NoOpObserverVec + // BPFMapCapacity is the max capacity of bpf maps, labelled by map group classification. + BPFMapCapacity = NoOpGaugeVec // VersionMetric labelled by Cilium version VersionMetric = NoOpGaugeVec @@ -565,14 +573,91 @@ var ( // APILimiterProcessedRequests is the counter of the number of // processed (successful and failed) requests APILimiterProcessedRequests = NoOpCounterVec + + // WorkQueueDepth is the depth of the workqueue + // + // We set actual metrics here instead of NoOp for the workqueue metrics + // because these metrics will be registered with workqueue.SetProvider + // by init function in watcher.go. Otherwise, we will register NoOps. 
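// (A presumed detail of that wiring, not spelled out here: workqueue.SetProvider
// takes a k8s.io/client-go/util/workqueue.MetricsProvider, so watcher.go would
// adapt these vectors, e.g. WorkQueueDepth.WithLabelValues(name), to that
// interface.)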
+ // + WorkQueueDepth = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_depth", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "depth", + Help: "Current depth of workqueue.", + }, []string{"name"}) + + // WorkQueueAddsTotal is the total number of adds to the workqueue + WorkQueueAddsTotal = metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_adds_total", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "adds_total", + Help: "Total number of adds handled by workqueue.", + }, []string{"name"}) + + // WorkQueueLatency is the latency of how long an item stays in the workqueue + WorkQueueLatency = metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_queue_duration_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "queue_duration_seconds", + Help: "How long in seconds an item stays in workqueue before being requested.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + // WorkQueueDuration is the duration of how long processing an item for the workqueue + WorkQueueDuration = metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_work_duration_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "work_duration_seconds", + Help: "How long in seconds processing an item from workqueue takes.", + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }, []string{"name"}) + + // WorkQueueUnfinishedWork is how many seconds of work has been done that is in progress + WorkQueueUnfinishedWork = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_unfinished_work_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "unfinished_work_seconds", + Help: "How many seconds of work has been done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }, []string{"name"}) + + // WorkQueueLongestRunningProcessor is the longest running processor in the workqueue + WorkQueueLongestRunningProcessor = metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_longest_running_processor_seconds", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "longest_running_processor_seconds", + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + }, []string{"name"}) + + // WorkQueueRetries is the number of retries for handled by the workqueue + WorkQueueRetries = metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_" + SubsystemWorkQueue + "_retries_total", + Namespace: Namespace, + Subsystem: SubsystemWorkQueue, + Name: "retries_total", + Help: "Total number of retries handled by workqueue.", + }, []string{"name"}) ) type LegacyMetrics struct { BootstrapTimes metric.Vec[metric.Observer] APIInteractions metric.Vec[metric.Observer] - NodeConnectivityStatus metric.Vec[metric.Gauge] - NodeConnectivityLatency metric.Vec[metric.Gauge] + NodeConnectivityStatus metric.DeletableVec[metric.Gauge] + NodeConnectivityLatency metric.DeletableVec[metric.Gauge] + NodeHealthConnectivityStatus metric.Vec[metric.Gauge] + NodeHealthConnectivityLatency metric.Vec[metric.Observer] Endpoint metric.GaugeFunc + EndpointMaxIfindex metric.Gauge EndpointRegenerationTotal metric.Vec[metric.Counter] EndpointStateCount metric.Vec[metric.Gauge] EndpointRegenerationTimeStats metric.Vec[metric.Observer] @@ -581,27 +666,18 @@ type LegacyMetrics struct { PolicyRegenerationCount metric.Counter PolicyRegenerationTimeStats metric.Vec[metric.Observer] PolicyRevision metric.Gauge - PolicyImportErrorsTotal metric.Counter PolicyChangeTotal metric.Vec[metric.Counter] PolicyEndpointStatus metric.Vec[metric.Gauge] PolicyImplementationDelay metric.Vec[metric.Observer] - CIDRGroupTranslationTimeStats metric.Histogram - CIDRGroupPolicies metric.Gauge + CIDRGroupsReferenced metric.Gauge Identity metric.Vec[metric.Gauge] + IdentityLabelSources metric.Vec[metric.Gauge] EventTS metric.Vec[metric.Gauge] EventLagK8s metric.Gauge ProxyRedirects metric.Vec[metric.Gauge] ProxyPolicyL7Total metric.Vec[metric.Counter] - ProxyParseErrors metric.Counter - ProxyForwarded metric.Counter - ProxyDenied metric.Counter - ProxyReceived metric.Counter ProxyUpstreamTime metric.Vec[metric.Observer] ProxyDatapathUpdateTimeout metric.Counter - DropCount metric.Vec[metric.Counter] - DropBytes metric.Vec[metric.Counter] - ForwardCount metric.Vec[metric.Counter] - ForwardBytes metric.Vec[metric.Counter] ConntrackGCRuns metric.Vec[metric.Counter] ConntrackGCKeyFallbacks metric.Vec[metric.Counter] ConntrackGCSize metric.Vec[metric.Gauge] @@ -610,6 +686,7 @@ type LegacyMetrics struct { ConntrackDumpResets metric.Vec[metric.Counter] SignalsHandled metric.Vec[metric.Counter] ServicesEventsCount metric.Vec[metric.Counter] + ServiceImplementationDelay metric.Vec[metric.Observer] ErrorsWarnings metric.Vec[metric.Counter] ControllerRuns metric.Vec[metric.Counter] ControllerRunsDuration metric.Vec[metric.Observer] @@ -621,24 +698,22 @@ type LegacyMetrics struct { KubernetesAPICallsTotal metric.Vec[metric.Counter] KubernetesCNPStatusCompletion metric.Vec[metric.Observer] TerminatingEndpointsEvents metric.Counter - IpamEvent metric.Vec[metric.Counter] + IPAMEvent metric.Vec[metric.Counter] + IPAMCapacity metric.Vec[metric.Gauge] 
KVStoreOperationsDuration metric.Vec[metric.Observer] KVStoreEventsQueueDuration metric.Vec[metric.Observer] KVStoreQuorumErrors metric.Vec[metric.Counter] - KVStoreSyncQueueSize metric.Vec[metric.Gauge] - KVStoreInitialSyncCompleted metric.Vec[metric.Gauge] FQDNGarbageCollectorCleanedTotal metric.Counter FQDNActiveNames metric.Vec[metric.Gauge] FQDNActiveIPs metric.Vec[metric.Gauge] FQDNAliveZombieConnections metric.Vec[metric.Gauge] + FQDNSelectors metric.Gauge FQDNSemaphoreRejectedTotal metric.Counter IPCacheErrorsTotal metric.Vec[metric.Counter] IPCacheEventsTotal metric.Vec[metric.Counter] BPFSyscallDuration metric.Vec[metric.Observer] BPFMapOps metric.Vec[metric.Counter] - TriggerPolicyUpdateTotal metric.Vec[metric.Counter] - TriggerPolicyUpdateFolds metric.Gauge - TriggerPolicyUpdateCallDuration metric.Vec[metric.Observer] + BPFMapCapacity metric.Vec[metric.Gauge] VersionMetric metric.Vec[metric.Gauge] APILimiterWaitHistoryDuration metric.Vec[metric.Observer] APILimiterWaitDuration metric.Vec[metric.Gauge] @@ -647,6 +722,13 @@ type LegacyMetrics struct { APILimiterRateLimit metric.Vec[metric.Gauge] APILimiterAdjustmentFactor metric.Vec[metric.Gauge] APILimiterProcessedRequests metric.Vec[metric.Counter] + WorkQueueDepth metric.Vec[metric.Gauge] + WorkQueueAddsTotal metric.Vec[metric.Counter] + WorkQueueLatency metric.Vec[metric.Observer] + WorkQueueDuration metric.Vec[metric.Observer] + WorkQueueUnfinishedWork metric.Vec[metric.Gauge] + WorkQueueLongestRunningProcessor metric.Vec[metric.Gauge] + WorkQueueRetries metric.Vec[metric.Counter] } func NewLegacyMetrics() *LegacyMetrics { @@ -668,13 +750,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Duration of processed API calls labeled by path, method and return code.", }, []string{LabelPath, LabelMethod, LabelAPIReturnCode}), - EndpointRegenerationTotal: metric.NewCounterVec(metric.CounterOpts{ + EndpointRegenerationTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_endpoint_regenerations_total", Namespace: Namespace, Name: "endpoint_regenerations_total", Help: "Count of all endpoint regenerations that have completed, tagged by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), EndpointStateCount: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_endpoint_state", @@ -721,20 +808,18 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Highest policy revision number in the agent", }), - PolicyImportErrorsTotal: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_import_errors_total", - Namespace: Namespace, - Name: "policy_import_errors_total", - Help: "Number of times a policy import has failed", - }), - - PolicyChangeTotal: metric.NewCounterVec(metric.CounterOpts{ + PolicyChangeTotal: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_change_total", Namespace: Namespace, Name: "policy_change_total", Help: "Number of policy changes by outcome", - }, []string{"outcome"}), + }, metric.Labels{ + { + Name: LabelOutcome, + Values: metric.NewValues(LabelValueOutcomeSuccess, LabelValueOutcomeFailure), + }, + }), PolicyEndpointStatus: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_policy_endpoint_enforcement_status", @@ -744,29 +829,25 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of endpoints labeled by policy enforcement status", }, []string{LabelPolicyEnforcement}), - 
PolicyImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{ + PolicyImplementationDelay: metric.NewHistogramVecWithLabels(metric.HistogramOpts{ ConfigName: Namespace + "_policy_implementation_delay", Namespace: Namespace, Name: "policy_implementation_delay", Help: "Time between a policy change and it being fully deployed into the datapath", - }, []string{LabelPolicySource}), - - CIDRGroupTranslationTimeStats: metric.NewHistogram(metric.HistogramOpts{ - ConfigName: Namespace + "_cidrgroup_translation_time_stats_seconds", - Disabled: true, - - Namespace: Namespace, - Name: "cidrgroup_translation_time_stats_seconds", - Help: "CIDRGroup translation time stats", + }, metric.Labels{ + { + Name: LabelPolicySource, + Values: metric.NewValues(string(source.Kubernetes), string(source.CustomResource), string(source.LocalAPI)), + }, }), - CIDRGroupPolicies: metric.NewGauge(metric.GaugeOpts{ - ConfigName: Namespace + "_cidrgroup_policies", + CIDRGroupsReferenced: metric.NewGauge(metric.GaugeOpts{ + ConfigName: Namespace + "cidrgroups_referenced", Namespace: Namespace, - Name: "cidrgroup_policies", - Help: "Number of CNPs and CCNPs referencing at least one CiliumCIDRGroup", + Name: "cidrgroups_referenced", + Help: "Number of CNPs and CCNPs referencing at least one CiliumCIDRGroup. CNPs with empty or non-existing CIDRGroupRefs are not considered", }), Identity: metric.NewGaugeVec(metric.GaugeOpts{ @@ -777,11 +858,19 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of identities currently allocated", }, []string{LabelType}), + IdentityLabelSources: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_identity_label_sources", + + Namespace: Namespace, + Name: "identity_label_sources", + Help: "Number of identities which contain at least one label of the given label source", + }, []string{LabelSource}), + EventTS: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_event_ts", Namespace: Namespace, Name: "event_ts", - Help: "Last timestamp when we received an event", + Help: "Last timestamp when Cilium received an event from a control plane source, per resource and per action", }, []string{LabelEventSource, LabelScope, LabelAction}), EventLagK8s: metric.NewGauge(metric.GaugeOpts{ @@ -801,41 +890,20 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of redirects installed for endpoints, labeled by protocol", }, []string{LabelProtocolL7}), - ProxyPolicyL7Total: metric.NewCounterVec(metric.CounterOpts{ + ProxyPolicyL7Total: metric.NewCounterVecWithLabels(metric.CounterOpts{ ConfigName: Namespace + "_policy_l7_total", - - Namespace: Namespace, - Name: "policy_l7_total", - Help: "Number of total proxy requests handled", - }, []string{"rule"}), - - ProxyParseErrors: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_parse_errors_total", Namespace: Namespace, - Name: "policy_l7_parse_errors_total", - Help: "Number of total L7 parse errors", - }), - - ProxyForwarded: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_forwarded_total", - Namespace: Namespace, - Name: "policy_l7_forwarded_total", - Help: "Number of total L7 forwarded requests/responses", - }), - - ProxyDenied: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_denied_total", - Namespace: Namespace, - Name: "policy_l7_denied_total", - Help: "Number of total L7 denied requests/responses due to policy", - }), - - ProxyReceived: metric.NewCounter(metric.CounterOpts{ - ConfigName: Namespace + "_policy_l7_received_total", - - 
Namespace: Namespace, - Name: "policy_l7_received_total", - Help: "Number of total L7 received requests/responses", + Name: "policy_l7_total", + Help: "Number of total proxy requests handled", + }, metric.Labels{ + { + Name: LabelL7Rule, + Values: metric.NewValues("received", "forwarded", "denied", "parse_errors"), + }, + { + Name: LabelL7ProxyType, + Values: metric.NewValues("fqdn", "envoy"), + }, }), ProxyUpstreamTime: metric.NewHistogramVec(metric.HistogramOpts{ @@ -854,38 +922,6 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of total datapath update timeouts due to FQDN IP updates", }), - DropCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_count_total", - Namespace: Namespace, - Name: "drop_count_total", - Help: "Total dropped packets, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - DropBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_drop_bytes_total", - Namespace: Namespace, - Name: "drop_bytes_total", - Help: "Total dropped bytes, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}), - - ForwardCount: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_count_total", - Namespace: Namespace, - Name: "forward_count_total", - Help: "Total forwarded packets, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - - ForwardBytes: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_forward_bytes_total", - Namespace: Namespace, - Name: "forward_bytes_total", - Help: "Total forwarded bytes, tagged by ingress/egress direction", - }, - []string{LabelDirection}), - ConntrackGCRuns: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total", Namespace: Namespace, @@ -956,12 +992,15 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of services events labeled by action type", }, []string{LabelAction}), - ErrorsWarnings: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_errors_warnings_total", + ServiceImplementationDelay: metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_service_implementation_delay", Namespace: Namespace, - Name: "errors_warnings_total", - Help: "Number of total errors in cilium-agent instances", - }, []string{"level", "subsystem"}), + Name: "service_implementation_delay", + Help: "Duration in seconds to propagate the data plane programming of a service, its network and endpoints " + + "from the time the service or the service pod was changed excluding the event queue latency", + }, []string{LabelAction}), + + ErrorsWarnings: newErrorsWarningsMetric(), ControllerRuns: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_controllers_runs_total", @@ -1039,13 +1078,20 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of terminating endpoint events received from Kubernetes", }), - IpamEvent: metric.NewCounterVec(metric.CounterOpts{ + IPAMEvent: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_ipam_events_total", Namespace: Namespace, Name: "ipam_events_total", Help: "Number of IPAM events received labeled by action and datapath family type", }, []string{LabelAction, LabelDatapathFamily}), + IPAMCapacity: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_ipam_capacity", + Namespace: Namespace, + Name: "ipam_capacity", + Help: "Total number of IPs in the IPAM pool labeled by family", + }, 
[]string{LabelDatapathFamily}), + KVStoreOperationsDuration: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds", Namespace: Namespace, @@ -1071,22 +1117,6 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of quorum errors", }, []string{LabelError}), - KVStoreSyncQueueSize: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: Namespace + "_" + SubsystemKVStore + "_sync_queue_size", - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "sync_queue_size", - Help: "Number of elements queued for synchronization in the kvstore", - }, []string{LabelScope, LabelSourceCluster}), - - KVStoreInitialSyncCompleted: metric.NewGaugeVec(metric.GaugeOpts{ - ConfigName: Namespace + "_" + SubsystemKVStore + "_initial_sync_completed", - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "initial_sync_completed", - Help: "Whether the initial synchronization from/to the kvstore has completed", - }, []string{LabelScope, LabelSourceCluster, LabelAction}), - IPCacheErrorsTotal: metric.NewCounterVec(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemIPCache + "_errors_total", Namespace: Namespace, @@ -1139,6 +1169,14 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Number of IPs associated with domains that have expired (by TTL) yet still associated with an active connection (aka zombie), per endpoint", }, []string{LabelPeerEndpoint}), + FQDNSelectors: metric.NewGauge(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemFQDN + "_selectors", + Namespace: Namespace, + Subsystem: SubsystemFQDN, + Name: "selectors", + Help: "Number of registered ToFQDN selectors", + }), + FQDNSemaphoreRejectedTotal: metric.NewCounter(metric.CounterOpts{ ConfigName: Namespace + "_" + SubsystemFQDN + "_semaphore_rejected_total", Disabled: true, @@ -1165,29 +1203,13 @@ func NewLegacyMetrics() *LegacyMetrics { Help: "Total operations on map, tagged by map name", }, []string{LabelMapName, LabelOperation, LabelOutcome}), - TriggerPolicyUpdateTotal: metric.NewCounterVec(metric.CounterOpts{ - ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_total", + BPFMapCapacity: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_" + SubsystemBPF + "_map_capacity", Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_total", - Help: "Total number of policy update trigger invocations labeled by reason", - }, []string{"reason"}), - - TriggerPolicyUpdateFolds: metric.NewGauge(metric.GaugeOpts{ - ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_folds", - Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_folds", - Help: "Current number of folds", - }), - - TriggerPolicyUpdateCallDuration: metric.NewHistogramVec(metric.HistogramOpts{ - ConfigName: Namespace + "_" + SubsystemTriggers + "_policy_update_call_duration_seconds", - Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_call_duration_seconds", - Help: "Duration of policy update trigger", - }, []string{LabelType}), + Subsystem: SubsystemBPF, + Name: "map_capacity", + Help: "Capacity of map, tagged by map group. 
All maps with a capacity of 65536 are grouped under 'default'", + }, []string{LabelMapGroup}), VersionMetric: metric.NewGaugeVec(metric.GaugeOpts{ ConfigName: Namespace + "_version", @@ -1251,7 +1273,7 @@ func NewLegacyMetrics() *LegacyMetrics { Subsystem: SubsystemAPILimiter, Name: "processed_requests_total", Help: "Total number of API requests processed", - }, []string{"api_call", LabelOutcome}), + }, []string{"api_call", LabelOutcome, LabelAPIReturnCode}), EndpointPropagationDelay: metric.NewHistogramVec(metric.HistogramOpts{ ConfigName: Namespace + "_endpoint_propagation_delay_seconds", @@ -1291,16 +1313,63 @@ func NewLegacyMetrics() *LegacyMetrics { LabelProtocol, LabelAddressType, }), + + NodeHealthConnectivityStatus: metric.NewGaugeVec(metric.GaugeOpts{ + ConfigName: Namespace + "_node_health_connectivity_status", + Namespace: Namespace, + Name: "node_health_connectivity_status", + Help: "The number of endpoints with last observed status of both ICMP and HTTP connectivity between the current Cilium agent and other Cilium nodes", + }, []string{ + LabelSourceCluster, + LabelSourceNodeName, + LabelType, + LabelConnectivityStatus, + }), + + NodeHealthConnectivityLatency: metric.NewHistogramVec(metric.HistogramOpts{ + ConfigName: Namespace + "_node_health_connectivity_latency_seconds", + Namespace: Namespace, + Name: "node_health_connectivity_latency_seconds", + Help: "The histogram for last observed latency between the current Cilium agent and other Cilium nodes in seconds", + Buckets: []float64{0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0}, + }, []string{ + LabelSourceCluster, + LabelSourceNodeName, + LabelType, + LabelProtocol, + LabelAddressType, + }), + + WorkQueueDepth: WorkQueueDepth, + WorkQueueAddsTotal: WorkQueueAddsTotal, + WorkQueueLatency: WorkQueueLatency, + WorkQueueDuration: WorkQueueDuration, + WorkQueueUnfinishedWork: WorkQueueUnfinishedWork, + WorkQueueLongestRunningProcessor: WorkQueueLongestRunningProcessor, + WorkQueueRetries: WorkQueueRetries, + } + + ifindexOpts := metric.GaugeOpts{ + ConfigName: Namespace + "_endpoint_max_ifindex", + Disabled: !enableIfIndexMetric(), + Namespace: Namespace, + Name: "endpoint_max_ifindex", + Help: "Maximum interface index observed for existing endpoints", } + lm.EndpointMaxIfindex = metric.NewGauge(ifindexOpts) v := version.GetCiliumVersion() lm.VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch) + lm.BPFMapCapacity.WithLabelValues("default").Set(DefaultMapCapacity) BootstrapTimes = lm.BootstrapTimes APIInteractions = lm.APIInteractions NodeConnectivityStatus = lm.NodeConnectivityStatus NodeConnectivityLatency = lm.NodeConnectivityLatency + NodeHealthConnectivityStatus = lm.NodeHealthConnectivityStatus + NodeHealthConnectivityLatency = lm.NodeHealthConnectivityLatency Endpoint = lm.Endpoint + EndpointMaxIfindex = lm.EndpointMaxIfindex EndpointRegenerationTotal = lm.EndpointRegenerationTotal EndpointStateCount = lm.EndpointStateCount EndpointRegenerationTimeStats = lm.EndpointRegenerationTimeStats @@ -1309,27 +1378,18 @@ func NewLegacyMetrics() *LegacyMetrics { PolicyRegenerationCount = lm.PolicyRegenerationCount PolicyRegenerationTimeStats = lm.PolicyRegenerationTimeStats PolicyRevision = lm.PolicyRevision - PolicyImportErrorsTotal = lm.PolicyImportErrorsTotal PolicyChangeTotal = lm.PolicyChangeTotal PolicyEndpointStatus = lm.PolicyEndpointStatus PolicyImplementationDelay = lm.PolicyImplementationDelay - CIDRGroupTranslationTimeStats = lm.CIDRGroupTranslationTimeStats - CIDRGroupPolicies = 
lm.CIDRGroupPolicies + CIDRGroupsReferenced = lm.CIDRGroupsReferenced Identity = lm.Identity + IdentityLabelSources = lm.IdentityLabelSources EventTS = lm.EventTS EventLagK8s = lm.EventLagK8s ProxyRedirects = lm.ProxyRedirects ProxyPolicyL7Total = lm.ProxyPolicyL7Total - ProxyParseErrors = lm.ProxyParseErrors - ProxyForwarded = lm.ProxyForwarded - ProxyDenied = lm.ProxyDenied - ProxyReceived = lm.ProxyReceived ProxyUpstreamTime = lm.ProxyUpstreamTime ProxyDatapathUpdateTimeout = lm.ProxyDatapathUpdateTimeout - DropCount = lm.DropCount - DropBytes = lm.DropBytes - ForwardCount = lm.ForwardCount - ForwardBytes = lm.ForwardBytes ConntrackGCRuns = lm.ConntrackGCRuns ConntrackGCKeyFallbacks = lm.ConntrackGCKeyFallbacks ConntrackGCSize = lm.ConntrackGCSize @@ -1338,6 +1398,7 @@ func NewLegacyMetrics() *LegacyMetrics { ConntrackDumpResets = lm.ConntrackDumpResets SignalsHandled = lm.SignalsHandled ServicesEventsCount = lm.ServicesEventsCount + ServiceImplementationDelay = lm.ServiceImplementationDelay ErrorsWarnings = lm.ErrorsWarnings ControllerRuns = lm.ControllerRuns ControllerRunsDuration = lm.ControllerRunsDuration @@ -1349,24 +1410,22 @@ func NewLegacyMetrics() *LegacyMetrics { KubernetesAPICallsTotal = lm.KubernetesAPICallsTotal KubernetesCNPStatusCompletion = lm.KubernetesCNPStatusCompletion TerminatingEndpointsEvents = lm.TerminatingEndpointsEvents - IpamEvent = lm.IpamEvent + IPAMEvent = lm.IPAMEvent + IPAMCapacity = lm.IPAMCapacity KVStoreOperationsDuration = lm.KVStoreOperationsDuration KVStoreEventsQueueDuration = lm.KVStoreEventsQueueDuration KVStoreQuorumErrors = lm.KVStoreQuorumErrors - KVStoreSyncQueueSize = lm.KVStoreSyncQueueSize - KVStoreInitialSyncCompleted = lm.KVStoreInitialSyncCompleted FQDNGarbageCollectorCleanedTotal = lm.FQDNGarbageCollectorCleanedTotal FQDNActiveNames = lm.FQDNActiveNames FQDNActiveIPs = lm.FQDNActiveIPs FQDNAliveZombieConnections = lm.FQDNAliveZombieConnections + FQDNSelectors = lm.FQDNSelectors FQDNSemaphoreRejectedTotal = lm.FQDNSemaphoreRejectedTotal IPCacheErrorsTotal = lm.IPCacheErrorsTotal IPCacheEventsTotal = lm.IPCacheEventsTotal BPFSyscallDuration = lm.BPFSyscallDuration BPFMapOps = lm.BPFMapOps - TriggerPolicyUpdateTotal = lm.TriggerPolicyUpdateTotal - TriggerPolicyUpdateFolds = lm.TriggerPolicyUpdateFolds - TriggerPolicyUpdateCallDuration = lm.TriggerPolicyUpdateCallDuration + BPFMapCapacity = lm.BPFMapCapacity VersionMetric = lm.VersionMetric APILimiterWaitHistoryDuration = lm.APILimiterWaitHistoryDuration APILimiterWaitDuration = lm.APILimiterWaitDuration @@ -1379,6 +1438,20 @@ func NewLegacyMetrics() *LegacyMetrics { return lm } +// InitOperatorMetrics is used to init legacy metrics necessary during operator init. +func InitOperatorMetrics() { + ErrorsWarnings = newErrorsWarningsMetric() +} + +func newErrorsWarningsMetric() metric.Vec[metric.Counter] { + return metric.NewCounterVec(metric.CounterOpts{ + ConfigName: Namespace + "_errors_warnings_total", + Namespace: Namespace, + Name: "errors_warnings_total", + Help: "Number of total errors in cilium-agent instances", + }, []string{"level", "subsystem"}) +} + // GaugeWithThreshold is a prometheus gauge that registers itself with // prometheus if over a threshold value and unregisters when under. type GaugeWithThreshold struct { @@ -1442,22 +1515,15 @@ func Reinitialize() { } } -// MustRegister adds the collector to the registry, exposing this metric to -// prometheus scrapes. -// It will panic on error. 
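// The package-level MustRegister helper is removed below, and Register (further
// down in this file) now returns the underlying registry error instead of
// always returning nil. A minimal caller-side sketch, assuming only a plain
// Prometheus collector; the metric name is illustrative, not a Cilium metric:
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/cilium/cilium/pkg/metrics"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_events_total",
		Help: "Example collector registered through the package-level helper",
	})
	if err := metrics.Register(c); err != nil {
		log.Fatalf("metric registration failed: %v", err)
	}
}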
-func MustRegister(c ...prometheus.Collector) { - withRegistry(func(reg *Registry) { - reg.MustRegister(c...) - }) -} - // Register registers a collector func Register(c prometheus.Collector) error { + var err error + withRegistry(func(reg *Registry) { - reg.Register(c) + err = reg.Register(c) }) - return nil + return err } // RegisterList registers a list of collectors. If registration of one @@ -1482,19 +1548,6 @@ func Unregister(c prometheus.Collector) bool { return false } -// DumpMetrics gets the current Cilium metrics and dumps all into a -// models.Metrics structure.If metrics cannot be retrieved, returns an error -func DumpMetrics() ([]*models.Metric, error) { - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - reg, err := registry.Await(ctx) - if err == nil { - return reg.DumpMetrics() - } - - return nil, nil -} - // withRegistry waits up to 1 second for the registry promise to resolve, if it does not then // we might be calling this function before hive has been started, so to avoid a deadlock, // wait in a routine so actions are deferred until the registry is initialized. @@ -1547,9 +1600,30 @@ func Error2Outcome(err error) string { return LabelValueOutcomeSuccess } +// LabelOutcome2Code converts a label outcome to a code +func LabelOutcome2Code(outcome string) int { + if outcome == LabelValueOutcomeSuccess { + return 200 + } + return 500 +} + func BoolToFloat64(v bool) float64 { if v { return 1 } return 0 } + +// In general, most bpf maps are allocated to occupy a 16-bit key size. +// To reduce the number of metrics that need to be emitted for map capacity, +// we assume a default map size of 2^16 entries for all maps, which can be +// assumed unless specified otherwise. +const DefaultMapCapacity = 65536 + +func UpdateMapCapacity(groupName string, capacity uint32) { + if capacity == 0 || capacity == DefaultMapCapacity { + return + } + BPFMapCapacity.WithLabelValues(groupName).Set(float64(capacity)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go index 9521c6d1ac..184d3baa71 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go @@ -5,7 +5,11 @@ package metrics -import "golang.org/x/sys/unix" +import ( + "golang.org/x/sys/unix" + + "github.com/cilium/cilium/pkg/datapath/linux/probes" +) // Errno2Outcome converts a unix.Errno to LabelOutcome func Errno2Outcome(errno unix.Errno) string { @@ -15,3 +19,12 @@ func Errno2Outcome(errno unix.Errno) string { return LabelValueOutcomeSuccess } + +func enableIfIndexMetric() bool { + // On kernels which do not provide ifindex via the FIB, Cilium needs + // to store it in the CT map, with a field limit of max(uint16). + // The EndpointMaxIfindex metric can be used to determine if that + // limit is approaching. However, it should only be enabled by + // default if we observe that the FIB is not providing the ifindex. 
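	// HaveFibIfindex probes for that kernel support: a non-nil error means the
	// FIB does not provide the ifindex, so the metric defaults to enabled
	// (metrics.go constructs it with Disabled: !enableIfIndexMetric()).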
+ return probes.HaveFibIfindex() != nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go new file mode 100644 index 0000000000..dc4333ab32 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_windows.go @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +func enableIfIndexMetric() bool { + return false +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/plot.go b/vendor/github.com/cilium/cilium/pkg/metrics/plot.go new file mode 100644 index 0000000000..82555a92e2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/plot.go @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "fmt" + "io" + "math" + "runtime" + "slices" + "strings" + + "github.com/mitchellh/go-wordwrap" + + "github.com/cilium/cilium/pkg/time" +) + +// PlotSamples plots the given samples as a line graph using the unicode braille characters. +func PlotSamples(w io.Writer, rate bool, name, labels string, timeSpan time.Duration, samples []float32, sb SampleBitmap) { + // Do not let panics propagate from here. Log the sample input that caused the panic. + defer func() { + if err := recover(); err != nil { + _, file, line, _ := runtime.Caller(2) + fmt.Fprintf(w, "panic: samples=%v, err=%s, source=%s:%d\n", samples, err, file, line) + } + }() + + title := name + + // Reverse the samples (samples is a fixed size array, thus was passed by value). + // We want them ordered from oldest to newest the same as our X-axis. + slices.Reverse(samples[:]) + if rate { + // Compute the rate per second by iterating from oldest to newest and + // subtracting the previous sample and dividing by our sampling + // interval. + prev := samples[0] + for i := 1; i < len(samples); i++ { + s := samples[i] + samples[i] = (s - prev) / float32(samplingInterval.Seconds()) + prev = s + } + samples[0] = 0 + title += " (rate per second)" + } + sampleExists := func(index int) bool { + if index < 0 || index >= len(samples) { + return false + } + return sb.exists(len(samples) - 1 - int(index)) + } + + // Set up coordinates. We have two systems here, one for character + // coordinates (width, height, originX, originY, plotHeight, plotWidth) + // and one for the "dot" coordinates (plotHeightDots, plotWidthDots) using + // the braille symbols and thus 4x the height and 2x the width. + const width, height = 80, 10 + originX, originY := 11, 7 + plotHeight := height - 3 + plotHeightDots := plotHeight * 4 + plotWidth := width - originX - 1 + plotWidthDots := plotWidth * 2 + indentPlotOriginX := strings.Repeat(" ", originX) + + // Write the name of the metric at the center. + fmt.Fprintf(w, "%s%s%s\n", + indentPlotOriginX, + strings.Repeat(" ", plotWidth/2-len(title)/2), + title) + + // Write out the labels, also centered, but leave some margins. + if labels != "" { + for _, line := range strings.Split(wordwrap.WrapString(labels, uint(plotWidth-4)), "\n") { + fmt.Fprintf(w, "%s%s[ %s ]\n", + indentPlotOriginX, + strings.Repeat(" ", plotWidth/2-(len(line)+4)/2), + line) + } + } + + // Set up a canvas into which to draw in. + canvas := make([]rune, width*height) + for x := 0; x < width; x++ { + for y := 0; y < height; y++ { + if x >= originX && y <= originY { + // initialize the plot area to the braille base. this way we can + // just OR in the dots we want to show. 
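				// U+2800 is the blank braille pattern; each of the eight dots of a
				// braille cell maps to one bit of the code point, so setDot below
				// turns dots on by OR-ing masks from its braillePixels table into
				// this base rune.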
+ canvas[y*width+x] = '\u2800' + } else { + canvas[y*width+x] = ' ' + } + } + } + // setDot sets a braille dot within the dot coordinate system + // (0,0)...(plotWidthDots,plotHeightDots). + setDot := func(x, y int) { + var braillePixels = [][]rune{ + {0x1, 0x2, 0x4, 0x40}, // left dots (even 'x') + {0x08, 0x10, 0x20, 0x80}, // right + } + pos := rune((plotHeightDots - y - 1) % 4) + canvas[(originY-y/4)*width+originX+x/2] |= braillePixels[x%2][pos] + } + writeText := func(y, x int, format string, args ...any) { + copy(canvas[y*width+x:], []rune(fmt.Sprintf(format, args...))) + } + + // Calculate the graph minimum and maximum values + minY, maxY := float32(math.Inf(+1)), float32(math.Inf(-1)) + for _, y := range samples { + minY = min(minY, y) + maxY = max(maxY, y) + } + midY := (maxY + minY) / 2 + + // Figure out how to show the Y units + suffix := "" + if strings.Contains(name, "seconds") { + suffix = "s" + } + unit, multp := chooseUnit(float64(maxY)) + fmtY := func(v float32) string { + return fmt.Sprintf("%.1f%s%s", v*float32(multp), unit, suffix) + } + + // Render the labels and the box. + writeText(0, originX-1, "╭"+strings.Repeat("─", width-originX-1)+"╮") + writeText(1, 1, "%8s ┤", fmtY(maxY)) + writeText(1, width-1, "│") + writeText(2, originX-1, "│") + writeText(2, width-1, "│") + writeText(3, originX-1, "│") + writeText(3, width-1, "│") + writeText(4, 1, "%8s ┤", fmtY(midY)) + writeText(4, width-1, "│") + writeText(5, originX-1, "│") + writeText(5, width-1, "│") + writeText(6, originX-1, "│") + writeText(6, width-1, "│") + writeText(7, 1, "%8s ┤", fmtY(minY)) + writeText(7, width-1, "│") + writeText(8, originX-1, "╰"+strings.Repeat("─", width-originX-1)+"╯") + writeText(8, originX+3, "┬") + writeText(9, originX, "-%.0fmin", timeSpan.Minutes()) + writeText(8, originX+3, "┬") + writeText(8, originX+3+((width-10)/2)-3, "┬") + writeText(9, originX+((width-10)/2)-3, "-%.0fmin", timeSpan.Minutes()/2) + writeText(8, width-3, "┬") + writeText(9, width-4, "now") + + // Normalize negative values for plotting + if minY < 0.0 { + for i := range samples { + samples[i] += -minY + } + maxY += -minY + minY = 0.0 + } + if maxY == 0.0 { + maxY = 0.000001 + } + + // getSample returns the interpolated sample for the given x position + // in the dot coordinates. + getSample := func(x int) (float32, bool) { + // find which sample is closest to x (rounding down) + pos := float64(x) / float64(plotWidthDots) + index := int(float64(len(samples)-1) * pos) + + if !sampleExists(int(index)) { + return 0.0, false + } else if !sampleExists(index + 1) { + // the next sample is either out of range or not present, + // just return this sample without any interpolation. + return samples[index], true + } + + // interpolate between two samples for estimate value of 'x' + prevPos := float64(index) / float64(len(samples)-1) + nextPos := float64(index+1) / float64(len(samples)-1) + rel := float32((pos - prevPos) / (nextPos - prevPos)) + + return samples[index] + (samples[index+1]-samples[index])*rel, true + } + + // mapToY maps the value to the Y position + mapToY := func(v float32) int { + return int(((v - minY) / maxY) * (float32(plotHeightDots) - 0.001)) + } + + // Plot the samples (up to second to last column) + for x := 0; x < plotWidthDots-1; x++ { + if v, exists := getSample(x); exists { + setDot(x, mapToY(v)) + } + } + // Plot the last sample without interpolation so that we always show + // the latest sample even if it's the only one. 
+ if sampleExists(len(samples) - 1) { + setDot( + plotWidthDots-1, + mapToY(samples[len(samples)-1]), + ) + } + + // Finally write out our canvas. + for i := range height { + fmt.Fprintln(w, string(canvas[i*width:i*width+width])) + } +} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/registry.go b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go index 6bec5a3b12..c426696e07 100644 --- a/vendor/github.com/cilium/cilium/pkg/metrics/registry.go +++ b/vendor/github.com/cilium/cilium/pkg/metrics/registry.go @@ -9,22 +9,21 @@ import ( "regexp" "strings" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/hive" - "github.com/cilium/cilium/pkg/hive/cell" - metricpkg "github.com/cilium/cilium/pkg/metrics/metric" - "github.com/cilium/cilium/pkg/option" - + "github.com/cilium/hive" + "github.com/cilium/hive/cell" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" - dto "github.com/prometheus/client_model/go" "github.com/sirupsen/logrus" "github.com/spf13/pflag" + + "github.com/cilium/cilium/pkg/lock" + metricpkg "github.com/cilium/cilium/pkg/metrics/metric" + "github.com/cilium/cilium/pkg/option" ) var defaultRegistryConfig = RegistryConfig{ - PrometheusServeAddr: ":9962", + PrometheusServeAddr: "", } type RegistryConfig struct { @@ -45,7 +44,7 @@ type RegistryParams struct { Logger logrus.FieldLogger Shutdowner hive.Shutdowner - Lifecycle hive.Lifecycle + Lifecycle cell.Lifecycle AutoMetrics []metricpkg.WithMetadata `group:"hive-metrics"` Config RegistryConfig @@ -57,8 +56,15 @@ type RegistryParams struct { // on which all enabled metrics will be available. A reference to this registry can also be used to dynamically // register or unregister `prometheus.Collector`s. type Registry struct { + // inner registry of metrics. + // Served under the default /metrics endpoint. Each collector is wrapped with + // [metric.EnabledCollector] to only collect enabled metrics. inner *prometheus.Registry + // collectors holds all registered collectors. Used to periodically sample the + // metrics. 
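	// The set is consumed by the sampler in this package (sampler.go), which
	// snapshots every registered collector at a fixed interval whether or not
	// the metric is enabled.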
+ collectors collectorSet + params RegistryParams } @@ -82,8 +88,8 @@ func NewRegistry(params RegistryParams) *Registry { Handler: mux, } - params.Lifecycle.Append(hive.Hook{ - OnStart: func(hc hive.HookContext) error { + params.Lifecycle.Append(cell.Hook{ + OnStart: func(hc cell.HookContext) error { go func() { params.Logger.Infof("Serving prometheus metrics on %s", params.Config.PrometheusServeAddr) err := srv.ListenAndServe() @@ -93,7 +99,7 @@ func NewRegistry(params RegistryParams) *Registry { }() return nil }, - OnStop: func(hc hive.HookContext) error { + OnStop: func(hc cell.HookContext) error { return srv.Shutdown(hc) }, }) @@ -104,11 +110,13 @@ func NewRegistry(params RegistryParams) *Registry { // Register registers a collector func (r *Registry) Register(c prometheus.Collector) error { - return r.inner.Register(c) + r.collectors.add(c) + return r.inner.Register(metricpkg.EnabledCollector{C: c}) } // Unregister unregisters a collector func (r *Registry) Unregister(c prometheus.Collector) bool { + r.collectors.remove(c) return r.inner.Unregister(c) } @@ -125,12 +133,15 @@ func (r *Registry) Reinitialize() { collectors.WithGoCollectorRuntimeMetrics( collectors.GoRuntimeMetricsRule{Matcher: goCustomCollectorsRX}, ))) - r.MustRegister(newStatusCollector()) - r.MustRegister(newbpfCollector()) + + // Don't register status and BPF collectors into the [r.collectors] as it is + // expensive to sample and currently not terrible useful to keep data on. + r.inner.MustRegister(metricpkg.EnabledCollector{C: newStatusCollector()}) + r.inner.MustRegister(metricpkg.EnabledCollector{C: newbpfCollector()}) metrics := make(map[string]metricpkg.WithMetadata) for i, autoMetric := range r.params.AutoMetrics { - metrics[autoMetric.Opts().ConfigName] = r.params.AutoMetrics[i] + metrics[autoMetric.Opts().GetConfigName()] = r.params.AutoMetrics[i] } // This is a bodge for a very specific feature, inherited from the old `Daemon.additionalMetrics`. @@ -179,8 +190,11 @@ func (r *Registry) Reinitialize() { // MustRegister adds the collector to the registry, exposing this metric to // prometheus scrapes. // It will panic on error. -func (r *Registry) MustRegister(c ...prometheus.Collector) { - r.inner.MustRegister(c...) +func (r *Registry) MustRegister(cs ...prometheus.Collector) { + for _, c := range cs { + r.collectors.add(c) + r.inner.MustRegister(metricpkg.EnabledCollector{C: c}) + } } // RegisterList registers a list of collectors. 
If registration of one @@ -202,48 +216,38 @@ func (r *Registry) RegisterList(list []prometheus.Collector) error { return nil } -// DumpMetrics gets the current Cilium metrics and dumps all into a -// models.Metrics structure.If metrics cannot be retrieved, returns an error -func (r *Registry) DumpMetrics() ([]*models.Metric, error) { - result := []*models.Metric{} - currentMetrics, err := r.inner.Gather() - if err != nil { - return result, err - } - - for _, val := range currentMetrics { - metricName := val.GetName() - metricType := val.GetType() - - for _, metricLabel := range val.Metric { - labels := map[string]string{} - for _, label := range metricLabel.GetLabel() { - labels[label.GetName()] = label.GetValue() - } - - var value float64 - switch metricType { - case dto.MetricType_COUNTER: - value = metricLabel.Counter.GetValue() - case dto.MetricType_GAUGE: - value = metricLabel.GetGauge().GetValue() - case dto.MetricType_UNTYPED: - value = metricLabel.GetUntyped().GetValue() - case dto.MetricType_SUMMARY: - value = metricLabel.GetSummary().GetSampleSum() - case dto.MetricType_HISTOGRAM: - value = metricLabel.GetHistogram().GetSampleSum() - default: - continue - } +// collectorSet holds the prometheus collectors so that we can sample them +// periodically. The collectors are not wrapped with [EnabledCollector] so +// that they're sampled regardless if they're enabled or not. +type collectorSet struct { + mu lock.Mutex + collectors map[prometheus.Collector]struct{} +} - metric := &models.Metric{ - Name: metricName, - Labels: labels, - Value: value, - } - result = append(result, metric) +func (cs *collectorSet) collect() <-chan prometheus.Metric { + ch := make(chan prometheus.Metric, 100) + go func() { + cs.mu.Lock() + defer cs.mu.Unlock() + defer close(ch) + for c := range cs.collectors { + c.Collect(ch) } + }() + return ch +} + +func (cs *collectorSet) add(c prometheus.Collector) { + cs.mu.Lock() + if cs.collectors == nil { + cs.collectors = make(map[prometheus.Collector]struct{}) } - return result, nil + cs.collectors[c] = struct{}{} + cs.mu.Unlock() +} + +func (cs *collectorSet) remove(c prometheus.Collector) { + cs.mu.Lock() + delete(cs.collectors, c) + cs.mu.Unlock() } diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/sampler.go b/vendor/github.com/cilium/cilium/pkg/metrics/sampler.go new file mode 100644 index 0000000000..76ce354d50 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/metrics/sampler.go @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package metrics + +import ( + "context" + "fmt" + "log/slog" + "slices" + "strings" + + "github.com/cespare/xxhash/v2" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/job" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" + + "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/cilium/pkg/time" +) + +// sampler periodically samples all metrics (enabled or not). +// The sampled metrics can be inspected with the 'metrics' command. +// 'metrics -s' lists all metrics with samples from the past 2 hours, +// and 'metrics/plot (regex)' plots the matching metric. See files in +// 'testdata/' for examples. 
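// collectorSet.collect above streams metrics by running each collector's
// Collect method in a goroutine and closing the channel when done, and the
// sampler below decodes every sample through Metric.Write. A minimal sketch of
// the same pattern, using an illustrative stand-alone counter:
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_total", Help: "illustrative counter"})
	c.Add(3)

	ch := make(chan prometheus.Metric, 100)
	go func() {
		defer close(ch)
		c.Collect(ch)
	}()

	for m := range ch {
		var msg dto.Metric
		if err := m.Write(&msg); err != nil {
			continue
		}
		fmt.Println(msg.GetCounter().GetValue()) // prints 3
	}
}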
+type sampler struct { + reg *Registry + log *slog.Logger + mu lock.Mutex + metrics map[metricKey]debugSamples + maxWarningLogged bool +} + +func newSampler(log *slog.Logger, reg *Registry, jg job.Group) *sampler { + sampler := &sampler{ + log: log, + reg: reg, + metrics: make(map[metricKey]debugSamples), + } + jg.Add( + job.OneShot("collect", sampler.collectLoop), + job.Timer("cleanup", sampler.cleanup, metricDeadDuration/2), + ) + return sampler +} + +const ( + // Sample every 5 minutes and keep 2 hours of samples. + samplingInterval = 5 * time.Minute + // if you change this, fix m*Index below. + samplingTimeSpan = 2 * time.Hour + numSamples = int(samplingTimeSpan / samplingInterval) // 24 samples + m30Index = numSamples/4 - 1 + m60Index = numSamples/2 - 1 + m120Index = numSamples - 1 + + // Cap the number of metrics we keep around to put an upper limit on memory usage. + // As there's way fewer histograms than gauges or counters, we can roughly estimate + // the memory usage as: + // max 2000 (20% histo): 400 * sizeof(histogram) + 1600 * sizeof(gaugeOrCounter) + // ~= 400 * 508 + 1600 * 164 + // ~= 466kB + // worst (100% histo): 2000 * 520 ~= 1MB + // sizeof(baseSamples) = 24+2*16 = 56 + // sizeof(sampleRing) = 24*4+4 = 100 + // sizeof(histogramSamples): sizeof(baseSamples) + 24+16*8 /* prev */ + 3*sizeof(sampleRing) = 508 + // sizeof(gaugeOrCounterSamples): sizeof(baseSamples) + sizeof(sampleRing) + 8 = 164 + // See also TestSamplerMaxMemoryUsage. + maxSampledMetrics = 2000 + + // The amount of time that has to pass before a sampled metric is considered + // dead/unregistered. Once passed the sampled data is dropped. + metricDeadDuration = samplingInterval * time.Duration(numSamples) +) + +// metricKey identifies a single metric. We are relying on the fact that +// Desc() always returns by pointer the same Desc. +type metricKey struct { + desc *prometheus.Desc + labelsHash uint64 +} + +func (k *metricKey) fqName() string { + // Unfortunately we need to rely on the implementation details of Desc.String() + // here to extract the name. If it ever changes our tests will catch it. + // This method is only invoked when the 'metrics' or 'metrics/plot' commands + // are used, so efficiency is not a huge concern. + s := k.desc.String() + const fqNamePrefix = `fqName: "` + start := strings.Index(s, fqNamePrefix) + if start < 0 { + return "???" + } + start += len(fqNamePrefix) + end := strings.Index(s[start:], `"`) + if end < 0 { + return "???" + } + return s[start : start+end] +} + +// SampleBitmap tracks which of the 'numSamples' actually exists. +// For histograms we only mark it sampled when the counts have changed. +type SampleBitmap uint64 + +func (sb *SampleBitmap) mark(b bool) { + *sb <<= 1 + if b { + *sb |= 1 + } +} + +func (sb SampleBitmap) exists(index int) bool { + return (sb>>index)&1 == 1 +} + +type debugSamples interface { + getName() string + getLabels() string + getJSON() JSONSamples + + get() (m5, m30, m60, m120 string) + getUpdatedAt() time.Time +} + +type baseSamples struct { + updatedAt time.Time + name string + labels string +} + +func (bs baseSamples) getName() string { + return bs.name +} +func (bs baseSamples) getLabels() string { + return bs.labels +} + +type gaugeOrCounterSamples struct { + baseSamples + + samples sampleRing + + // pos points to index where the next sample goes. + // the latest sample is pos-1. 
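	// bits records which of the ring slots hold a real sample; see SampleBitmap
	// above.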
+ bits SampleBitmap +} + +type sampleRing struct { + samples [numSamples]float32 + pos int +} + +func (r *sampleRing) push(sample float32) { + r.samples[r.pos] = sample + r.pos = (r.pos + 1) % numSamples +} + +func (r *sampleRing) grab() []float32 { + var samples [numSamples]float32 + pos := r.pos - 1 + if pos < 0 { + pos = numSamples - 1 + } + for i := range numSamples { + samples[i] = r.samples[pos] + pos = pos - 1 + if pos < 0 { + pos = numSamples - 1 + } + } + return samples[:] +} + +func (g *gaugeOrCounterSamples) getUpdatedAt() time.Time { + return g.updatedAt +} + +func (g *gaugeOrCounterSamples) getJSON() JSONSamples { + samples := g.samples.grab() + return JSONSamples{ + Name: g.name, + Labels: g.labels, + GaugeOrCounter: &JSONGaugeOrCounter{ + Samples: samples[:], + }, + Latest: prettyValue(float64(samples[0])), + } +} + +func (g *gaugeOrCounterSamples) get() (m1, m30, m60, m120 string) { + samples := g.samples.grab() + return prettyValue(float64(samples[0])), + prettyValue(float64(samples[m30Index])), + prettyValue(float64(samples[m60Index])), + prettyValue(float64(samples[m120Index])) +} + +type histogramSamples struct { + baseSamples + prev []histogramBucket + p50, p90, p99 sampleRing + bits SampleBitmap + isSeconds bool +} + +func (h *histogramSamples) get() (m5, m30, m60, m120 string) { + suffix := "" + if h.isSeconds { + suffix = "s" + } + pretty := func(p50, p90, p99 float32) string { + return fmt.Sprintf("%s%s / %s%s / %s%s", + prettyValue(float64(p50)), + suffix, prettyValue(float64(p90)), + suffix, prettyValue(float64(p99)), suffix) + } + p50, p90, p99 := h.p50.grab(), h.p90.grab(), h.p99.grab() + + m5 = pretty(p50[0], p90[0], p99[0]) + m30 = pretty(p50[m30Index], p90[m30Index], p99[m30Index]) + m60 = pretty(p50[m60Index], p90[m60Index], p99[m60Index]) + m120 = pretty(p50[m120Index], p90[m120Index], p99[m120Index]) + return +} + +func (h *histogramSamples) getUpdatedAt() time.Time { + return h.updatedAt +} + +func (h *histogramSamples) getJSON() JSONSamples { + p50, p90, p99 := h.p50.grab(), h.p90.grab(), h.p99.grab() + suffix := "" + if h.isSeconds { + suffix = "s" + } + return JSONSamples{ + Name: h.name, + Labels: h.labels, + Histogram: &JSONHistogram{ + P50: p50[:], + P90: p90[:], + P99: p99[:], + }, + Latest: fmt.Sprintf("%s%s / %s%s / %s%s", + prettyValue(float64(p50[0])), + suffix, prettyValue(float64(p90[0])), + suffix, prettyValue(float64(p99[0])), suffix), + } +} + +// cleanup runs every hour to remove samples that have not been updated +// in more than an hour (e.g. the metric has been unregistered). +func (dc *sampler) cleanup(ctx context.Context) error { + dc.mu.Lock() + defer dc.mu.Unlock() + for k, s := range dc.metrics { + if time.Since(s.getUpdatedAt()) > metricDeadDuration { + delete(dc.metrics, k) + } + } + return nil +} + +func (dc *sampler) collectLoop(ctx context.Context, health cell.Health) error { + ticker := time.NewTicker(samplingInterval) + defer ticker.Stop() + + for { + dc.collect(health) + + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + } + } +} + +func (dc *sampler) collect(health cell.Health) { + dc.mu.Lock() + defer dc.mu.Unlock() + + health.OK("Collecting metrics") + + t0 := time.Now() + + // Since this is meant to have very low overhead we want to avoid heap allocations + // and other expensive operations as much as possible. Thus we're using Collect() + // to collect metric one at a time (vs Gather() that does a lot in parallel) and + // also avoiding building up temporary data structures. 
+ // One downside of this approach is that we need to parse Desc.String to extract + // the fqName and the labels, but we do this only when encountering a new metric + // and tests catch if it ever breaks. + + metricChan := dc.reg.collectors.collect() + + addNewMetric := func(key metricKey, s debugSamples) bool { + if len(dc.metrics) >= maxSampledMetrics { + if !dc.maxWarningLogged { + dc.log.Debug("maximum number of sampled metrics reached") + dc.maxWarningLogged = true + } + return false + } + dc.metrics[key] = s + return true + } + + numSampled := 0 + + for metric := range metricChan { + var msg dto.Metric + desc := metric.Desc() + if err := metric.Write(&msg); err != nil { + continue + } + key := newMetricKey(desc, msg.Label) + + if msg.Histogram != nil { + var histogram *histogramSamples + if samples, ok := dc.metrics[key]; !ok { + name := key.fqName() + histogram = &histogramSamples{ + baseSamples: baseSamples{name: name, labels: concatLabels(msg.Label)}, + isSeconds: strings.Contains(name, "seconds"), + } + if !addNewMetric(key, histogram) { + continue + } + } else { + histogram = samples.(*histogramSamples) + } + histogram.updatedAt = t0 + buckets := convertHistogram(msg.GetHistogram()) + + updated := histogramSampleCount(buckets) != histogramSampleCount(histogram.prev) + if updated { + b := buckets + if histogram.prev != nil { + // Previous sample exists, deduct the counts from it to get the quantiles + // of the last period. + b = slices.Clone(buckets) + subtractHistogram(b, histogram.prev) + } + histogram.p50.push(float32(getHistogramQuantile(b, 0.50))) + histogram.p90.push(float32(getHistogramQuantile(b, 0.90))) + histogram.p99.push(float32(getHistogramQuantile(b, 0.99))) + histogram.bits.mark(true) + } else { + histogram.p50.push(0.0) + histogram.p90.push(0.0) + histogram.p99.push(0.0) + histogram.bits.mark(false) + } + histogram.prev = buckets + } else { + var s *gaugeOrCounterSamples + if samples, ok := dc.metrics[key]; !ok { + s = &gaugeOrCounterSamples{ + baseSamples: baseSamples{name: key.fqName(), labels: concatLabels(msg.Label)}, + } + if !addNewMetric(key, s) { + continue + } + } else { + s = samples.(*gaugeOrCounterSamples) + } + s.updatedAt = t0 + + var value float64 + switch { + case msg.Counter != nil: + value = msg.Counter.GetValue() + case msg.Gauge != nil: + value = msg.Gauge.GetValue() + case msg.Summary != nil: + value = msg.Summary.GetSampleSum() / float64(msg.Summary.GetSampleCount()) + default: + value = -1.0 + } + s.samples.push(float32(value)) + s.bits.mark(true) + } + + numSampled++ + } + + health.OK(fmt.Sprintf("Sampled %d metrics in %s, next collection at %s", numSamples, time.Since(t0), t0.Add(samplingInterval))) +} + +var sep = []byte{model.SeparatorByte} + +// newMetricKey constructs a key to uniquely identify a specific metric. Designed +// to avoid heap allocations. 
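// newMetricKey below hashes the label pairs with xxhash so that children of the
// same *prometheus.Desc can be told apart without heap allocations. A
// self-contained sketch of that idea, with illustrative label strings and a
// stand-in separator byte:
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

var sep = []byte{0xff} // stands in for the model.SeparatorByte used below

func hashLabels(pairs ...string) uint64 {
	var d xxhash.Digest
	d.Reset()
	for _, p := range pairs {
		d.WriteString(p)
		d.Write(sep)
	}
	return d.Sum64()
}

func main() {
	a := hashLabels("outcome", "success")
	b := hashLabels("outcome", "success")
	c := hashLabels("outcome", "failure")
	fmt.Println(a == b, a == c) // true false
}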
+func newMetricKey(desc *prometheus.Desc, labels []*dto.LabelPair) metricKey { + var xxh xxhash.Digest + xxh.Reset() + for _, lp := range labels { + xxh.WriteString(lp.GetName()) + xxh.Write(sep) + xxh.WriteString(lp.GetValue()) + } + return metricKey{ + desc: desc, + labelsHash: xxh.Sum64(), + } +} + +func concatLabels(labels []*dto.LabelPair) string { + var b strings.Builder + for i, lp := range labels { + b.WriteString(lp.GetName()) + b.WriteByte('=') + b.WriteString(lp.GetValue()) + if i < len(labels)-1 { + b.WriteByte(' ') + } + } + return b.String() +} diff --git a/vendor/github.com/cilium/cilium/pkg/netns/doc.go b/vendor/github.com/cilium/cilium/pkg/netns/doc.go new file mode 100644 index 0000000000..4cb8bfa2d7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/netns/doc.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package netns contains various utility functions to work with network +// namespaces +package netns diff --git a/vendor/github.com/cilium/cilium/pkg/netns/netns_linux.go b/vendor/github.com/cilium/cilium/pkg/netns/netns_linux.go new file mode 100644 index 0000000000..5e2f1633c2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/netns/netns_linux.go @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package netns + +import ( + "fmt" + "os" + "runtime" + + "golang.org/x/sync/errgroup" + "golang.org/x/sys/unix" +) + +type NetNS struct { + f *os.File +} + +// newNetNS constructs a new NetNS and supplies it with a finalizer. +func newNetNS(f *os.File) *NetNS { + ns := &NetNS{f: f} + + // Prevent resource leaks by eventually closing the underlying file descriptor + // after ns is garbage collected. + runtime.SetFinalizer(ns, (*NetNS).Close) + + return ns +} + +// New creates a network namespace and returns a handle to it. +// +// The namespace created by this call is not pinned and will be closed when the +// last process in the namespace terminates, or when the handle is either +// Close()d explicitly or garbage collected. +// +// Not calling Close() is an error. +func New() (*NetNS, error) { + var f *os.File + + // Perform network namespace creation in a new goroutine to give us the + // possibility of terminating the underlying OS thread (by terminating the + // goroutine) if something goes wrong. + var g errgroup.Group + g.Go(func() error { + restoreUnlock, err := lockOSThread() + if err != nil { + return fmt.Errorf("lock OS thread: %w", err) + } + + // Move the underlying OS thread to a new network namespace. This can be + // undone by calling restoreUnlock(). + if err := unshare(); err != nil { + return fmt.Errorf("create new netns: %w", err) + } + + // Take out a reference to the new netns. + f, err = getCurrent() + if err != nil { + return fmt.Errorf("get current netns: %w (terminating OS thread)", err) + } + + // Restore the OS thread to its original network namespace or implicitly + // terminate it if something went wrong. + if err := restoreUnlock(); err != nil { + return fmt.Errorf("restore current netns: %w (terminating OS thread)", err) + } + + return nil + }) + + if err := g.Wait(); err != nil { + return nil, err + } + + return newNetNS(f), nil +} + +// OpenPinned opens a handle to the existing, pinned network namespace at the +// given path. Useful for running code within a netns managed by another process +// that pinned a network namespace to an nsfs. +// +// Not calling Close() is an error. 
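// A usage sketch for the new pkg/netns API on Linux: create a namespace with
// New (requires privileges to unshare a network namespace), run a function
// inside it with Do (defined further below), and always Close the handle. The
// printed message is illustrative only.
package main

import (
	"fmt"
	"log"

	"github.com/cilium/cilium/pkg/netns"
)

func main() {
	ns, err := netns.New()
	if err != nil {
		log.Fatal(err)
	}
	defer ns.Close()

	if err := ns.Do(func() error {
		// Runs on a goroutine whose OS thread has been moved into ns; the
		// callback must not call runtime.LockOSThread (see Do's documentation).
		fmt.Println("inside the new network namespace")
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}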
+func OpenPinned(path string) (*NetNS, error) { + f, err := getFromPath(path) + if err != nil { + return nil, err + } + + return newNetNS(f), nil +} + +// Current returns a handle to the network namespace of the calling goroutine's +// underlying OS thread. +func Current() (*NetNS, error) { + f, err := getCurrent() + if err != nil { + return nil, err + } + + return newNetNS(f), nil +} + +// GetNetNSCookie tries to retrieve the cookie of the host netns. +func GetNetNSCookie() (uint64, error) { + s, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0) + if err != nil { + return 0, err + } + defer unix.Close(s) + + cookie, err := unix.GetsockoptUint64(s, unix.SOL_SOCKET, unix.SO_NETNS_COOKIE) + if err != nil { + return 0, err + } + + return cookie, nil +} + +// FD returns the underlying file descriptor representing the netns handle. +func (h *NetNS) FD() int { + if h.f == nil { + return -1 + } + + return int(h.f.Fd()) +} + +// Close closes the handle to the network namespace. This does not necessarily +// mean destroying the network namespace itself, which only happens when all +// references to it are gone and all of its processes have been terminated. +func (h *NetNS) Close() error { + if h.f == nil { + return nil + } + + if err := h.f.Close(); err != nil { + return err + } + h.f = nil + + return nil +} + +// Do runs the provided func in the netns without changing the calling thread's +// netns. +// +// The code in f and any code called by f must NOT call [runtime.LockOSThread], +// as this could leave the goroutine created by Do permanently pinned to an OS +// thread. +func (h *NetNS) Do(f func() error) error { + + // Start the func in a new goroutine and lock it to an exclusive thread. This + // ensures that if execution of the goroutine fails unexpectedly before we + // call UnlockOSThread, the go runtime will ensure the underlying OS thread is + // disposed of, rather than reused in a potentially undefined state. + // + // See also: https://pkg.go.dev/runtime#UnlockOSThread + var g errgroup.Group + g.Go(func() error { + // Lock the newly-created goroutine to the OS thread it's running on so we + // can safely move it into another network namespace. (per-thread state) + restoreUnlock, err := lockOSThread() + if err != nil { + return err + } + + if err := set(h.f); err != nil { + return fmt.Errorf("set netns: %w (terminating OS thread)", err) + } + + ferr := f() + + // Attempt to restore the underlying OS thread to its original network + // namespace and unlock the running goroutine from its OS thread. Any + // failures during this process will leave the goroutine locked, making the + // underlying OS thread terminate when this function returns. + if err := restoreUnlock(); err != nil { + return fmt.Errorf("restore original netns: %w (terminating OS thread)", err) + } + return ferr + }) + + return g.Wait() +} + +// lockOSThread locks the calling goroutine to its underlying OS thread and +// returns a function that can later be used to unlock and restore the OS thread +// to its network namespace at the time of the initial call. +func lockOSThread() (func() error, error) { + runtime.LockOSThread() + + orig, err := getCurrent() + if err != nil { + runtime.UnlockOSThread() + return nil, fmt.Errorf("get current namespace: %w", err) + } + + return func() error { + defer orig.Close() + + if err := set(orig); err != nil { + // We didn't manage to restore the OS thread to its original namespace. 
+ // Don't unlock the current goroutine from its thread, so the thread will + // terminate when the current goroutine does. + return err + } + + // Original netns was restored, release the OS thread back into the + // schedulable pool. + runtime.UnlockOSThread() + + return nil + }, nil +} + +// unshare moves the calling OS thread of the calling goroutine to a new network +// namespace. Must only be called after a prior call to lockOSThread(). +func unshare() error { + if err := unix.Unshare(unix.CLONE_NEWNET); err != nil { + return err + } + return nil +} + +// set sets the underlying OS thread of the calling goroutine to the netns +// pointed at by f. +func set(f *os.File) error { + return unix.Setns(int(f.Fd()), unix.CLONE_NEWNET) +} + +// getCurrent gets a file descriptor to the current thread network namespace. +func getCurrent() (*os.File, error) { + return getFromThread(os.Getpid(), unix.Gettid()) +} + +// getFromPath gets a file descriptor to the network namespace pinned at path. +func getFromPath(path string) (*os.File, error) { + return os.OpenFile(path, unix.O_RDONLY|unix.O_CLOEXEC, 0) +} + +// getFromThread gets a file descriptor to the network namespace of a given pid +// and tid. +func getFromThread(pid, tid int) (*os.File, error) { + return getFromPath(fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/netns/netns_other.go b/vendor/github.com/cilium/cilium/pkg/netns/netns_other.go new file mode 100644 index 0000000000..2c4a687dba --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/netns/netns_other.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +//go:build !linux + +package netns + +import ( + "fmt" +) + +type NetNS struct{} + +func New() (*NetNS, error) { + return nil, fmt.Errorf("not implemented") +} + +func OpenPinned(string) (*NetNS, error) { + return nil, fmt.Errorf("not implemented") +} + +func (h *NetNS) FD() int { + return -1 +} + +func (h *NetNS) Close() error { + return fmt.Errorf("not implemented") +} + +func (h *NetNS) Do(func() error) error { + return fmt.Errorf("not implemented") +} diff --git a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go index f75d584727..345786de3f 100644 --- a/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go +++ b/vendor/github.com/cilium/cilium/pkg/node/addressing/addresstype.go @@ -3,9 +3,13 @@ package addressing +import ( + "net" +) + // AddressType represents a type of IP address for a node. They are copied // from k8s.io/api/core/v1/types.go to avoid pulling in a lot of Kubernetes -// imports into this package.s +// imports into this package. type AddressType string const ( @@ -16,3 +20,46 @@ const ( NodeInternalDNS AddressType = "InternalDNS" NodeCiliumInternalIP AddressType = "CiliumInternalIP" ) + +type Address interface { + AddrType() AddressType + ToString() string +} + +// ExtractNodeIP returns one of the provided IP addresses available with the following priority: +// - NodeInternalIP +// - NodeExternalIP +// - other IP address type +// An error is returned if ExtractNodeIP fails to get an IP based on the provided address family. 
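// A sketch of the selection priority implemented below: NodeInternalIP wins
// over NodeExternalIP, and NodeCiliumInternalIP is ignored. The addr helper
// type is hypothetical, defined here only to satisfy the Address interface.
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/node/addressing"
)

type addr struct {
	typ addressing.AddressType
	ip  string
}

func (a addr) AddrType() addressing.AddressType { return a.typ }
func (a addr) ToString() string                 { return a.ip }

func main() {
	addrs := []addr{
		{addressing.NodeCiliumInternalIP, "10.0.0.1"},
		{addressing.NodeExternalIP, "203.0.113.10"},
		{addressing.NodeInternalIP, "192.168.1.10"},
	}
	fmt.Println(addressing.ExtractNodeIP(addrs, false)) // 192.168.1.10
}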
+func ExtractNodeIP[T Address](addrs []T, ipv6 bool) net.IP { + var backupIP net.IP + for _, addr := range addrs { + parsed := net.ParseIP(addr.ToString()) + if parsed == nil { + continue + } + if (ipv6 && parsed.To4() != nil) || + (!ipv6 && parsed.To4() == nil) { + continue + } + switch addr.AddrType() { + // Ignore CiliumInternalIPs + case NodeCiliumInternalIP: + continue + // Always prefer a cluster internal IP + case NodeInternalIP: + return parsed + case NodeExternalIP: + // Fall back to external Node IP + // if no internal IP could be found + backupIP = parsed + default: + // As a last resort, if no internal or external + // IP was found, use any node address available + if backupIP == nil { + backupIP = parsed + } + } + } + return backupIP +} diff --git a/vendor/github.com/cilium/cilium/pkg/option/.gitignore b/vendor/github.com/cilium/cilium/pkg/option/.gitignore new file mode 100644 index 0000000000..68fddb5ff9 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/option/.gitignore @@ -0,0 +1 @@ +agent-runtime-config*.json diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index 6f94733c6f..8d76f8a412 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -5,6 +5,7 @@ package option import ( "bytes" + "crypto/sha256" "encoding/json" "errors" "fmt" @@ -13,16 +14,21 @@ import ( "net/netip" "os" "path/filepath" + "regexp" "runtime" - "sort" + "slices" "strconv" "strings" - "time" + "unicode" + "unicode/utf8" - "github.com/shirou/gopsutil/v3/mem" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/mackerelio/go-osstat/memory" "github.com/sirupsen/logrus" "github.com/spf13/cast" "github.com/spf13/cobra" + "github.com/spf13/pflag" "github.com/spf13/viper" k8sLabels "k8s.io/apimachinery/pkg/labels" @@ -33,10 +39,10 @@ import ( "github.com/cilium/cilium/pkg/defaults" "github.com/cilium/cilium/pkg/ip" ipamOption "github.com/cilium/cilium/pkg/ipam/option" - "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/mac" + "github.com/cilium/cilium/pkg/time" "github.com/cilium/cilium/pkg/version" ) @@ -107,6 +113,9 @@ const ( // ConntrackGCInterval is the name of the ConntrackGCInterval option ConntrackGCInterval = "conntrack-gc-interval" + // ConntrackGCMaxInterval is the name of the ConntrackGCMaxInterval option + ConntrackGCMaxInterval = "conntrack-gc-max-interval" + // DebugArg is the argument enables debugging mode DebugArg = "debug" @@ -116,32 +125,28 @@ const ( // Devices facing cluster/external network for attaching bpf_host Devices = "devices" + // Forces the auto-detection of devices, even if specific devices are explicitly listed + ForceDeviceDetection = "force-device-detection" + // DirectRoutingDevice is the name of a device used to connect nodes in // direct routing mode (only required by BPF NodePort) DirectRoutingDevice = "direct-routing-device" - // LBDevInheritIPAddr is device name which IP addr is inherited by devices - // running BPF loadbalancer program - LBDevInheritIPAddr = "bpf-lb-dev-ip-addr-inherit" - - // DisableEnvoyVersionCheck do not perform Envoy binary version check on startup - DisableEnvoyVersionCheck = "disable-envoy-version-check" - // EnablePolicy enables policy enforcement in the agent. 
EnablePolicy = "enable-policy" // EnableExternalIPs enables implementation of k8s services with externalIPs in datapath EnableExternalIPs = "enable-external-ips" - // K8sEnableEndpointSlice enables the k8s EndpointSlice feature into Cilium - K8sEnableEndpointSlice = "enable-k8s-endpoint-slice" - // EnableL7Proxy is the name of the option to enable L7 proxy EnableL7Proxy = "enable-l7-proxy" // EnableTracing enables tracing mode in the agent. EnableTracing = "enable-tracing" + // EnableIPIPTermination is the name of the option to enable IPIP termination + EnableIPIPTermination = "enable-ipip-termination" + // Add unreachable routes on pod deletion EnableUnreachableRoutes = "enable-unreachable-routes" @@ -151,29 +156,18 @@ const ( // EncryptNode enables node IP encryption EncryptNode = "encrypt-node" - // EnvoyLog sets the path to a separate Envoy log file, if any - EnvoyLog = "envoy-log" - // GopsPort is the TCP port for the gops server. GopsPort = "gops-port" - // ProxyPrometheusPort specifies the port to serve Cilium host proxy metrics on. - ProxyPrometheusPort = "proxy-prometheus-port" - - // ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for the proxy - ProxyMaxRequestsPerConnection = "proxy-max-requests-per-connection" - - // ProxyMaxConnectionDuration specifies the max_connection_duration setting for the proxy in seconds - ProxyMaxConnectionDuration = "proxy-max-connection-duration-seconds" - - // ProxyIdleTimeout specifies the idle_timeout setting (in seconds), which applies - // for the connection from proxy to upstream cluster - ProxyIdleTimeout = "proxy-idle-timeout-seconds" - // FixedIdentityMapping is the key-value for the fixed identity mapping // which allows to use reserved label for fixed identities FixedIdentityMapping = "fixed-identity-mapping" + // FixedZoneMapping is the key-value for the fixed zone mapping which + // is used to map zone value (string) from EndpointSlice to ID (uint8) + // in lb{4,6}_backend in BPF map. + FixedZoneMapping = "fixed-zone-mapping" + // IPv4Range is the per-node IPv4 endpoint prefix, e.g. 10.16.0.0/16 IPv4Range = "ipv4-range" @@ -257,18 +251,19 @@ const ( NodePortAlg = "node-port-algorithm" // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration = "node-port-acceleration" // Alias to NodePortMode LoadBalancerMode = "bpf-lb-mode" + // LoadBalancerModeAnnotation tells whether controller should check service + // level annotation for configuring bpf loadbalancing method (snat vs dsr). + LoadBalancerModeAnnotation = "bpf-lb-mode-annotation" + // Alias to DSR dispatch method LoadBalancerDSRDispatch = "bpf-lb-dsr-dispatch" - // Alias to DSR L4 translation method - LoadBalancerDSRL4Xlate = "bpf-lb-dsr-l4-xlate" - // Alias to DSR/IPIP IPv4 source CIDR LoadBalancerRSSv4CIDR = "bpf-lb-rss-ipv4-src-cidr" @@ -276,16 +271,21 @@ const ( LoadBalancerRSSv6CIDR = "bpf-lb-rss-ipv6-src-cidr" // Alias to NodePortAlg - LoadBalancerAlg = "bpf-lb-algorithm" + LoadBalancerAlgorithm = "bpf-lb-algorithm" + + // LoadBalancerAlgorithmAnnotation tells whether controller should check service + // level annotation for configuring bpf loadbalancing algorithm. 
+ LoadBalancerAlgorithmAnnotation = "bpf-lb-algorithm-annotation" // Alias to NodePortAcceleration LoadBalancerAcceleration = "bpf-lb-acceleration" - // MaglevTableSize determines the size of the backend table per service - MaglevTableSize = "bpf-lb-maglev-table-size" + // LoadBalancerExternalControlPlane switch skips connectivity to kube-apiserver + // which is relevant in lb-only mode + LoadBalancerExternalControlPlane = "bpf-lb-external-control-plane" - // MaglevHashSeed contains the cluster-wide seed for the hash - MaglevHashSeed = "bpf-lb-maglev-hash-seed" + // LoadBalancerProtocolDifferentiation enables support for service protocol differentiation (TCP, UDP, SCTP) + LoadBalancerProtocolDifferentiation = "bpf-lb-proto-diff" // NodePortBindProtection rejects bind requests to NodePort service ports NodePortBindProtection = "node-port-bind-protection" @@ -305,8 +305,6 @@ const ( // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity = "enable-session-affinity" - EnableServiceTopology = "enable-service-topology" - // EnableIdentityMark enables setting the mark field with the identity for // local traffic. This may be disabled if chaining modes and Cilium use // conflicting marks. @@ -321,12 +319,6 @@ const ( // considered local ones with HOST_ID in the ipcache AddressScopeMax = "local-max-addr-scope" - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager = "enable-bandwidth-manager" - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR = "enable-bbr" - // EnableRecorder enables the datapath pcap recorder EnableRecorder = "enable-recorder" @@ -348,29 +340,20 @@ const ( // LogOpt sets log driver options for cilium LogOpt = "log-opt" - // Logstash enables logstash integration - Logstash = "logstash" - // EnableIPv4Masquerade masquerades IPv4 packets from endpoints leaving the host. EnableIPv4Masquerade = "enable-ipv4-masquerade" // EnableIPv6Masquerade masquerades IPv6 packets from endpoints leaving the host. EnableIPv6Masquerade = "enable-ipv6-masquerade" - // EnableIPv6BIGTCP enables IPv6 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv6BIGTCP = "enable-ipv6-big-tcp" - - // EnableIPv4BIGTCP enables IPv4 BIG TCP (larger GSO/GRO limits) for the node including pods. 
- EnableIPv4BIGTCP = "enable-ipv4-big-tcp" - // EnableBPFClockProbe selects a more efficient source clock (jiffies vs ktime) EnableBPFClockProbe = "enable-bpf-clock-probe" // EnableBPFMasquerade masquerades packets from endpoints leaving the host with BPF instead of iptables EnableBPFMasquerade = "enable-bpf-masquerade" - // DeriveMasqIPAddrFromDevice is device name which IP addr is used for BPF masquerades - DeriveMasqIPAddrFromDevice = "derive-masquerade-ip-addr-from-device" + // EnableMasqueradeRouteSource masquerades to the source route IP address instead of the interface one + EnableMasqueradeRouteSource = "enable-masquerade-to-route-source" // EnableIPMasqAgent enables BPF ip-masq-agent EnableIPMasqAgent = "enable-ip-masq-agent" @@ -378,18 +361,9 @@ const ( // EnableIPv4EgressGateway enables the IPv4 egress gateway EnableIPv4EgressGateway = "enable-ipv4-egress-gateway" - // EnableIngressController enables Ingress Controller - EnableIngressController = "enable-ingress-controller" - - // EnableGatewayAPI enables Gateway API support - EnableGatewayAPI = "enable-gateway-api" - // EnableEnvoyConfig enables processing of CiliumClusterwideEnvoyConfig and CiliumEnvoyConfig CRDs EnableEnvoyConfig = "enable-envoy-config" - // EnvoyConfigTimeout determines how long to wait Envoy to N/ACK resources - EnvoyConfigTimeout = "envoy-config-timeout" - // IPMasqAgentConfigPath is the configuration file path IPMasqAgentConfigPath = "ip-masq-agent-config-path" @@ -400,10 +374,9 @@ const ( // to skip netfilter connection tracking on all pod traffic. InstallNoConntrackIptRules = "install-no-conntrack-iptables-rules" - IPTablesLockTimeout = "iptables-lock-timeout" - - // IPTablesRandomFully sets iptables flag random-fully on masquerading rules - IPTablesRandomFully = "iptables-random-fully" + // ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve + // the provided comma-separated list of ports in the container network namespace + ContainerIPLocalReservedPorts = "container-ip-local-reserved-ports" // IPv6NodeAddr is the IPv6 address of node IPv6NodeAddr = "ipv6-node" @@ -414,9 +387,6 @@ const ( // Restore restores state, if possible, from previous daemon Restore = "restore" - // SidecarIstioProxyImage regular expression matching compatible Istio sidecar istio-proxy container image names - SidecarIstioProxyImage = "sidecar-istio-proxy-image" - // SocketPath sets daemon's socket path to listen for connections SocketPath = "socket-path" @@ -432,6 +402,9 @@ const ( // EnableXDPPrefilter enables XDP-based prefiltering EnableXDPPrefilter = "enable-xdp-prefilter" + // EnableTCX enables attaching endpoint programs using tcx if the kernel supports it + EnableTCX = "enable-tcx" + ProcFs = "procfs" // PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off) @@ -494,6 +467,17 @@ const ( // DNSProxyLockCount. DNSProxyLockTimeout = "dnsproxy-lock-timeout" + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout = "dnsproxy-socket-linger-timeout" + + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. 
+ DNSProxyEnableTransparentMode = "dnsproxy-enable-transparent-mode" + + // DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users + // to disable transparent mode even if IPSec is enabled + DNSProxyInsecureSkipTransparentModeCheck = "dnsproxy-insecure-skip-transparent-mode-check" + // MTUName is the name of the MTU option MTUName = "mtu" @@ -512,25 +496,28 @@ const ( // BPFSocketLBHostnsOnly is the name of the BPFSocketLBHostnsOnly option BPFSocketLBHostnsOnly = "bpf-lb-sock-hostns-only" - // TunnelName is the name of the Tunnel option - TunnelName = "tunnel" + // EnableSocketLBPodConnectionTermination enables termination of pod connections + // to deleted service backends when socket-LB is enabled. + EnableSocketLBPodConnectionTermination = "bpf-lb-sock-terminate-pod-connections" // RoutingMode is the name of the option to choose between native routing and tunneling mode RoutingMode = "routing-mode" - // TunnelProtocol is the name of the option to select the tunneling protocol - TunnelProtocol = "tunnel-protocol" + // ServiceNoBackendResponse is the name of the option to pick how to handle traffic for services + // without any backends + ServiceNoBackendResponse = "service-no-backend-response" - // TunnelPortName is the name of the TunnelPort option - TunnelPortName = "tunnel-port" + // ServiceNoBackendResponseReject is the name of the option to reject traffic for services + // without any backends + ServiceNoBackendResponseReject = "reject" - // SingleClusterRouteName is the name of the SingleClusterRoute option - // - // SingleClusterRoute enables use of a single route covering the entire - // cluster CIDR to point to the cilium_host interface instead of using - // a separate route for each cluster node CIDR. This option is not - // compatible with Tunnel=TunnelDisabled - SingleClusterRouteName = "single-cluster-route" + // ServiceNoBackendResponseDrop is the name of the option to drop traffic for services + // without any backends + ServiceNoBackendResponseDrop = "drop" + + // MaxInternalTimerDelay sets a maximum on all periodic timers in + // the agent in order to flush out timer-related bugs in the agent. + MaxInternalTimerDelay = "max-internal-timer-delay" // MonitorAggregationName specifies the MonitorAggregationLevel on the // comandline. @@ -545,12 +532,6 @@ const ( // ciliumEnvPrefix is the prefix used for environment variables ciliumEnvPrefix = "CILIUM_" - // ClusterName is the name of the ClusterName option - ClusterName = "cluster-name" - - // ClusterIDName is the name of the ClusterID option - ClusterIDName = "cluster-id" - // CNIChainingMode configures which CNI plugin Cilium is chained with. CNIChainingMode = "cni-chaining-mode" @@ -567,6 +548,9 @@ const ( // AuthMapEntriesDefault defines the default auth map limit. AuthMapEntriesDefault = 1 << 19 + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccountingDefault = false + // AuthMapEntriesName configures max entries for BPF auth map. AuthMapEntriesName = "bpf-auth-map-max" @@ -652,6 +636,10 @@ const ( // PolicyMapEntriesName configures max entries for BPF policymap. PolicyMapEntriesName = "bpf-policy-map-max" + // PolicyMapFullReconciliationInterval sets the interval for performing the full + // reconciliation of the endpoint policy map. + PolicyMapFullReconciliationIntervalName = "bpf-policy-map-full-reconciliation-interval" + // SockRevNatEntriesName configures max entries for BPF sock reverse nat // entries. 
SockRevNatEntriesName = "bpf-sock-rev-map-max" @@ -664,10 +652,6 @@ const ( // load loggging LogSystemLoadConfigName = "log-system-load" - // PrependIptablesChainsName is the name of the option to enable - // prepending iptables chains instead of appending - PrependIptablesChainsName = "prepend-iptables-chains" - // DisableCiliumEndpointCRDName is the name of the option to disable // use of the CEP CRD DisableCiliumEndpointCRDName = "disable-endpoint-crd" @@ -710,6 +694,25 @@ const ( // IPv6MCastDevice is the name of the option to select IPv6 multicast device IPv6MCastDevice = "ipv6-mcast-device" + // BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to + // BPF events map. This limit is defined for all types of events except dbg and pcap. + // The number of messages is averaged, meaning that if no messages were written + // to the map over 5 seconds, it's possible to write more events than the value of rate limit + // in the 6th second. + // + // If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided + // lest the configuration is considered invalid. + // If both rate and burst limit are 0 or not specified, no limit is imposed. + BPFEventsDefaultRateLimit = "bpf-events-default-rate-limit" + + // BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written + // to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap. + // + // If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided + // lest the configuration is considered invalid. + // If both burst and rate limit are 0 or not specified, no limit is imposed. + BPFEventsDefaultBurstLimit = "bpf-events-default-burst-limit" + // FQDNRejectResponseCode is the name for the option for dns-proxy reject response code FQDNRejectResponseCode = "tofqdns-dns-reject-response-code" @@ -736,12 +739,12 @@ const ( // EnableBPFTProxy option supports enabling or disabling BPF TProxy. EnableBPFTProxy = "enable-bpf-tproxy" - // EnableXTSocketFallbackName is the name of the EnableXTSocketFallback option - EnableXTSocketFallbackName = "enable-xt-socket-fallback" - // EnableAutoDirectRoutingName is the name for the EnableAutoDirectRouting option EnableAutoDirectRoutingName = "auto-direct-node-routes" + // DirectRoutingSkipUnreachableName is the name for the DirectRoutingSkipUnreachable option + DirectRoutingSkipUnreachableName = "direct-routing-skip-unreachable" + // EnableIPSecName is the name of the option to enable IPSec EnableIPSecName = "enable-ipsec" @@ -753,10 +756,25 @@ const ( // be necessary on key rotations. EnableIPsecKeyWatcher = "enable-ipsec-key-watcher" + // Enable caching for XfrmState for IPSec. Significantly reduces CPU usage + // in large clusters. + EnableIPSecXfrmStateCaching = "enable-ipsec-xfrm-state-caching" + // IPSecKeyFileName is the name of the option for ipsec key file IPSecKeyFileName = "ipsec-key-file" - // EnableWireguard is the name of the option to enable wireguard + // EnableIPSecEncrytpedOverlay is the name of the option which enables + // the EncryptedOverlay feature. + // + // This feature will encrypt overlay traffic before it leaves the cluster. + EnableIPSecEncryptedOverlay = "enable-ipsec-encrypted-overlay" + + // BootIDFilename is a hidden flag that allows users to specify a + // filename other than /proc/sys/kernel/random/boot_id. This can be + // useful for testing purposes in local containerized cluster. 
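// Illustrative sketch, not part of the vendored patch: the validity rule
// documented for BPFEventsDefaultRateLimit / BPFEventsDefaultBurstLimit above
// (either both are zero, meaning no limit, or both must be non-zero),
// expressed as a standalone check. Function and variable names are hypothetical.
package main

import (
	"errors"
	"fmt"
)

// validateEventsRateLimit enforces that rate and burst are either both unset
// (no limit imposed) or both set to a non-zero value.
func validateEventsRateLimit(rate, burst uint32) error {
	if (rate == 0) != (burst == 0) {
		return errors.New("bpf-events-default-rate-limit and bpf-events-default-burst-limit must both be zero or both be non-zero")
	}
	return nil
}

func main() {
	fmt.Println(validateEventsRateLimit(0, 0))    // <nil>  -> no limit imposed
	fmt.Println(validateEventsRateLimit(100, 10)) // <nil>  -> rate averaged, burst capped per second
	fmt.Println(validateEventsRateLimit(100, 0))  // error  -> invalid configuration
}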
+ BootIDFilename = "boot-id-file" + + // EnableWireguard is the name of the option to enable WireGuard EnableWireguard = "enable-wireguard" // EnableL2Announcements is the name of the option to enable l2 announcements @@ -771,8 +789,19 @@ const ( // L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time. L2AnnouncerRetryPeriod = "l2-announcements-retry-period" - // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to wireguard userspace mode - EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback" + // EnableEncryptionStrictMode is the name of the option to enable strict encryption mode. + EnableEncryptionStrictMode = "enable-encryption-strict-mode" + + // EncryptionStrictModeCIDR is the CIDR in which the strict ecryption mode should be enforced. + EncryptionStrictModeCIDR = "encryption-strict-mode-cidr" + + // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of remote node identities. + // This is required when tunneling is used + // or direct routing is used and the node CIDR and pod CIDR overlap. + EncryptionStrictModeAllowRemoteNodeIdentities = "encryption-strict-mode-allow-remote-node-identities" + + // WireguardPersistentKeepalivee controls Wireguard PersistentKeepalive option. Set 0 to disable. + WireguardPersistentKeepalive = "wireguard-persistent-keepalive" // NodeEncryptionOptOutLabels is the name of the option for the node-to-node encryption opt-out labels NodeEncryptionOptOutLabels = "node-encryption-opt-out-labels" @@ -791,8 +820,9 @@ const ( // KVstoreConnectivityTimeout is the timeout when performing kvstore operations KVstoreConnectivityTimeout = "kvstore-connectivity-timeout" - // IPAllocationTimeout is the timeout when allocating CIDRs - IPAllocationTimeout = "ip-allocation-timeout" + // KVstorePodNetworkSupport enables the support for running the Cilium KVstore + // in pod network. + KVstorePodNetworkSupport = "kvstore-pod-network-support" // IdentityChangeGracePeriod is the name of the // IdentityChangeGracePeriod option @@ -811,9 +841,11 @@ const ( // EnableHealthCheckNodePort is the name of the EnableHealthCheckNodePort option EnableHealthCheckNodePort = "enable-health-check-nodeport" - // PolicyQueueSize is the size of the queues utilized by the policy - // repository. - PolicyQueueSize = "policy-queue-size" + // EnableHealthCheckLoadBalancerIP is the name of the EnableHealthCheckLoadBalancerIP option + EnableHealthCheckLoadBalancerIP = "enable-health-check-loadbalancer-ip" + + // HealthCheckICMPFailureThreshold is the name of the HealthCheckICMPFailureThreshold option + HealthCheckICMPFailureThreshold = "health-check-icmp-failure-threshold" // EndpointQueueSize is the size of the EventQueue per-endpoint. EndpointQueueSize = "endpoint-queue-size" @@ -822,8 +854,8 @@ const ( // endpoints that are no longer alive and healthy. EndpointGCInterval = "endpoint-gc-interval" - // K8sEventHandover is the name of the K8sEventHandover option - K8sEventHandover = "enable-k8s-event-handover" + // EndpointRegenInterval is the interval of the periodic endpoint regeneration loop. 
+ EndpointRegenInterval = "endpoint-regen-interval" // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 = "ipv4-service-loopback-address" @@ -857,9 +889,15 @@ const ( // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation" + // IPAMDefaultIPPool defines the default IP Pool when using multi-pool + IPAMDefaultIPPool = "ipam-default-ip-pool" + // XDPModeNative for loading progs with XDPModeLinkDriver XDPModeNative = "native" + // XDPModeBestEffort for loading progs with XDPModeLinkDriver + XDPModeBestEffort = "best-effort" + // XDPModeGeneric for loading progs with XDPModeLinkGeneric XDPModeGeneric = "testing-only" @@ -885,15 +923,19 @@ const ( // CiliumNode resource for the local node AutoCreateCiliumNodeResource = "auto-create-cilium-node-resource" + // ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium + // node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster. + ExcludeNodeLabelPatterns = "exclude-node-label-patterns" + // IPv4NativeRoutingCIDR describes a v4 CIDR in which pod IPs are routable IPv4NativeRoutingCIDR = "ipv4-native-routing-cidr" // IPv6NativeRoutingCIDR describes a v6 CIDR in which pod IPs are routable IPv6NativeRoutingCIDR = "ipv6-native-routing-cidr" - // EgressMasqueradeInterfaces is the selector used to select interfaces - // subject to egress masquerading - EgressMasqueradeInterfaces = "egress-masquerade-interfaces" + // MasqueradeInterfaces is the selector used to select interfaces subject to + // egress masquerading + MasqueradeInterfaces = "egress-masquerade-interfaces" // PolicyTriggerInterval is the amount of time between triggers of policy // updates are invoked. @@ -911,9 +953,13 @@ const ( // identity allocation IdentityAllocationModeCRD = "crd" - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates = "disable-cnp-status-updates" + // IdentityAllocationModeDoubleWriteReadKVstore writes identities to the KVStore and as CRDs at the same time. + // Identities are then read from the KVStore. + IdentityAllocationModeDoubleWriteReadKVstore = "doublewrite-readkvstore" + + // IdentityAllocationModeDoubleWriteReadCRD writes identities to the KVStore and as CRDs at the same time. + // Identities are then read from the CRDs. + IdentityAllocationModeDoubleWriteReadCRD = "doublewrite-readcrd" // EnableLocalNodeRoute controls installation of the route which points // the allocation prefix of the local node. @@ -924,114 +970,21 @@ const ( // control plane, e.g. when using the managed etcd feature EnableWellKnownIdentities = "enable-well-known-identities" - // EnableRemoteNodeIdentity enables use of the remote-node identity - EnableRemoteNodeIdentity = "enable-remote-node-identity" - // PolicyAuditModeArg argument enables policy audit mode. PolicyAuditModeArg = "policy-audit-mode" - // EnableHubble enables hubble in the agent. - EnableHubble = "enable-hubble" - - // HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to. - HubbleSocketPath = "hubble-socket-path" - - // HubbleListenAddress specifies address for Hubble server to listen to. - HubbleListenAddress = "hubble-listen-address" - - // HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for - // communication to agents, if both are available. 
- HubblePreferIpv6 = "hubble-prefer-ipv6" - - // HubbleTLSDisabled allows the Hubble server to run on the given listen - // address without TLS. - HubbleTLSDisabled = "hubble-disable-tls" - - // HubbleTLSCertFile specifies the path to the public key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSCertFile = "hubble-tls-cert-file" - - // HubbleTLSKeyFile specifies the path to the private key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSKeyFile = "hubble-tls-key-file" + // PolicyAccountingArg argument enable policy accounting. + PolicyAccountingArg = "policy-accounting" - // HubbleTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS). The files - // must contain PEM encoded data. - HubbleTLSClientCAFiles = "hubble-tls-client-ca-files" + // K8sClientConnectionTimeout configures the timeout for K8s client connections. + K8sClientConnectionTimeout = "k8s-client-connection-timeout" - // HubbleEventBufferCapacity specifies the capacity of Hubble events buffer. - HubbleEventBufferCapacity = "hubble-event-buffer-capacity" - - // HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events. - HubbleEventQueueSize = "hubble-event-queue-size" - - // HubbleMetricsServer specifies the addresses to serve Hubble metrics on. - HubbleMetricsServer = "hubble-metrics-server" - - // HubbleMetrics specifies enabled metrics and their configuration options. - HubbleMetrics = "hubble-metrics" - - // HubbleExportFilePath specifies the filepath to write Hubble events to. - // e.g. "/var/run/cilium/hubble/events.log" - HubbleExportFilePath = "hubble-export-file-path" - - // HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate - // the Hubble export file. - HubbleExportFileMaxSizeMB = "hubble-export-file-max-size-mb" - - // HubbleExportFileMaxBacks specifies the number of rotated files to keep. - HubbleExportFileMaxBackups = "hubble-export-file-max-backups" - - // HubbleExportFileCompress specifies whether rotated files are compressed. - HubbleExportFileCompress = "hubble-export-file-compress" - - // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served - EnableHubbleRecorderAPI = "enable-hubble-recorder-api" - - // EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format. - EnableHubbleOpenMetrics = "enable-hubble-open-metrics" - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath = "hubble-recorder-storage-path" - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize = "hubble-recorder-sink-queue-size" - - // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped - HubbleSkipUnknownCGroupIDs = "hubble-skip-unknown-cgroup-ids" - - // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. - // By default, Hubble observes all monitor events. - HubbleMonitorEvents = "hubble-monitor-events" - - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules = "disable-iptables-feeder-rules" + // K8sClientConnectionKeepAlive configures the keep alive duration for K8s client connections. 
+ K8sClientConnectionKeepAlive = "k8s-client-connection-keep-alive" // K8sHeartbeatTimeout configures the timeout for apiserver heartbeat K8sHeartbeatTimeout = "k8s-heartbeat-timeout" - // EndpointStatus enables population of information in the - // CiliumEndpoint.Status resource - EndpointStatus = "endpoint-status" - - // EndpointStatusPolicy enables CiliumEndpoint.Status.Policy - EndpointStatusPolicy = "policy" - - // EndpointStatusHealth enables CiliumEndpoint.Status.Health - EndpointStatusHealth = "health" - - // EndpointStatusControllers enables CiliumEndpoint.Status.Controllers - EndpointStatusControllers = "controllers" - - // EndpointStatusLog enables CiliumEndpoint.Status.Log - EndpointStatusLog = "log" - - // EndpointStatusState enables CiliumEndpoint.Status.State - EndpointStatusState = "state" - // EnableIPv4FragmentsTrackingName is the name of the option to enable // IPv4 fragments tracking for L4-based lookups. Needs LRU map support. EnableIPv4FragmentsTrackingName = "enable-ipv4-fragment-tracking" @@ -1058,19 +1011,15 @@ const ( // LBAffinityMapMaxEntries configures max entries of bpf map for session affinity. LBAffinityMapMaxEntries = "bpf-lb-affinity-map-max" + // LBSourceRangeAllTypes configures service source ranges for all service types. + LBSourceRangeAllTypes = "bpf-lb-source-range-all-types" + // LBSourceRangeMapMaxEntries configures max entries of bpf map for service source ranges. LBSourceRangeMapMaxEntries = "bpf-lb-source-range-map-max" // LBMaglevMapMaxEntries configures max entries of bpf map for Maglev. LBMaglevMapMaxEntries = "bpf-lb-maglev-map-max" - // K8sServiceProxyName instructs Cilium to handle service objects only when - // service.kubernetes.io/service-proxy-name label equals the provided value. - K8sServiceProxyName = "k8s-service-proxy-name" - - // APIRateLimitName enables configuration of the API rate limits - APIRateLimitName = "api-rate-limit" - // CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not // available. CRDWaitTimeout = "crd-wait-timeout" @@ -1080,19 +1029,16 @@ const ( // Otherwise, it will use the old scheme. EgressMultiHomeIPRuleCompat = "egress-multi-home-ip-rule-compat" + // Install ingress/egress routes through uplink on host for Pods when working with + // delegated IPAM plugin. + InstallUplinkRoutesForDelegatedIPAM = "install-uplink-routes-for-delegated-ipam" + // EnableCustomCallsName is the name of the option to enable tail calls // for user-defined custom eBPF programs. EnableCustomCallsName = "enable-custom-calls" - // BGPAnnounceLBIP announces service IPs of type LoadBalancer via BGP - BGPAnnounceLBIP = "bgp-announce-lb-ip" - - // BGPAnnouncePodCIDR announces the node's pod CIDR via BGP - BGPAnnouncePodCIDR = "bgp-announce-pod-cidr" - - // BGPConfigPath is the file path to the BGP configuration. It is - // compatible with MetalLB's configuration. - BGPConfigPath = "bgp-config-path" + // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from. + BGPSecretsNamespace = "bgp-secrets-namespace" // ExternalClusterIPName is the name of the option to enable // cluster external access to ClusterIP services. @@ -1101,6 +1047,9 @@ const ( // VLANBPFBypass instructs Cilium to bypass bpf logic for vlan tagged packets VLANBPFBypass = "vlan-bpf-bypass" + // DisableExternalIPMitigation disable ExternalIP mitigation (CVE-2020-8554) + DisableExternalIPMitigation = "disable-external-ip-mitigation" + // EnableICMPRules enables ICMP-based rule support for Cilium Network Policies. 
EnableICMPRules = "enable-icmp-rules" @@ -1138,14 +1087,8 @@ const ( // Flag to enable BGP control plane features EnableBGPControlPlane = "enable-bgp-control-plane" - // EnvoySecretsNamespace is the namespace having secrets used by CEC. - EnvoySecretsNamespace = "envoy-secrets-namespace" - - // IngressSecretsNamespace is the namespace having tls secrets used by CEC, originating from Ingress controller. - IngressSecretsNamespace = "ingress-secrets-namespace" - - // GatewayAPISecretsNamespace is the namespace having tls secrets used by CEC, originating from Gateway API. - GatewayAPISecretsNamespace = "gateway-api-secrets-namespace" + // EnableBGPControlPlaneStatusReport enables BGP Control Plane CRD status reporting + EnableBGPControlPlaneStatusReport = "enable-bgp-control-plane-status-report" // EnableRuntimeDeviceDetection is the name of the option to enable detection // of new and removed datapath devices during the agent runtime. @@ -1159,16 +1102,51 @@ const ( // and the max size and TTL of events in the buffers should be. BPFMapEventBuffers = "bpf-map-event-buffers" - // EnableStaleCiliumEndpointCleanup sets whether Cilium should perform cleanup of - // stale CiliumEndpoints during init. - EnableStaleCiliumEndpointCleanup = "enable-stale-cilium-endpoint-cleanup" - // IPAMCiliumnodeUpdateRate is the maximum rate at which the CiliumNode custom // resource is updated. IPAMCiliumNodeUpdateRate = "ipam-cilium-node-update-rate" // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy = "enable-k8s-networkpolicy" + + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy = "enable-cilium-network-policy" + + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. + EnableCiliumClusterwideNetworkPolicy = "enable-cilium-clusterwide-network-policy" + + // PolicyCIDRMatchMode defines the entities that CIDR selectors can reach + PolicyCIDRMatchMode = "policy-cidr-match-mode" + + // EnableNodeSelectorLabels enables use of the node label based identity + EnableNodeSelectorLabels = "enable-node-selector-labels" + + // NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of + // EnableNodeSelectorLabels) + NodeLabels = "node-labels" + + // BPFEventsDropEnabled defines the DropNotification setting for any endpoint + BPFEventsDropEnabled = "bpf-events-drop-enabled" + + // BPFEventsPolicyVerdictEnabled defines the PolicyVerdictNotification setting for any endpoint + BPFEventsPolicyVerdictEnabled = "bpf-events-policy-verdict-enabled" + + // BPFEventsTraceEnabled defines the TraceNotification setting for any endpoint + BPFEventsTraceEnabled = "bpf-events-trace-enabled" + + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled + BPFConntrackAccounting = "bpf-conntrack-accounting" + + // EnableInternalTrafficPolicy enables handling routing for services with internalTrafficPolicy configured + EnableInternalTrafficPolicy = "enable-internal-traffic-policy" + + // EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode + EnableNonDefaultDenyPolicies = "enable-non-default-deny-policies" + + // EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's + // policy map overflows. 
+ EnableEndpointLockdownOnPolicyOverflow = "enable-endpoint-lockdown-on-policy-overflow" ) // Default string arguments @@ -1180,18 +1158,6 @@ var ( MonitorAggregationFlagsDefault = []string{"syn", "fin", "rst"} ) -// Available option for DaemonConfig.Tunnel -const ( - // TunnelVXLAN specifies VXLAN encapsulation - TunnelVXLAN = "vxlan" - - // TunnelGeneve specifies Geneve encapsulation - TunnelGeneve = "geneve" - - // TunnelDisabled specifies to disable encapsulation - TunnelDisabled = "disabled" -) - // Available options for DaemonConfig.RoutingMode const ( // RoutingModeNative specifies native routing mode @@ -1201,43 +1167,10 @@ const ( RoutingModeTunnel = "tunnel" ) -// Envoy option names const ( - // HTTPNormalizePath switches on Envoy HTTP path normalization options, which currently - // includes RFC 3986 path normalization, Envoy merge slashes option, and unescaping and - // redirecting for paths that contain escaped slashes. These are necessary to keep path based - // access control functional, and should not interfere with normal operation. Set this to - // false only with caution. - HTTPNormalizePath = "http-normalize-path" - // HTTP403Message specifies the response body for 403 responses, defaults to "Access denied" HTTP403Message = "http-403-msg" - // HTTPRequestTimeout specifies the time in seconds after which forwarded requests time out - HTTPRequestTimeout = "http-request-timeout" - - // HTTPIdleTimeout spcifies the time in seconds if http stream being idle after which the - // request times out - HTTPIdleTimeout = "http-idle-timeout" - - // HTTPMaxGRPCTimeout specifies the maximum time in seconds that limits the values of - // "grpc-timeout" headers being honored. - HTTPMaxGRPCTimeout = "http-max-grpc-timeout" - - // HTTPRetryCount specifies the number of retries performed after a forwarded request fails - HTTPRetryCount = "http-retry-count" - - // HTTPRetryTimeout is the time in seconds before an uncompleted request is retried. - HTTPRetryTimeout = "http-retry-timeout" - - // ProxyConnectTimeout specifies the time in seconds after which a TCP connection attempt - // is considered timed out - ProxyConnectTimeout = "proxy-connect-timeout" - - // ProxyGID specifies the group ID that has access to unix domain sockets opened by Cilium - // agent for proxy configuration and access logging. - ProxyGID = "proxy-gid" - // ReadCNIConfiguration reads the CNI configuration file and extracts // Cilium relevant information. This can be used to pass per node // configuration to Cilium. @@ -1252,6 +1185,9 @@ const ( // CNIExclusive tells the agent to remove other CNI configuration files CNIExclusive = "cni-exclusive" + // CNIExternalRouting delegates endpoint routing to the chained CNI plugin. + CNIExternalRouting = "cni-external-routing" + // CNILogFile is the path to a log file (on the host) for the CNI plugin // binary to use for logging. CNILogFile = "cni-log-file" @@ -1261,6 +1197,9 @@ const ( // EnableExternalWorkloads enables the support for external workloads. 
EnableExternalWorkloads = "enable-external-workloads" + + // EnableSourceIPVerification enables the source ip verification, defaults to true + EnableSourceIPVerification = "enable-source-ip-verification" ) const ( @@ -1294,12 +1233,6 @@ const ( // DSR dispatch mode to encapsulate to Geneve DSRDispatchGeneve = "geneve" - // DSR L4 translation to frontend port - DSRL4XlateFrontend = "frontend" - - // DSR L4 translation to backend port - DSRL4XlateBackend = "backend" - // NodePortAccelerationDisabled means we do not accelerate NodePort via XDP NodePortAccelerationDisabled = XDPModeDisabled @@ -1309,22 +1242,15 @@ const ( // NodePortAccelerationNative means we accelerate NodePort via native XDP in the driver (preferred) NodePortAccelerationNative = XDPModeNative - // KubeProxyReplacementPartial specifies to enable only selected kube-proxy - // replacement features (might panic) - KubeProxyReplacementPartial = "partial" - - // KubeProxyReplacementStrict specifies to enable all kube-proxy replacement - // features (might panic) - KubeProxyReplacementStrict = "strict" - - // KubeProxyReplacementDisabled specified to completely disable kube-proxy - // replacement - KubeProxyReplacementDisabled = "disabled" + // NodePortAccelerationBestEffort means we accelerate NodePort via native XDP in the driver (preferred), but will skip devices without driver support + NodePortAccelerationBestEffort = XDPModeBestEffort - // KubeProxyReplacementTrue has the same meaning as previous "strict". + // KubeProxyReplacementTrue specifies to enable all kube-proxy replacement + // features (might panic). KubeProxyReplacementTrue = "true" - // KubeProxyReplacementTrue has the same meaning as previous "partial". + // KubeProxyReplacementFalse specifies to enable only selected kube-proxy + // replacement features (might panic). KubeProxyReplacementFalse = "false" // KubeProxyReplacement healthz server bind address @@ -1337,11 +1263,6 @@ const ( PprofPortAgent = 6060 ) -// GetTunnelModes returns the list of all tunnel modes -func GetTunnelModes() string { - return fmt.Sprintf("%s, %s, %s", TunnelVXLAN, TunnelGeneve, TunnelDisabled) -} - // getEnvName returns the environment variable to be used for the given option name. func getEnvName(option string) string { under := strings.Replace(option, "-", "_", -1) @@ -1373,7 +1294,7 @@ func BindEnvWithLegacyEnvFallback(vp *viper.Viper, optName, legacyEnvName string // LogRegisteredOptions logs all options that where bound to viper. func LogRegisteredOptions(vp *viper.Viper, entry *logrus.Entry) { keys := vp.AllKeys() - sort.Strings(keys) + slices.Sort(keys) for _, k := range keys { ss := vp.GetStringSlice(k) if len(ss) == 0 { @@ -1393,42 +1314,37 @@ func LogRegisteredOptions(vp *viper.Viper, entry *logrus.Entry) { // DaemonConfig is the configuration used by Daemon. 
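// Illustrative sketch, not part of the vendored patch: getEnvName above is
// only partially visible in this hunk. Assuming the remainder upper-cases the
// result and prepends ciliumEnvPrefix ("CILIUM_"), an option flag would map
// to its environment variable roughly as follows; the helper below is a
// standalone restatement, not the vendored implementation.
package main

import (
	"fmt"
	"strings"
)

const envPrefix = "CILIUM_" // mirrors ciliumEnvPrefix in config.go

// optionToEnv restates the assumed mapping: dashes become underscores,
// the name is upper-cased, and the prefix is added.
func optionToEnv(option string) string {
	under := strings.ReplaceAll(option, "-", "_")
	return envPrefix + strings.ToUpper(under)
}

func main() {
	fmt.Println(optionToEnv("conntrack-gc-max-interval")) // CILIUM_CONNTRACK_GC_MAX_INTERVAL
	fmt.Println(optionToEnv("bpf-lb-mode-annotation"))    // CILIUM_BPF_LB_MODE_ANNOTATION
}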
type DaemonConfig struct { - CreationTime time.Time - BpfDir string // BPF template files directory - LibDir string // Cilium library files directory - RunDir string // Cilium runtime directory - ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not - devicesMu lock.RWMutex // Protects devices - devices []string // bpf_host device - DirectRoutingDevice string // Direct routing device (used by BPF NodePort and BPF Host Routing) - LBDevInheritIPAddr string // Device which IP addr used by bpf_host devices - EnableXDPPrefilter bool // Enable XDP-based prefiltering - XDPMode string // XDP mode, values: { xdpdrv | xdpgeneric | none } - HostV4Addr net.IP // Host v4 address of the snooping device - HostV6Addr net.IP // Host v6 address of the snooping device - EncryptInterface []string // Set of network facing interface to encrypt over - EncryptNode bool // Set to true for encrypting node IP traffic + // Private sum of the config written to file. Used to check that the config is not changed + // after. + shaSum [32]byte + + CreationTime time.Time + BpfDir string // BPF template files directory + LibDir string // Cilium library files directory + RunDir string // Cilium runtime directory + ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not + LBDevInheritIPAddr string // Device which IP addr used by bpf_host devices + EnableXDPPrefilter bool // Enable XDP-based prefiltering + XDPMode string // XDP mode, values: { xdpdrv | xdpgeneric | none } + EnableTCX bool // Enable attaching endpoint programs using tcx if the kernel supports it + HostV4Addr net.IP // Host v4 address of the snooping device + HostV6Addr net.IP // Host v6 address of the snooping device + EncryptInterface []string // Set of network facing interface to encrypt over + EncryptNode bool // Set to true for encrypting node IP traffic // If set to true the daemon will detect new and deleted datapath devices // at runtime and reconfigure the datapath to load programs onto the new // devices. EnableRuntimeDeviceDetection bool - DatapathMode string // Datapath mode - Tunnel string // Tunnel mode - RoutingMode string // Routing mode - TunnelProtocol string // Tunneling protocol - TunnelPort int // Tunnel port + DatapathMode string // Datapath mode + RoutingMode string // Routing mode DryMode bool // Do not create BPF maps, devices, .. // RestoreState enables restoring the state from previous running daemons. RestoreState bool - // EnableHostIPRestore enables restoring the host IPs based on state - // left behind by previous Cilium runs. - EnableHostIPRestore bool - KeepConfig bool // Keep configuration of existing endpoints when starting up. // AllowLocalhost defines when to allows the local stack to local endpoints @@ -1441,9 +1357,6 @@ type DaemonConfig struct { // Options changeable at runtime Opts *IntOptions - // Mutex for serializing configuration updates to the daemon. - ConfigPatchMutex lock.RWMutex - // Monitor contains the configuration for the node monitor. Monitor *models.MonitorStatus @@ -1519,6 +1432,10 @@ type DaemonConfig struct { CTMapEntriesTimeoutSYN time.Duration CTMapEntriesTimeoutFIN time.Duration + // MaxInternalTimerDelay sets a maximum on all periodic timers in + // the agent in order to flush out timer-related bugs in the agent. + MaxInternalTimerDelay time.Duration + // MonitorAggregationInterval configures the interval between monitor // messages when monitor aggregation is enabled. 
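// Illustrative sketch, not part of the vendored patch: one plausible use of
// the new unexported shaSum field and the newly imported crypto/sha256
// package is to checksum the serialized configuration and later detect
// unexpected changes. This is an assumption about intent; the config type and
// helpers below are hypothetical.
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type config struct {
	Debug   bool   `json:"debug"`
	Devices string `json:"devices"`

	shaSum [32]byte // checksum of the marshaled public fields (ignored by encoding/json)
}

// seal records the checksum of the current configuration.
func (c *config) seal() error {
	data, err := json.Marshal(c)
	if err != nil {
		return err
	}
	c.shaSum = sha256.Sum256(data)
	return nil
}

// unchanged reports whether the configuration still matches the recorded checksum.
func (c *config) unchanged() bool {
	data, err := json.Marshal(c)
	if err != nil {
		return false
	}
	return c.shaSum == sha256.Sum256(data)
}

func main() {
	c := &config{Debug: true, Devices: "eth0"}
	_ = c.seal()
	fmt.Println(c.unchanged()) // true
	c.Devices = "eth1"
	fmt.Println(c.unchanged()) // false
}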
MonitorAggregationInterval time.Duration @@ -1528,6 +1445,24 @@ type DaemonConfig struct { // is enabled. Network byte-order. MonitorAggregationFlags uint16 + // BPFEventsDefaultRateLimit specifies limit of messages per second that can be written to + // BPF events map. This limit is defined for all types of events except dbg and pcap. + // The number of messages is averaged, meaning that if no messages were written + // to the map over 5 seconds, it's possible to write more events than the value of rate limit + // in the 6th second. + // + // If BPFEventsDefaultRateLimit > 0, non-zero value for BPFEventsDefaultBurstLimit must also be provided + // lest the configuration is considered invalid. + BPFEventsDefaultRateLimit uint32 + + // BPFEventsDefaultBurstLimit specifies the maximum number of messages that can be written + // to BPF events map in 1 second. This limit is defined for all types of events except dbg and pcap. + // + // If BPFEventsDefaultBurstLimit > 0, non-zero value for BPFEventsDefaultRateLimit must also be provided + // lest the configuration is considered invalid. + // If both burst and rate limit are 0 or not specified, no limit is imposed. + BPFEventsDefaultBurstLimit uint32 + // BPFMapsDynamicSizeRatio is ratio of total system memory to use for // dynamic sizing of the CT, NAT, Neighbor and SockRevNAT BPF maps. BPFMapsDynamicSizeRatio float64 @@ -1547,6 +1482,10 @@ type DaemonConfig struct { // endpoint may allow traffic to exchange traffic with. PolicyMapEntries int + // PolicyMapFullReconciliationInterval is the interval at which to perform + // the full reconciliation of the endpoint policy map. + PolicyMapFullReconciliationInterval time.Duration + // SockRevNatEntries is the maximum number of sock rev nat mappings // allowed in the BPF rev nat table SockRevNatEntries int @@ -1558,81 +1497,12 @@ type DaemonConfig struct { // RunInterval. Zero means unlimited. MaxControllerInterval int - // UseSingleClusterRoute specifies whether to use a single cluster route - // instead of per-node routes. - UseSingleClusterRoute bool - - // HTTPNormalizePath switches on Envoy HTTP path normalization options, which currently - // includes RFC 3986 path normalization, Envoy merge slashes option, and unescaping and - // redirecting for paths that contain escaped slashes. These are necessary to keep path based - // access control functional, and should not interfere with normal operation. Set this to - // false only with caution. - HTTPNormalizePath bool - // HTTP403Message is the error message to return when a HTTP 403 is returned // by the proxy, if L7 policy is configured. HTTP403Message string - // HTTPRequestTimeout is the time in seconds after which Envoy responds with an - // error code on a request that has not yet completed. This needs to be longer - // than the HTTPIdleTimeout - HTTPRequestTimeout int - - // HTTPIdleTimeout is the time in seconds of a HTTP stream having no traffic after - // which Envoy responds with an error code. This needs to be shorter than the - // HTTPRequestTimeout - HTTPIdleTimeout int - - // HTTPMaxGRPCTimeout is the upper limit to which "grpc-timeout" headers in GRPC - // requests are honored by Envoy. If 0 there is no limit. GRPC requests are not - // bound by the HTTPRequestTimeout, but ARE affected by the idle timeout! - HTTPMaxGRPCTimeout int - - // HTTPRetryCount is the upper limit on how many times Envoy retries failed requests. - HTTPRetryCount int - - // HTTPRetryTimeout is the time in seconds before an uncompleted request is retried. 
- HTTPRetryTimeout int - - // ProxyConnectTimeout is the time in seconds after which Envoy considers a TCP - // connection attempt to have timed out. - ProxyConnectTimeout int - - // ProxyGID specifies the group ID that has access to unix domain sockets opened by Cilium - // agent for proxy configuration and access logging. - ProxyGID int - - // ProxyPrometheusPort specifies the port to serve Envoy metrics on. - ProxyPrometheusPort int - - // ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for the proxy - ProxyMaxRequestsPerConnection int - - // ProxyMaxConnectionDuration specifies the max_connection_duration setting for the proxy - ProxyMaxConnectionDuration time.Duration - - // ProxyIdleTimeout specifies the idle_timeout setting (in seconds), which applies - // for the connection from proxy to upstream cluster - ProxyIdleTimeout time.Duration - - // EnvoyLogPath specifies where to store the Envoy proxy logs when Envoy - // runs in the same container as Cilium. - EnvoyLogPath string - ProcFs string - // PrependIptablesChains is the name of the option to enable prepending - // iptables chains instead of appending - PrependIptablesChains bool - - // IPTablesLockTimeout defines the "-w" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesLockTimeout time.Duration - - // IPTablesRandomFully defines the "--random-fully" iptables option when the - // iptables CLI is directly invoked from the Cilium agent. - IPTablesRandomFully bool - // K8sNamespace is the name of the namespace in which Cilium is // deployed in when running in Kubernetes mode K8sNamespace string @@ -1659,12 +1529,6 @@ type DaemonConfig struct { // EnableIPv6NDP is true when NDP is enabled for IPv6 EnableIPv6NDP bool - // EnableIPv6BIGTCP enables IPv6 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv6BIGTCP bool - - // EnableIPv4BIGTCP enables IPv4 BIG TCP (larger GSO/GRO limits) for the node including pods. - EnableIPv4BIGTCP bool - // EnableSRv6 is true when SRv6 encapsulation support is enabled EnableSRv6 bool @@ -1694,11 +1558,31 @@ type DaemonConfig struct { // be necessary on key rotations. EnableIPsecKeyWatcher bool + // EnableIPSecXfrmStateCaching enables IPSec XfrmState caching. + EnableIPSecXfrmStateCaching bool + + // EnableIPSecEncryptedOverlay enables IPSec encryption for overlay traffic. + EnableIPSecEncryptedOverlay bool + + // BootIDFile is the file containing the boot ID of the node + BootIDFile string + // EnableWireguard enables Wireguard encryption EnableWireguard bool - // EnableWireguardUserspaceFallback enables the fallback to the userspace implementation - EnableWireguardUserspaceFallback bool + // EnableEncryptionStrictMode enables strict mode for encryption + EnableEncryptionStrictMode bool + + // EncryptionStrictModeCIDR is the CIDR to use for strict mode + EncryptionStrictModeCIDR netip.Prefix + + // EncryptionStrictModeAllowRemoteNodeIdentities allows dynamic lookup of node identities. + // This is required when tunneling is used + // or direct routing is used and the node CIDR and pod CIDR overlap. + EncryptionStrictModeAllowRemoteNodeIdentities bool + + // WireguardPersistentKeepalive controls Wireguard PersistentKeepalive option. 
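// Illustrative sketch, not part of the vendored patch: EncryptionStrictModeCIDR
// above is a netip.Prefix, so deciding whether a peer falls under strict-mode
// enforcement reduces to a prefix-containment check. The helper below is
// hypothetical and ignores the remote-node-identity exception.
package main

import (
	"fmt"
	"net/netip"
)

// inStrictModeCIDR reports whether addr lies inside the strict encryption CIDR.
func inStrictModeCIDR(strictCIDR netip.Prefix, addr netip.Addr) bool {
	return strictCIDR.IsValid() && strictCIDR.Contains(addr)
}

func main() {
	strict := netip.MustParsePrefix("10.0.0.0/16") // e.g. value of --encryption-strict-mode-cidr
	fmt.Println(inStrictModeCIDR(strict, netip.MustParseAddr("10.0.4.7")))   // true: traffic must be encrypted
	fmt.Println(inStrictModeCIDR(strict, netip.MustParseAddr("192.0.2.10"))) // false: outside the range
}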
+ WireguardPersistentKeepalive time.Duration // EnableL2Announcements enables L2 announcement of service IPs EnableL2Announcements bool @@ -1736,11 +1620,13 @@ type DaemonConfig struct { EnableSocketLBPeer bool EnablePolicy string EnableTracing bool + EnableIPIPTermination bool EnableUnreachableRoutes bool - EnvoyLog string - DisableEnvoyVersionCheck bool FixedIdentityMapping map[string]string FixedIdentityMappingValidator func(val string) (string, error) `json:"-"` + FixedZoneMapping map[string]uint8 + ReverseFixedZoneMapping map[uint8]string + FixedZoneMappingValidator func(val string) (string, error) `json:"-"` IPv4Range string IPv6Range string IPv4ServiceRange string @@ -1754,34 +1640,30 @@ type DaemonConfig struct { Labels []string LogDriver []string LogOpt map[string]string - Logstash bool LogSystemLoadConfig bool // Masquerade specifies whether or not to masquerade packets from endpoints // leaving the host. - EnableIPv4Masquerade bool - EnableIPv6Masquerade bool - EnableBPFMasquerade bool - DeriveMasqIPAddrFromDevice string - EnableBPFClockProbe bool - EnableIPMasqAgent bool - EnableIPv4EgressGateway bool - EnableEnvoyConfig bool - EnableIngressController bool - EnableGatewayAPI bool - EnvoyConfigTimeout time.Duration - IPMasqAgentConfigPath string - InstallIptRules bool - MonitorAggregation string - PreAllocateMaps bool - IPv6NodeAddr string - IPv4NodeAddr string - SidecarIstioProxyImage string - SocketPath string - TracePayloadlen int - Version string - PrometheusServeAddr string - ToFQDNsMinTTL int + EnableIPv4Masquerade bool + EnableIPv6Masquerade bool + EnableBPFMasquerade bool + EnableMasqueradeRouteSource bool + EnableIPMasqAgent bool + IPMasqAgentConfigPath string + + EnableBPFClockProbe bool + EnableIPv4EgressGateway bool + EnableEnvoyConfig bool + InstallIptRules bool + MonitorAggregation string + PreAllocateMaps bool + IPv6NodeAddr string + IPv4NodeAddr string + SocketPath string + TracePayloadlen int + Version string + PrometheusServeAddr string + ToFQDNsMinTTL int // DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain // for each FQDN selector in endpoint's restored DNS rules @@ -1838,17 +1720,24 @@ type DaemonConfig struct { // been reached. DNSProxyConcurrencyProcessingGracePeriod time.Duration + // DNSProxyEnableTransparentMode enables transparent mode for the DNS proxy. + DNSProxyEnableTransparentMode bool + + // DNSProxyInsecureSkipTransparentModeCheck is a hidden flag that allows users + // to disable transparent mode even if IPSec is enabled + DNSProxyInsecureSkipTransparentModeCheck bool + // DNSProxyLockCount is the array size containing mutexes which protect - // against parallel handling of DNS response IPs. + // against parallel handling of DNS response names. DNSProxyLockCount int // DNSProxyLockTimeout is timeout when acquiring the locks controlled by // DNSProxyLockCount. DNSProxyLockTimeout time.Duration - // EnableXTSocketFallback allows disabling of kernel's ip_early_demux - // sysctl option if `xt_socket` kernel module is not available. - EnableXTSocketFallback bool + // DNSProxySocketLingerTimeout defines how many seconds we wait for the connection + // between the DNS proxy and the upstream server to be closed. + DNSProxySocketLingerTimeout int // EnableBPFTProxy enables implementing proxy redirection via BPF // mechanisms rather than iptables rules. 
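// Illustrative sketch, not part of the vendored patch: the new
// FixedZoneMapping / ReverseFixedZoneMapping fields earlier in this hunk keep
// a zone name -> uint8 ID mapping and its inverse, so an EndpointSlice zone
// string can be stored compactly in the lb{4,6}_backend BPF maps. The
// "zone=id" parsing format below is an assumption; only the forward/reverse
// map idea is taken from the patch.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildZoneMaps turns "zone=id" pairs into the forward and reverse lookup maps.
func buildZoneMaps(pairs []string) (map[string]uint8, map[uint8]string, error) {
	fwd := make(map[string]uint8, len(pairs))
	rev := make(map[uint8]string, len(pairs))
	for _, p := range pairs {
		zone, idStr, ok := strings.Cut(p, "=")
		if !ok {
			return nil, nil, fmt.Errorf("invalid zone mapping %q", p)
		}
		id, err := strconv.ParseUint(idStr, 10, 8)
		if err != nil {
			return nil, nil, fmt.Errorf("invalid zone ID in %q: %w", p, err)
		}
		fwd[zone] = uint8(id)
		rev[uint8(id)] = zone
	}
	return fwd, rev, nil
}

func main() {
	fwd, rev, err := buildZoneMaps([]string{"us-east-1a=1", "us-east-1b=2"})
	if err != nil {
		panic(err)
	}
	fmt.Println(fwd["us-east-1b"]) // 2: the compact ID written into the backend map
	fmt.Println(rev[1])            // us-east-1a: the zone name recovered from the ID
}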
@@ -1858,6 +1747,10 @@ type DaemonConfig struct { // other nodes when available EnableAutoDirectRouting bool + // DirectRoutingSkipUnreachable skips installation of direct routes + // to nodes when they're not on the same L2 + DirectRoutingSkipUnreachable bool + // EnableLocalNodeRoute controls installation of the route which points // the allocation prefix of the local node. EnableLocalNodeRoute bool @@ -1874,6 +1767,15 @@ type DaemonConfig struct { // cilium EnableHealthCheckNodePort bool + // EnableHealthCheckLoadBalancerIP enables health checking of LoadBalancerIP + // by cilium + EnableHealthCheckLoadBalancerIP bool + + // HealthCheckICMPFailureThreshold is the number of ICMP packets sent for each health + // checking run. If at least an ICMP response is received, the node or endpoint + // is marked as healthy. + HealthCheckICMPFailureThreshold int + // KVstoreKeepAliveInterval is the interval in which the lease is being // renewed. This must be set to a value lesser than the LeaseTTL ideally // by a factor of 3. @@ -1884,7 +1786,7 @@ type DaemonConfig struct { // KVstoreMaxConsecutiveQuorumErrors is the maximum number of acceptable // kvstore consecutive quorum errors before the agent assumes permanent failure - KVstoreMaxConsecutiveQuorumErrors int + KVstoreMaxConsecutiveQuorumErrors uint // KVstorePeriodicSync is the time interval in which periodic // synchronization with the kvstore occurs @@ -1893,8 +1795,9 @@ type DaemonConfig struct { // KVstoreConnectivityTimeout is the timeout when performing kvstore operations KVstoreConnectivityTimeout time.Duration - // IPAllocationTimeout is the timeout when allocating CIDRs - IPAllocationTimeout time.Duration + // KVstorePodNetworkSupport enables the support for running the Cilium KVstore + // in pod network. + KVstorePodNetworkSupport bool // IdentityChangeGracePeriod is the grace period that needs to pass // before an endpoint that has changed its identity will start using @@ -1908,12 +1811,10 @@ type DaemonConfig struct { // unused after this time, they will be removed from the IP cache. Any of the restored // identities that are used in network policies will remain in the IP cache until all such // policies are removed. + // + // The default is 30 seconds for k8s clusters, and 10 minutes for kvstore clusters IdentityRestoreGracePeriod time.Duration - // PolicyQueueSize is the size of the queues for the policy repository. - // A larger queue means that more events related to policy can be buffered. - PolicyQueueSize int - // EndpointQueueSize is the size of the EventQueue per-endpoint. A larger // queue means that more events can be buffered per-endpoint. This is useful // in the case where a cluster might be under high load for endpoint-related @@ -1924,11 +1825,9 @@ type DaemonConfig struct { // interval ConntrackGCInterval time.Duration - // K8sEventHandover enables use of the kvstore to optimize Kubernetes - // event handling by listening for k8s events in the operator and - // mirroring it into the kvstore for reduced overhead in large - // clusters. - K8sEventHandover bool + // ConntrackGCMaxInterval if set limits the automatic GC interval calculation to + // the specified maximum value. 
+ ConntrackGCMaxInterval time.Duration // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 string @@ -1967,19 +1866,22 @@ type DaemonConfig struct { // ("snat", "dsr" or "hybrid") NodePortMode string + // LoadBalancerModeAnnotation tells whether controller should check service + // level annotation for configuring bpf load balancing algorithm. + LoadBalancerModeAnnotation bool + // NodePortAlg indicates which backend selection algorithm is used // ("random" or "maglev") NodePortAlg string + // LoadBalancerAlgorithmAnnotation tells whether controller should check service + // level annotation for configuring bpf load balancing algorithm. + LoadBalancerAlgorithmAnnotation bool + // LoadBalancerDSRDispatch indicates the method for pushing packets to // backends under DSR ("opt" or "ipip") LoadBalancerDSRDispatch string - // LoadBalancerDSRL4Xlate indicates the method for L4 DNAT translation - // under IPIP dispatch, that is, whether the inner packet will be - // translated to the frontend or backend port. - LoadBalancerDSRL4Xlate string - // LoadBalancerRSSv4CIDR defines the outer source IPv4 prefix for DSR/IPIP LoadBalancerRSSv4CIDR string LoadBalancerRSSv4 net.IPNet @@ -1988,18 +1890,19 @@ type DaemonConfig struct { LoadBalancerRSSv6CIDR string LoadBalancerRSSv6 net.IPNet + // LoadBalancerExternalControlPlane tells whether to not use kube-apiserver as + // its control plane in lb-only mode. + LoadBalancerExternalControlPlane bool + + // LoadBalancerProtocolDifferentiation enables support for service protocol differentiation (TCP, UDP, SCTP) + LoadBalancerProtocolDifferentiation bool + // EnablePMTUDiscovery indicates whether to send ICMP fragmentation-needed // replies to the client (when needed). EnablePMTUDiscovery bool - // Maglev backend table size (M) per service. Must be prime number. - MaglevTableSize int - - // MaglevHashSeed contains the cluster-wide seed for the hash(es). - MaglevHashSeed string - // NodePortAcceleration indicates whether NodePort should be accelerated - // via XDP ("none", "generic" or "native") + // via XDP ("none", "generic", "native", or "best-effort") NodePortAcceleration string // NodePortBindProtection rejects bind requests to NodePort service ports @@ -2018,15 +1921,6 @@ type DaemonConfig struct { // considered local ones with HOST_ID in the ipcache AddressScopeMax int - // EnableBandwidthManager enables EDT-based pacing - EnableBandwidthManager bool - - // EnableBBR enables BBR TCP congestion control for the node including Pods - EnableBBR bool - - // ResetQueueMapping resets the Pod's skb queue mapping - ResetQueueMapping bool - // EnableRecorder enables the datapath pcap recorder EnableRecorder bool @@ -2048,10 +1942,6 @@ type DaemonConfig struct { // EnableLocalRedirectPolicy enables redirect policies to redirect traffic within nodes EnableLocalRedirectPolicy bool - // K8sEnableEndpointSlice enables k8s endpoint slice feature that is used - // in kubernetes. 
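// Illustrative sketch, not part of the vendored patch: the ConntrackGCMaxInterval
// field added at the top of this hunk only caps the dynamically computed GC
// interval, so the clamping itself is a one-liner. The helper name is hypothetical.
package main

import (
	"fmt"
	"time"
)

// capGCInterval limits a computed conntrack GC interval to maxInterval (0 = no cap).
func capGCInterval(computed, maxInterval time.Duration) time.Duration {
	if maxInterval > 0 && computed > maxInterval {
		return maxInterval
	}
	return computed
}

func main() {
	fmt.Println(capGCInterval(12*time.Hour, 4*time.Hour)) // 4h0m0s: capped
	fmt.Println(capGCInterval(30*time.Minute, 0))         // 30m0s: no cap configured
}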
- K8sEnableK8sEndpointSlice bool - // NodePortMin is the minimum port address for the NodePort range NodePortMin int @@ -2061,8 +1951,6 @@ type DaemonConfig struct { // EnableSessionAffinity enables a support for service sessionAffinity EnableSessionAffinity bool - EnableServiceTopology bool - // Selection of BPF main clock source (ktime vs jiffies) ClockSource BPFClockSource @@ -2094,20 +1982,25 @@ type DaemonConfig struct { // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool IPAMMultiPoolPreAllocation map[string]string - + // IPAMDefaultIPPool the default IP Pool when using multi-pool + IPAMDefaultIPPool string // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node AutoCreateCiliumNodeResource bool + // ExcludeNodeLabelPatterns allows for excluding unnecessary labels from being propagated from k8s node to cilium + // node object. This allows for avoiding unnecessary events being broadcast to all nodes in the cluster. + ExcludeNodeLabelPatterns []*regexp.Regexp + // IPv4NativeRoutingCIDR describes a CIDR in which pod IPs are routable IPv4NativeRoutingCIDR *cidr.CIDR // IPv6NativeRoutingCIDR describes a CIDR in which pod IPs are routable IPv6NativeRoutingCIDR *cidr.CIDR - // EgressMasqueradeInterfaces is the selector used to select interfaces - // subject to egress masquerading - EgressMasqueradeInterfaces string + // MasqueradeInterfaces is the selector used to select interfaces subject + // to egress masquerading. + MasqueradeInterfaces []string // PolicyTriggerInterval is the amount of time between when policy updates // are triggered. @@ -2117,10 +2010,6 @@ type DaemonConfig struct { // allocation IdentityAllocationMode string - // DisableCNPStatusUpdates disables updating of CNP NodeStatus in the CNP - // CRD. - DisableCNPStatusUpdates bool - // AllowICMPFragNeeded allows ICMP Fragmentation Needed type packets in // the network policy for cilium-agent. AllowICMPFragNeeded bool @@ -2130,9 +2019,6 @@ type DaemonConfig struct { // control plane, e.g. when using the managed etcd feature EnableWellKnownIdentities bool - // EnableRemoteNodeIdentity enables use of the remote-node identity - EnableRemoteNodeIdentity bool - // Azure options // PolicyAuditMode enables non-drop mode for installed policies. In @@ -2140,89 +2026,8 @@ type DaemonConfig struct { // Policy related decisions can be checked via the poicy verdict messages. PolicyAuditMode bool - // EnableHubble specifies whether to enable the hubble server. - EnableHubble bool - - // HubbleSocketPath specifies the UNIX domain socket for Hubble server to listen to. - HubbleSocketPath string - - // HubbleListenAddress specifies address for Hubble to listen to. - HubbleListenAddress string - - // HubblePreferIpv6 controls whether IPv6 or IPv4 addresses should be preferred for - // communication to agents, if both are available. - HubblePreferIpv6 bool - - // HubbleTLSDisabled allows the Hubble server to run on the given listen - // address without TLS. - HubbleTLSDisabled bool - - // HubbleTLSCertFile specifies the path to the public key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSCertFile string - - // HubbleTLSKeyFile specifies the path to the private key file for the - // Hubble server. The file must contain PEM encoded data. - HubbleTLSKeyFile string - - // HubbleTLSClientCAFiles specifies the path to one or more client CA - // certificates to use for TLS with mutual authentication (mTLS). 
The files - // must contain PEM encoded data. - HubbleTLSClientCAFiles []string - - // HubbleEventBufferCapacity specifies the capacity of Hubble events buffer. - HubbleEventBufferCapacity int - - // HubbleEventQueueSize specifies the buffer size of the channel to receive monitor events. - HubbleEventQueueSize int - - // HubbleMetricsServer specifies the addresses to serve Hubble metrics on. - HubbleMetricsServer string - - // HubbleMetrics specifies enabled metrics and their configuration options. - HubbleMetrics []string - - // HubbleExportFilePath specifies the filepath to write Hubble events to. - // e.g. "/var/run/cilium/hubble/events.log" - HubbleExportFilePath string - - // HubbleExportFileMaxSizeMB specifies the file size in MB at which to rotate - // the Hubble export file. - HubbleExportFileMaxSizeMB int - - // HubbleExportFileMaxBacks specifies the number of rotated files to keep. - HubbleExportFileMaxBackups int - - // HubbleExportFileCompress specifies whether rotated files are compressed. - HubbleExportFileCompress bool - - // EnableHubbleRecorderAPI specifies if the Hubble Recorder API should be served - EnableHubbleRecorderAPI bool - - // EnableHubbleOpenMetrics enables exporting hubble metrics in OpenMetrics format. - EnableHubbleOpenMetrics bool - - // HubbleRecorderStoragePath specifies the directory in which pcap files - // created via the Hubble Recorder API are stored - HubbleRecorderStoragePath string - - // HubbleRecorderSinkQueueSize is the queue size for each recorder sink - HubbleRecorderSinkQueueSize int - - // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped - HubbleSkipUnknownCGroupIDs bool - - // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. - // By default, Hubble observes all monitor events. - HubbleMonitorEvents []string - - // EndpointStatus enables population of information in the - // CiliumEndpoint.Status resource - EndpointStatus map[string]struct{} - - // DisableIptablesFeederRules specifies which chains will be excluded - // when installing the feeder rules - DisableIptablesFeederRules []string + // PolicyAccounting enable policy accounting + PolicyAccounting bool // EnableIPv4FragmentsTracking enables IPv4 fragments tracking for // L4-based lookups. Needs LRU map support. @@ -2270,22 +2075,16 @@ type DaemonConfig struct { // LBAffinityMapEntries is the maximum number of entries allowed in BPF lbmap for session affinities. LBAffinityMapEntries int + // LBSourceRangeAllTypes enables propagation of loadbalancerSourceRanges to all Kubernetes + // service types which were created from the LoadBalancer service. + LBSourceRangeAllTypes bool + // LBSourceRangeMapEntries is the maximum number of entries allowed in BPF lbmap for source ranges. LBSourceRangeMapEntries int // LBMaglevMapEntries is the maximum number of entries allowed in BPF lbmap for maglev. LBMaglevMapEntries int - // K8sServiceProxyName is the value of service.kubernetes.io/service-proxy-name label, - // that identifies the service objects Cilium should handle. - // If the provided value is an empty string, Cilium will manage service objects when - // the label is not present. 
For more details - - // https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/2447-Make-kube-proxy-service-abstraction-optional - K8sServiceProxyName string - - // APIRateLimitName enables configuration of the API rate limits - APIRateLimit map[string]string - // CRDWaitTimeout is the timeout in which Cilium will exit if CRDs are not // available. CRDWaitTimeout time.Duration @@ -2295,23 +2094,24 @@ type DaemonConfig struct { // Otherwise, it will use the old scheme. EgressMultiHomeIPRuleCompat bool + // Install ingress/egress routes through uplink on host for Pods when working with + // delegated IPAM plugin. + InstallUplinkRoutesForDelegatedIPAM bool + // InstallNoConntrackIptRules instructs Cilium to install Iptables rules to skip netfilter connection tracking on all pod traffic. InstallNoConntrackIptRules bool + // ContainerIPLocalReservedPorts instructs the Cilium CNI plugin to reserve + // the provided comma-separated list of ports in the container network namespace + ContainerIPLocalReservedPorts string + // EnableCustomCalls enables tail call hooks for user-defined custom // eBPF programs, typically used to collect custom per-endpoint // metrics. EnableCustomCalls bool - // BGPAnnounceLBIP announces service IPs of type LoadBalancer via BGP. - BGPAnnounceLBIP bool - - // BGPAnnouncePodCIDR announces the node's pod CIDR via BGP. - BGPAnnouncePodCIDR bool - - // BGPConfigPath is the file path to the BGP configuration. It is - // compatible with MetalLB's configuration. - BGPConfigPath string + // BGPSecretsNamespace is the Kubernetes namespace to get BGP control plane secrets from. + BGPSecretsNamespace string // ExternalClusterIP enables routing to ClusterIP services from outside // the cluster. This mirrors the behaviour of kube-proxy. @@ -2319,6 +2119,7 @@ type DaemonConfig struct { // ARPPingRefreshPeriod is the ARP entries refresher period. ARPPingRefreshPeriod time.Duration + // EnableCiliumEndpointSlice enables the cilium endpoint slicing feature. EnableCiliumEndpointSlice bool @@ -2327,6 +2128,10 @@ type DaemonConfig struct { // VLANBPFBypass list of explicitly allowed VLAN id's for bpf logic bypass VLANBPFBypass []int + + // DisableExternalIPMigration disable externalIP mitigation (CVE-2020-8554) + DisableExternalIPMitigation bool + // EnableL2NeighDiscovery determines if cilium should perform L2 neighbor // discovery. EnableL2NeighDiscovery bool @@ -2368,8 +2173,8 @@ type DaemonConfig struct { // Enables BGP control plane features. EnableBGPControlPlane bool - // EnvoySecretNamespaces for TLS secrets. Used by CiliumEnvoyConfig via SDS. - EnvoySecretNamespaces []string + // Enables BGP control plane status reporting. + EnableBGPControlPlaneStatusReport bool // BPFMapEventBuffers has configuration on what BPF map event buffers to enabled // and configuration options for those. @@ -2377,10 +2182,17 @@ type DaemonConfig struct { BPFMapEventBuffersValidator func(val string) (string, error) `json:"-"` bpfMapEventConfigs BPFEventBufferConfigs - // EnableStaleCiliumEndpointCleanup enables cleanup routine during Cilium init. - // This will attempt to remove local CiliumEndpoints that are not managed by Cilium - // following Endpoint restoration. - EnableStaleCiliumEndpointCleanup bool + // BPFEventsDropEnabled controls whether the Cilium datapath exposes "drop" events to Cilium monitor and Hubble. + BPFEventsDropEnabled bool + + // BPFEventsPolicyVerdictEnabled controls whether the Cilium datapath exposes "policy verdict" events to Cilium monitor and Hubble. 
+ BPFEventsPolicyVerdictEnabled bool + + // BPFEventsTraceEnabled controls whether the Cilium datapath exposes "trace" events to Cilium monitor and Hubble. + BPFEventsTraceEnabled bool + + // BPFConntrackAccounting controls whether CT accounting for packets and bytes is enabled. + BPFConntrackAccounting bool // IPAMCiliumNodeUpdateRate is the maximum rate at which the CiliumNode custom // resource is updated. @@ -2388,107 +2200,123 @@ type DaemonConfig struct { // EnableK8sNetworkPolicy enables support for K8s NetworkPolicy. EnableK8sNetworkPolicy bool -} -var ( - // Config represents the daemon configuration - Config = &DaemonConfig{ - CreationTime: time.Now(), - Opts: NewIntOptions(&DaemonOptionLibrary), - Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0}, - IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR, - IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase, - EnableHostIPRestore: defaults.EnableHostIPRestore, - EnableHealthChecking: defaults.EnableHealthChecking, - EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking, - EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort, - EnableIPv4: defaults.EnableIPv4, - EnableIPv6: defaults.EnableIPv6, - EnableIPv6NDP: defaults.EnableIPv6NDP, - EnableSCTP: defaults.EnableSCTP, - EnableL7Proxy: defaults.EnableL7Proxy, - EndpointStatus: make(map[string]struct{}), - DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule, - ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost, - KVstorePeriodicSync: defaults.KVstorePeriodicSync, - KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout, - IPAllocationTimeout: defaults.IPAllocationTimeout, - IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod, - IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriod, - FixedIdentityMapping: make(map[string]string), - KVStoreOpt: make(map[string]string), - LogOpt: make(map[string]string), - LoopbackIPv4: defaults.LoopbackIPv4, - EnableEndpointRoutes: defaults.EnableEndpointRoutes, - AnnotateK8sNode: defaults.AnnotateK8sNode, - K8sServiceCacheSize: defaults.K8sServiceCacheSize, - AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource, - IdentityAllocationMode: IdentityAllocationModeKVstore, - AllowICMPFragNeeded: defaults.AllowICMPFragNeeded, - EnableWellKnownIdentities: defaults.EnableWellKnownIdentities, - K8sEnableK8sEndpointSlice: defaults.K8sEnableEndpointSlice, - AllocatorListTimeout: defaults.AllocatorListTimeout, - EnableICMPRules: defaults.EnableICMPRules, - UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec, + // EnableCiliumNetworkPolicy enables support for Cilium Network Policy. + EnableCiliumNetworkPolicy bool - K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery, - APIRateLimit: make(map[string]string), + // EnableCiliumClusterwideNetworkPolicy enables support for Cilium Clusterwide + // Network Policy. + EnableCiliumClusterwideNetworkPolicy bool - ExternalClusterIP: defaults.ExternalClusterIP, - EnableVTEP: defaults.EnableVTEP, - EnableBGPControlPlane: defaults.EnableBGPControlPlane, - EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, - } -) + // PolicyCIDRMatchMode is the list of entities that can be selected by CIDR policy. 
+ // Currently supported values: + // - world + // - world, remote-node + PolicyCIDRMatchMode []string -// GetIPv4NativeRoutingCIDR returns the native routing CIDR if configured -func (c *DaemonConfig) GetIPv4NativeRoutingCIDR() (cidr *cidr.CIDR) { - c.ConfigPatchMutex.RLock() - cidr = c.IPv4NativeRoutingCIDR - c.ConfigPatchMutex.RUnlock() - return -} + // MaxConnectedClusters sets the maximum number of clusters that can be + // connected in a clustermesh. + // The value is used to determine the bit allocation for cluster ID and + // identity in a numeric identity. Values > 255 will decrease the number of + // allocatable identities. + MaxConnectedClusters uint32 -// SetIPv4NativeRoutingCIDR sets the native routing CIDR -func (c *DaemonConfig) SetIPv4NativeRoutingCIDR(cidr *cidr.CIDR) { - c.ConfigPatchMutex.Lock() - c.IPv4NativeRoutingCIDR = cidr - c.ConfigPatchMutex.Unlock() -} + // ForceDeviceRequired enforces the attachment of BPF programs on native device. + ForceDeviceRequired bool -// GetIPv6NativeRoutingCIDR returns the native routing CIDR if configured -func (c *DaemonConfig) GetIPv6NativeRoutingCIDR() (cidr *cidr.CIDR) { - c.ConfigPatchMutex.RLock() - cidr = c.IPv6NativeRoutingCIDR - c.ConfigPatchMutex.RUnlock() - return -} + // ServiceNoBackendResponse determines how we handle traffic to a service with no backends. + ServiceNoBackendResponse string -// SetIPv6NativeRoutingCIDR sets the native routing CIDR -func (c *DaemonConfig) SetIPv6NativeRoutingCIDR(cidr *cidr.CIDR) { - c.ConfigPatchMutex.Lock() - c.IPv6NativeRoutingCIDR = cidr - c.ConfigPatchMutex.Unlock() -} + // EnableNodeSelectorLabels enables use of the node label based identity + EnableNodeSelectorLabels bool -func (c *DaemonConfig) SetDevices(devices []string) { - c.devicesMu.Lock() - c.devices = devices - c.devicesMu.Unlock() -} + // NodeLabels is the list of label prefixes used to determine identity of a node (requires enabling of + // EnableNodeSelectorLabels) + NodeLabels []string -func (c *DaemonConfig) GetDevices() []string { - c.devicesMu.RLock() - defer c.devicesMu.RUnlock() - return c.devices -} + // EnableSocketLBPodConnectionTermination enables the termination of connections from pods + // to deleted service backends when socket-LB is enabled + EnableSocketLBPodConnectionTermination bool + + // EnableInternalTrafficPolicy enables handling routing for services with internalTrafficPolicy configured + EnableInternalTrafficPolicy bool + + // EnableNonDefaultDenyPolicies allows policies to define whether they are operating in default-deny mode + EnableNonDefaultDenyPolicies bool + + // EnableSourceIPVerification enables the source ip validation of connection from endpoints to endpoints + EnableSourceIPVerification bool -func (c *DaemonConfig) AppendDevice(dev string) { - c.devicesMu.Lock() - c.devices = append(c.devices, dev) - c.devicesMu.Unlock() + // EnableEndpointLockdownOnPolicyOverflow enables endpoint lockdown when an endpoint's + // policy map overflows. 
+ EnableEndpointLockdownOnPolicyOverflow bool } +var ( + // Config represents the daemon configuration + Config = &DaemonConfig{ + CreationTime: time.Now(), + Opts: NewIntOptions(&DaemonOptionLibrary), + Monitor: &models.MonitorStatus{Cpus: int64(runtime.NumCPU()), Npages: 64, Pagesize: int64(os.Getpagesize()), Lost: 0, Unknown: 0}, + IPv6ClusterAllocCIDR: defaults.IPv6ClusterAllocCIDR, + IPv6ClusterAllocCIDRBase: defaults.IPv6ClusterAllocCIDRBase, + IPAMDefaultIPPool: defaults.IPAMDefaultIPPool, + EnableHealthChecking: defaults.EnableHealthChecking, + EnableEndpointHealthChecking: defaults.EnableEndpointHealthChecking, + EnableHealthCheckLoadBalancerIP: defaults.EnableHealthCheckLoadBalancerIP, + EnableHealthCheckNodePort: defaults.EnableHealthCheckNodePort, + HealthCheckICMPFailureThreshold: defaults.HealthCheckICMPFailureThreshold, + EnableIPv4: defaults.EnableIPv4, + EnableIPv6: defaults.EnableIPv6, + EnableIPv6NDP: defaults.EnableIPv6NDP, + EnableSCTP: defaults.EnableSCTP, + EnableL7Proxy: defaults.EnableL7Proxy, + DNSMaxIPsPerRestoredRule: defaults.DNSMaxIPsPerRestoredRule, + ToFQDNsMaxIPsPerHost: defaults.ToFQDNsMaxIPsPerHost, + KVstorePeriodicSync: defaults.KVstorePeriodicSync, + KVstoreConnectivityTimeout: defaults.KVstoreConnectivityTimeout, + KVstorePodNetworkSupport: defaults.KVstorePodNetworkSupport, + IdentityChangeGracePeriod: defaults.IdentityChangeGracePeriod, + IdentityRestoreGracePeriod: defaults.IdentityRestoreGracePeriodK8s, + FixedIdentityMapping: make(map[string]string), + KVStoreOpt: make(map[string]string), + LogOpt: make(map[string]string), + LoopbackIPv4: defaults.LoopbackIPv4, + EnableEndpointRoutes: defaults.EnableEndpointRoutes, + AnnotateK8sNode: defaults.AnnotateK8sNode, + K8sServiceCacheSize: defaults.K8sServiceCacheSize, + AutoCreateCiliumNodeResource: defaults.AutoCreateCiliumNodeResource, + IdentityAllocationMode: IdentityAllocationModeKVstore, + AllowICMPFragNeeded: defaults.AllowICMPFragNeeded, + EnableWellKnownIdentities: defaults.EnableWellKnownIdentities, + AllocatorListTimeout: defaults.AllocatorListTimeout, + EnableICMPRules: defaults.EnableICMPRules, + UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec, + + K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery, + + ExternalClusterIP: defaults.ExternalClusterIP, + EnableVTEP: defaults.EnableVTEP, + EnableBGPControlPlane: defaults.EnableBGPControlPlane, + EnableK8sNetworkPolicy: defaults.EnableK8sNetworkPolicy, + EnableCiliumNetworkPolicy: defaults.EnableCiliumNetworkPolicy, + EnableCiliumClusterwideNetworkPolicy: defaults.EnableCiliumClusterwideNetworkPolicy, + PolicyCIDRMatchMode: defaults.PolicyCIDRMatchMode, + MaxConnectedClusters: defaults.MaxConnectedClusters, + + BPFEventsDropEnabled: defaults.BPFEventsDropEnabled, + BPFEventsPolicyVerdictEnabled: defaults.BPFEventsPolicyVerdictEnabled, + BPFEventsTraceEnabled: defaults.BPFEventsTraceEnabled, + BPFConntrackAccounting: defaults.BPFConntrackAccounting, + EnableEnvoyConfig: defaults.EnableEnvoyConfig, + EnableInternalTrafficPolicy: defaults.EnableInternalTrafficPolicy, + + EnableNonDefaultDenyPolicies: defaults.EnableNonDefaultDenyPolicies, + + EnableSourceIPVerification: defaults.EnableSourceIPVerification, + } +) + // IsExcludedLocalAddress returns true if the specified IP matches one of the // excluded local IP ranges func (c *DaemonConfig) IsExcludedLocalAddress(ip net.IP) bool { @@ -2542,31 +2370,41 @@ func (c *DaemonConfig) TunnelingEnabled() bool { return c.RoutingMode != RoutingModeNative } -// 
TunnelDevice returns cilium_{vxlan,geneve} depending on the config or "" if disabled. -func (c *DaemonConfig) TunnelDevice() string { - if c.TunnelingEnabled() { - return fmt.Sprintf("cilium_%s", c.TunnelProtocol) - } else { - return "" - } -} - -// TunnelExists returns true if some traffic may go through a tunnel, including -// if the primary mode is native routing. For example, in the egress gateway, -// we may send such traffic to a gateway node via a tunnel. -// In conjunction with the DSR Geneve and the direct routing, traffic from -// intermediate nodes to backend pods go through a tunnel, but the datapath logic -// takes care of the MTU overhead. So no need to take it into account here. -// See encap_geneve_dsr_opt[4,6] in nodeport.h -func (c *DaemonConfig) TunnelExists() bool { - return c.TunnelingEnabled() || c.EnableIPv4EgressGateway || c.EnableHighScaleIPcache -} - // AreDevicesRequired returns true if the agent needs to attach to the native // devices to implement some features. func (c *DaemonConfig) AreDevicesRequired() bool { - return c.EnableNodePort || c.EnableHostFirewall || c.EnableBandwidthManager || - c.EnableWireguard || c.EnableHighScaleIPcache || c.EnableL2Announcements + return c.EnableNodePort || c.EnableHostFirewall || c.EnableWireguard || + c.EnableHighScaleIPcache || c.EnableL2Announcements || c.ForceDeviceRequired || + c.EnableIPSecEncryptedOverlay +} + +// NeedBPFHostOnWireGuardDevice returns true if the agent needs to attach +// a BPF program on the Ingress of Cilium's WireGuard device +func (c *DaemonConfig) NeedBPFHostOnWireGuardDevice() bool { + if !c.EnableWireguard { + return false + } + + // In native routing mode we want to deliver packets to local endpoints + // straight from BPF, without passing through the stack. + // This matches overlay mode (where bpf_overlay would handle the delivery) + // and native routing mode without encryption (where bpf_host at the native + // device would handle the delivery). + if !c.TunnelingEnabled() { + return true + } + + // When WG & encrypt-node are on, a NodePort BPF to-be forwarded request + // to a remote node running a selected service endpoint must be encrypted. + // To make the NodePort's rev-{S,D}NAT translations to happen for a reply + // from the remote node, we need to attach bpf_host to the Cilium's WG + // netdev (otherwise, the WG netdev after decrypting the reply will pass + // it to the stack which drops the packet). + if c.EnableNodePort && c.EncryptNode { + return true + } + + return false } // MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled. 
@@ -2598,12 +2436,6 @@ func (c *DaemonConfig) NodeIpsetNeeded() bool { return !c.TunnelingEnabled() && c.IptablesMasqueradingEnabled() } -// RemoteNodeIdentitiesEnabled returns true if the remote-node identity feature -// is enabled -func (c *DaemonConfig) RemoteNodeIdentitiesEnabled() bool { - return c.EnableRemoteNodeIdentity -} - // NodeEncryptionEnabled returns true if node encryption is enabled func (c *DaemonConfig) NodeEncryptionEnabled() bool { return c.EncryptNode @@ -2624,6 +2456,11 @@ func (c *DaemonConfig) IPv6Enabled() bool { return c.EnableIPv6 } +// LBProtoDiffEnabled returns true if LoadBalancerProtocolDifferentiation is enabled +func (c *DaemonConfig) LBProtoDiffEnabled() bool { + return c.LoadBalancerProtocolDifferentiation +} + // IPv6NDPEnabled returns true if IPv6 NDP support is enabled func (c *DaemonConfig) IPv6NDPEnabled() bool { return c.EnableIPv6NDP @@ -2655,30 +2492,6 @@ func (c *DaemonConfig) UnreachableRoutesEnabled() bool { return c.EnableUnreachableRoutes } -// EndpointStatusIsEnabled returns true if a particular EndpointStatus* feature -// is enabled -func (c *DaemonConfig) EndpointStatusIsEnabled(option string) bool { - _, ok := c.EndpointStatus[option] - return ok -} - -// LocalClusterName returns the name of the cluster Cilium is deployed in -func (c *DaemonConfig) LocalClusterName() string { - return c.ClusterName -} - -// LocalClusterID returns the ID of the cluster local to the Cilium agent. -func (c *DaemonConfig) LocalClusterID() uint32 { - return c.ClusterID -} - -// K8sServiceProxyName returns the required value for the -// service.kubernetes.io/service-proxy-name label in order for services to be -// handled. -func (c *DaemonConfig) K8sServiceProxyNameValue() string { - return c.K8sServiceProxyName -} - // CiliumNamespaceName returns the name of the namespace in which Cilium is // deployed in func (c *DaemonConfig) CiliumNamespaceName() string { @@ -2700,14 +2513,32 @@ func (c *DaemonConfig) K8sNetworkPolicyEnabled() bool { return c.EnableK8sNetworkPolicy } -// K8sIngressControllerEnabled returns true if ingress controller feature is enabled in Cilium -func (c *DaemonConfig) K8sIngressControllerEnabled() bool { - return c.EnableIngressController +func (c *DaemonConfig) PolicyCIDRMatchesNodes() bool { + for _, mode := range c.PolicyCIDRMatchMode { + if mode == "nodes" { + return true + } + } + return false } -// K8sGatewayAPIEnabled returns true if Gateway API feature is enabled in Cilium -func (c *DaemonConfig) K8sGatewayAPIEnabled() bool { - return c.EnableGatewayAPI +// PerNodeLabelsEnabled returns true if per-node labels feature +// is enabled +func (c *DaemonConfig) PerNodeLabelsEnabled() bool { + return c.EnableNodeSelectorLabels +} + +func (c *DaemonConfig) validatePolicyCIDRMatchMode() error { + // Currently, the only acceptable values is "nodes". 
+ for _, mode := range c.PolicyCIDRMatchMode { + switch mode { + case "nodes": + continue + default: + return fmt.Errorf("unknown CIDR match mode: %s", mode) + } + } + return nil } // DirectRoutingDeviceRequired return whether the Direct Routing Device is needed under @@ -2722,7 +2553,19 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { return true } - return (c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard) && !c.TunnelingEnabled() + return c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard +} + +func (c *DaemonConfig) LoadBalancerUsesDSR() bool { + return c.NodePortMode == NodePortModeDSR || + c.NodePortMode == NodePortModeHybrid || + c.LoadBalancerModeAnnotation +} + +// KVstoreEnabledWithoutPodNetworkSupport returns whether Cilium is configured to connect +// to an external KVStore, and the support for running it in pod network is disabled. +func (c *DaemonConfig) KVstoreEnabledWithoutPodNetworkSupport() bool { + return c.KVStore != "" && !c.KVstorePodNetworkSupport } func (c *DaemonConfig) validateIPv6ClusterAllocCIDR() error { @@ -2753,15 +2596,27 @@ func (c *DaemonConfig) validateIPv6NAT46x64CIDR() error { return nil } +func (c *DaemonConfig) validateContainerIPLocalReservedPorts() error { + if c.ContainerIPLocalReservedPorts == "" || c.ContainerIPLocalReservedPorts == defaults.ContainerIPLocalReservedPortsAuto { + return nil + } + + if regexp.MustCompile(`^(\d+(-\d+)?)(,\d+(-\d+)?)*$`).MatchString(c.ContainerIPLocalReservedPorts) { + return nil + } + + return fmt.Errorf("Invalid comma separated list of of ranges for %s option", ContainerIPLocalReservedPorts) +} + // Validate validates the daemon configuration func (c *DaemonConfig) Validate(vp *viper.Viper) error { if err := c.validateIPv6ClusterAllocCIDR(); err != nil { - return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %s", + return fmt.Errorf("unable to parse CIDR value '%s' of option --%s: %w", c.IPv6ClusterAllocCIDR, IPv6ClusterAllocCIDRName, err) } if err := c.validateIPv6NAT46x64CIDR(); err != nil { - return fmt.Errorf("unable to parse internal CIDR value '%s': %s", + return fmt.Errorf("unable to parse internal CIDR value '%s': %w", c.IPv6NAT46x64CIDR, err) } @@ -2781,7 +2636,7 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { if !c.EnableIPv6 { return fmt.Errorf("IPv6NDP cannot be enabled when IPv6 is not enabled") } - if len(c.IPv6MCastDevice) == 0 && !MightAutoDetectDevices() { + if len(c.IPv6MCastDevice) == 0 { return fmt.Errorf("IPv6NDP cannot be enabled without %s", IPv6MCastDevice) } } @@ -2793,27 +2648,16 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { c.RoutingMode, RoutingModeTunnel, RoutingModeNative) } - switch c.TunnelProtocol { - case TunnelVXLAN, TunnelGeneve: - default: - return fmt.Errorf("invalid tunnel protocol %q", c.TunnelProtocol) - } - - if c.RoutingMode == RoutingModeNative && c.UseSingleClusterRoute { - return fmt.Errorf("option --%s cannot be used in combination with --%s=%s", - SingleClusterRouteName, RoutingMode, RoutingModeNative) + cinfo := clustermeshTypes.ClusterInfo{ + ID: c.ClusterID, + Name: c.ClusterName, + MaxConnectedClusters: c.MaxConnectedClusters, } - - if c.ClusterID < clustermeshTypes.ClusterIDMin || c.ClusterID > clustermeshTypes.ClusterIDMax { - return fmt.Errorf("invalid cluster id %d: must be in range %d..%d", - c.ClusterID, clustermeshTypes.ClusterIDMin, clustermeshTypes.ClusterIDMax) + if err := cinfo.InitClusterIDMax(); err != nil { + return err } - - if c.ClusterID != 0 { - if 
c.ClusterName == defaults.ClusterName { - return fmt.Errorf("cannot use default cluster name (%s) with option %s", - defaults.ClusterName, ClusterIDName) - } + if err := cinfo.Validate(); err != nil { + return err } if err := c.checkMapSizeLimits(); err != nil { @@ -2839,13 +2683,6 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { int64(defaults.KVstoreLeaseMaxTTL.Seconds())) } - allowedEndpointStatusValues := EndpointStatusValuesMap() - for enabledEndpointStatus := range c.EndpointStatus { - if _, ok := allowedEndpointStatusValues[enabledEndpointStatus]; !ok { - return fmt.Errorf("unknown endpoint-status option '%s'", enabledEndpointStatus) - } - } - if c.EnableVTEP { err := c.validateVTEP(vp) if err != nil { @@ -2853,6 +2690,14 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { } } + if err := c.validatePolicyCIDRMatchMode(); err != nil { + return err + } + + if err := c.validateContainerIPLocalReservedPorts(); err != nil { + return err + } + return nil } @@ -2862,7 +2707,7 @@ func ReadDirConfig(dirName string) (map[string]interface{}, error) { m := map[string]interface{}{} files, err := os.ReadDir(dirName) if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("unable to read configuration directory: %s", err) + return nil, fmt.Errorf("unable to read configuration directory: %w", err) } for _, f := range files { if f.IsDir() { @@ -2903,7 +2748,7 @@ func ReadDirConfig(dirName string) (map[string]interface{}, error) { func MergeConfig(vp *viper.Viper, m map[string]interface{}) error { err := vp.MergeConfigMap(m) if err != nil { - return fmt.Errorf("unable to read merge directory configuration: %s", err) + return fmt.Errorf("unable to read merge directory configuration: %w", err) } return nil } @@ -2935,7 +2780,7 @@ func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error { for _, ipString := range s { _, ipnet, err := net.ParseCIDR(ipString) if err != nil { - return fmt.Errorf("unable to parse excluded local address %s: %s", ipString, err) + return fmt.Errorf("unable to parse excluded local address %s: %w", ipString, err) } c.ExcludeLocalAddresses = append(c.ExcludeLocalAddresses, ipnet) @@ -2944,7 +2789,30 @@ func (c *DaemonConfig) parseExcludedLocalAddresses(s []string) error { return nil } -// Populate sets all options with the values from viper +// SetupLogging sets all logging-related options with the values from viper, +// then setup logging based on these options and the given tag. +// +// This allows initializing logging as early as possible, then log entries +// produced below in Populate can honor the requested logging configurations. +func (c *DaemonConfig) SetupLogging(vp *viper.Viper, tag string) { + c.Debug = vp.GetBool(DebugArg) + c.LogDriver = vp.GetStringSlice(LogDriver) + + if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil { + log.Fatalf("unable to parse %s: %s", LogOpt, err) + } else { + c.LogOpt = m + } + + if err := logging.SetupLogging(c.LogDriver, logging.LogOptions(c.LogOpt), tag, c.Debug); err != nil { + log.Fatal(err) + } +} + +// Populate sets all non-logging options with the values from viper. +// +// This function may emit logs. Consider calling SetupLogging before this +// to make sure that they honor logging-related options. 
func (c *DaemonConfig) Populate(vp *viper.Viper) { var err error @@ -2960,18 +2828,14 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.AutoCreateCiliumNodeResource = vp.GetBool(AutoCreateCiliumNodeResource) c.BPFRoot = vp.GetString(BPFRoot) c.CGroupRoot = vp.GetString(CGroupRoot) - c.ClusterID = vp.GetUint32(ClusterIDName) - c.ClusterName = vp.GetString(ClusterName) + c.ClusterID = vp.GetUint32(clustermeshTypes.OptClusterID) + c.ClusterName = vp.GetString(clustermeshTypes.OptClusterName) + c.MaxConnectedClusters = vp.GetUint32(clustermeshTypes.OptMaxConnectedClusters) c.DatapathMode = vp.GetString(DatapathMode) - c.Debug = vp.GetBool(DebugArg) c.DebugVerbose = vp.GetStringSlice(DebugVerbose) - c.DirectRoutingDevice = vp.GetString(DirectRoutingDevice) - c.LBDevInheritIPAddr = vp.GetString(LBDevInheritIPAddr) c.EnableIPv4 = vp.GetBool(EnableIPv4Name) c.EnableIPv6 = vp.GetBool(EnableIPv6Name) c.EnableIPv6NDP = vp.GetBool(EnableIPv6NDPName) - c.EnableIPv6BIGTCP = vp.GetBool(EnableIPv6BIGTCP) - c.EnableIPv4BIGTCP = vp.GetBool(EnableIPv4BIGTCP) c.EnableSRv6 = vp.GetBool(EnableSRv6) c.SRv6EncapMode = vp.GetString(SRv6EncapModeName) c.EnableSCTP = vp.GetBool(EnableSCTPName) @@ -2982,41 +2846,40 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration) c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline) c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod) - c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback) + c.WireguardPersistentKeepalive = vp.GetDuration(WireguardPersistentKeepalive) c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities) c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter) + c.EnableTCX = vp.GetBool(EnableTCX) c.DisableCiliumEndpointCRD = vp.GetBool(DisableCiliumEndpointCRDName) - c.EgressMasqueradeInterfaces = vp.GetString(EgressMasqueradeInterfaces) + c.MasqueradeInterfaces = vp.GetStringSlice(MasqueradeInterfaces) c.BPFSocketLBHostnsOnly = vp.GetBool(BPFSocketLBHostnsOnly) c.EnableSocketLB = vp.GetBool(EnableSocketLB) c.EnableSocketLBTracing = vp.GetBool(EnableSocketLBTracing) - c.EnableRemoteNodeIdentity = vp.GetBool(EnableRemoteNodeIdentity) + c.EnableSocketLBPodConnectionTermination = vp.GetBool(EnableSocketLBPodConnectionTermination) c.EnableBPFTProxy = vp.GetBool(EnableBPFTProxy) - c.EnableXTSocketFallback = vp.GetBool(EnableXTSocketFallbackName) c.EnableAutoDirectRouting = vp.GetBool(EnableAutoDirectRoutingName) + c.DirectRoutingSkipUnreachable = vp.GetBool(DirectRoutingSkipUnreachableName) c.EnableEndpointRoutes = vp.GetBool(EnableEndpointRoutes) c.EnableHealthChecking = vp.GetBool(EnableHealthChecking) c.EnableEndpointHealthChecking = vp.GetBool(EnableEndpointHealthChecking) c.EnableHealthCheckNodePort = vp.GetBool(EnableHealthCheckNodePort) + c.EnableHealthCheckLoadBalancerIP = vp.GetBool(EnableHealthCheckLoadBalancerIP) + c.HealthCheckICMPFailureThreshold = vp.GetInt(HealthCheckICMPFailureThreshold) c.EnableLocalNodeRoute = vp.GetBool(EnableLocalNodeRoute) c.EnablePolicy = strings.ToLower(vp.GetString(EnablePolicy)) c.EnableExternalIPs = vp.GetBool(EnableExternalIPs) c.EnableL7Proxy = vp.GetBool(EnableL7Proxy) c.EnableTracing = vp.GetBool(EnableTracing) + c.EnableIPIPTermination = vp.GetBool(EnableIPIPTermination) c.EnableUnreachableRoutes = vp.GetBool(EnableUnreachableRoutes) c.EnableNodePort = vp.GetBool(EnableNodePort) c.EnableSVCSourceRangeCheck = vp.GetBool(EnableSVCSourceRangeCheck) c.EnableHostPort = 
vp.GetBool(EnableHostPort) c.EnableHostLegacyRouting = vp.GetBool(EnableHostLegacyRouting) - c.MaglevTableSize = vp.GetInt(MaglevTableSize) - c.MaglevHashSeed = vp.GetString(MaglevHashSeed) c.NodePortBindProtection = vp.GetBool(NodePortBindProtection) c.EnableAutoProtectNodePortRange = vp.GetBool(EnableAutoProtectNodePortRange) c.KubeProxyReplacement = vp.GetString(KubeProxyReplacement) c.EnableSessionAffinity = vp.GetBool(EnableSessionAffinity) - c.EnableServiceTopology = vp.GetBool(EnableServiceTopology) - c.EnableBandwidthManager = vp.GetBool(EnableBandwidthManager) - c.EnableBBR = vp.GetBool(EnableBBR) c.EnableRecorder = vp.GetBool(EnableRecorder) c.EnableMKE = vp.GetBool(EnableMKE) c.CgroupPathMKE = vp.GetString(CgroupPathMKE) @@ -3024,16 +2887,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableLocalRedirectPolicy = vp.GetBool(EnableLocalRedirectPolicy) c.EncryptInterface = vp.GetStringSlice(EncryptInterface) c.EncryptNode = vp.GetBool(EncryptNode) - c.EnvoyLogPath = vp.GetString(EnvoyLog) - c.HTTPNormalizePath = vp.GetBool(HTTPNormalizePath) - c.HTTPIdleTimeout = vp.GetInt(HTTPIdleTimeout) - c.HTTPMaxGRPCTimeout = vp.GetInt(HTTPMaxGRPCTimeout) - c.HTTPRequestTimeout = vp.GetInt(HTTPRequestTimeout) - c.HTTPRetryCount = vp.GetInt(HTTPRetryCount) - c.HTTPRetryTimeout = vp.GetInt(HTTPRetryTimeout) c.IdentityChangeGracePeriod = vp.GetDuration(IdentityChangeGracePeriod) c.IdentityRestoreGracePeriod = vp.GetDuration(IdentityRestoreGracePeriod) c.IPAM = vp.GetString(IPAM) + c.IPAMDefaultIPPool = vp.GetString(IPAMDefaultIPPool) c.IPv4Range = vp.GetString(IPv4Range) c.IPv4NodeAddr = vp.GetString(IPv4NodeAddr) c.IPv4ServiceRange = vp.GetString(IPv4ServiceRange) @@ -3042,11 +2899,9 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IPv6Range = vp.GetString(IPv6Range) c.IPv6ServiceRange = vp.GetString(IPv6ServiceRange) c.JoinCluster = vp.GetBool(JoinClusterName) - c.K8sEnableK8sEndpointSlice = vp.GetBool(K8sEnableEndpointSlice) c.K8sRequireIPv4PodCIDR = vp.GetBool(K8sRequireIPv4PodCIDRName) c.K8sRequireIPv6PodCIDR = vp.GetBool(K8sRequireIPv6PodCIDRName) c.K8sServiceCacheSize = uint(vp.GetInt(K8sServiceCacheSize)) - c.K8sEventHandover = vp.GetBool(K8sEventHandover) c.K8sSyncTimeout = vp.GetDuration(K8sSyncTimeoutName) c.AllocatorListTimeout = vp.GetDuration(AllocatorListTimeoutName) c.K8sWatcherEndpointSelector = vp.GetString(K8sWatcherEndpointSelector) @@ -3056,14 +2911,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.KVstoreKeepAliveInterval = c.KVstoreLeaseTTL / defaults.KVstoreKeepAliveIntervalFactor c.KVstorePeriodicSync = vp.GetDuration(KVstorePeriodicSync) c.KVstoreConnectivityTimeout = vp.GetDuration(KVstoreConnectivityTimeout) - c.KVstoreMaxConsecutiveQuorumErrors = vp.GetInt(KVstoreMaxConsecutiveQuorumErrorsName) - c.IPAllocationTimeout = vp.GetDuration(IPAllocationTimeout) + c.KVstorePodNetworkSupport = vp.GetBool(KVstorePodNetworkSupport) + c.KVstoreMaxConsecutiveQuorumErrors = vp.GetUint(KVstoreMaxConsecutiveQuorumErrorsName) c.LabelPrefixFile = vp.GetString(LabelPrefixFile) c.Labels = vp.GetStringSlice(Labels) c.LibDir = vp.GetString(LibDir) - c.LogDriver = vp.GetStringSlice(LogDriver) c.LogSystemLoadConfig = vp.GetBool(LogSystemLoadConfigName) - c.Logstash = vp.GetBool(Logstash) c.LoopbackIPv4 = vp.GetString(LoopbackIPv4) c.LocalRouterIPv4 = vp.GetString(LocalRouterIPv4) c.LocalRouterIPv6 = vp.GetString(LocalRouterIPv6) @@ -3071,34 +2924,21 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableIPMasqAgent = vp.GetBool(EnableIPMasqAgent) 
c.EnableIPv4EgressGateway = vp.GetBool(EnableIPv4EgressGateway) c.EnableEnvoyConfig = vp.GetBool(EnableEnvoyConfig) - c.EnableIngressController = vp.GetBool(EnableIngressController) - c.EnableGatewayAPI = vp.GetBool(EnableGatewayAPI) - c.EnvoyConfigTimeout = vp.GetDuration(EnvoyConfigTimeout) c.IPMasqAgentConfigPath = vp.GetString(IPMasqAgentConfigPath) c.InstallIptRules = vp.GetBool(InstallIptRules) - c.IPTablesLockTimeout = vp.GetDuration(IPTablesLockTimeout) - c.IPTablesRandomFully = vp.GetBool(IPTablesRandomFully) c.IPSecKeyFile = vp.GetString(IPSecKeyFileName) c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration) c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher) + c.EnableIPSecXfrmStateCaching = vp.GetBool(EnableIPSecXfrmStateCaching) c.MonitorAggregation = vp.GetString(MonitorAggregationName) c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval) c.MTU = vp.GetInt(MTUName) c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName) - c.PrependIptablesChains = vp.GetBool(PrependIptablesChainsName) c.ProcFs = vp.GetString(ProcFs) - c.ProxyConnectTimeout = vp.GetInt(ProxyConnectTimeout) - c.ProxyGID = vp.GetInt(ProxyGID) - c.ProxyPrometheusPort = vp.GetInt(ProxyPrometheusPort) - c.ProxyMaxRequestsPerConnection = vp.GetInt(ProxyMaxRequestsPerConnection) - c.ProxyMaxConnectionDuration = time.Duration(vp.GetInt64(ProxyMaxConnectionDuration)) - c.ProxyIdleTimeout = time.Duration(vp.GetInt64(ProxyIdleTimeout)) c.RestoreState = vp.GetBool(Restore) c.RouteMetric = vp.GetInt(RouteMetric) c.RunDir = vp.GetString(StateDir) c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy) - c.SidecarIstioProxyImage = vp.GetString(SidecarIstioProxyImage) - c.UseSingleClusterRoute = vp.GetBool(SingleClusterRouteName) c.SocketPath = vp.GetString(SocketPath) c.TracePayloadlen = vp.GetInt(TracePayloadlen) c.Version = vp.GetString(Version) @@ -3111,34 +2951,48 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.CTMapEntriesTimeoutSYN = vp.GetDuration(CTMapEntriesTimeoutSYNName) c.CTMapEntriesTimeoutFIN = vp.GetDuration(CTMapEntriesTimeoutFINName) c.PolicyAuditMode = vp.GetBool(PolicyAuditModeArg) + c.PolicyAccounting = vp.GetBool(PolicyAccountingArg) c.EnableIPv4FragmentsTracking = vp.GetBool(EnableIPv4FragmentsTrackingName) c.FragmentsMapEntries = vp.GetInt(FragmentsMapEntriesName) - c.K8sServiceProxyName = vp.GetString(K8sServiceProxyName) c.CRDWaitTimeout = vp.GetDuration(CRDWaitTimeout) c.LoadBalancerDSRDispatch = vp.GetString(LoadBalancerDSRDispatch) - c.LoadBalancerDSRL4Xlate = vp.GetString(LoadBalancerDSRL4Xlate) c.LoadBalancerRSSv4CIDR = vp.GetString(LoadBalancerRSSv4CIDR) c.LoadBalancerRSSv6CIDR = vp.GetString(LoadBalancerRSSv6CIDR) c.InstallNoConntrackIptRules = vp.GetBool(InstallNoConntrackIptRules) + c.ContainerIPLocalReservedPorts = vp.GetString(ContainerIPLocalReservedPorts) c.EnableCustomCalls = vp.GetBool(EnableCustomCallsName) - c.BGPAnnounceLBIP = vp.GetBool(BGPAnnounceLBIP) - c.BGPAnnouncePodCIDR = vp.GetBool(BGPAnnouncePodCIDR) - c.BGPConfigPath = vp.GetString(BGPConfigPath) + c.BGPSecretsNamespace = vp.GetString(BGPSecretsNamespace) c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName) c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway) c.EnableHighScaleIPcache = vp.GetBool(EnableHighScaleIPcache) c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4 c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6 c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade) - c.DeriveMasqIPAddrFromDevice = 
vp.GetString(DeriveMasqIPAddrFromDevice) + c.EnableMasqueradeRouteSource = vp.GetBool(EnableMasqueradeRouteSource) c.EnablePMTUDiscovery = vp.GetBool(EnablePMTUDiscovery) c.IPv6NAT46x64CIDR = defaults.IPv6NAT46x64CIDR c.IPAMCiliumNodeUpdateRate = vp.GetDuration(IPAMCiliumNodeUpdateRate) + c.BPFEventsDropEnabled = vp.GetBool(BPFEventsDropEnabled) + c.BPFEventsPolicyVerdictEnabled = vp.GetBool(BPFEventsPolicyVerdictEnabled) + c.BPFEventsTraceEnabled = vp.GetBool(BPFEventsTraceEnabled) + c.BPFConntrackAccounting = vp.GetBool(BPFConntrackAccounting) + c.EnableIPSecEncryptedOverlay = vp.GetBool(EnableIPSecEncryptedOverlay) + c.LBSourceRangeAllTypes = vp.GetBool(LBSourceRangeAllTypes) + c.BootIDFile = vp.GetString(BootIDFilename) + + c.ServiceNoBackendResponse = vp.GetString(ServiceNoBackendResponse) + switch c.ServiceNoBackendResponse { + case ServiceNoBackendResponseReject, ServiceNoBackendResponseDrop: + case "": + c.ServiceNoBackendResponse = defaults.ServiceNoBackendResponse + default: + log.Fatalf("Invalid value for --%s: %s (must be 'reject' or 'drop')", ServiceNoBackendResponse, c.ServiceNoBackendResponse) + } c.populateLoadBalancerSettings(vp) - c.populateDevices(vp) c.EnableRuntimeDeviceDetection = vp.GetBool(EnableRuntimeDeviceDetection) c.EgressMultiHomeIPRuleCompat = vp.GetBool(EgressMultiHomeIPRuleCompat) + c.InstallUplinkRoutesForDelegatedIPAM = vp.GetBool(InstallUplinkRoutesForDelegatedIPAM) vlanBPFBypassIDs := vp.GetStringSlice(VLANBPFBypass) c.VLANBPFBypass = make([]int, 0, len(vlanBPFBypassIDs)) @@ -3150,44 +3004,15 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.VLANBPFBypass = append(c.VLANBPFBypass, vlanID) } + c.DisableExternalIPMitigation = vp.GetBool(DisableExternalIPMitigation) + tcFilterPrio := vp.GetUint32(TCFilterPriority) if tcFilterPrio > math.MaxUint16 { log.Fatalf("%s cannot be higher than %d", TCFilterPriority, math.MaxUint16) } c.TCFilterPriority = uint16(tcFilterPrio) - c.Tunnel = vp.GetString(TunnelName) c.RoutingMode = vp.GetString(RoutingMode) - c.TunnelProtocol = vp.GetString(TunnelProtocol) - c.TunnelPort = vp.GetInt(TunnelPortName) - - if c.Tunnel != "" && c.RoutingMode != defaults.RoutingMode { - log.Fatalf("Option --%s cannot be used in combination with --%s", RoutingMode, TunnelName) - } - - if c.Tunnel == "disabled" { - c.RoutingMode = RoutingModeNative - } else if c.Tunnel != "" { - c.TunnelProtocol = c.Tunnel - } - c.Tunnel = "" - - if c.TunnelPort == 0 { - // manually pick port for native-routing and DSR with Geneve dispatch: - if !c.TunnelingEnabled() && - (c.EnableNodePort || (c.KubeProxyReplacement == KubeProxyReplacementStrict || c.KubeProxyReplacement == KubeProxyReplacementTrue)) && - c.NodePortMode != NodePortModeSNAT && - c.LoadBalancerDSRDispatch == DSRDispatchGeneve { - c.TunnelPort = defaults.TunnelPortGeneve - } else { - switch c.TunnelProtocol { - case TunnelVXLAN: - c.TunnelPort = defaults.TunnelPortVXLAN - case TunnelGeneve: - c.TunnelPort = defaults.TunnelPortGeneve - } - } - } if vp.IsSet(AddressScopeMax) { c.AddressScopeMax, err = ip.ParseScope(vp.GetString(AddressScopeMax)) @@ -3205,6 +3030,26 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } + encryptionStrictModeEnabled := vp.GetBool(EnableEncryptionStrictMode) + if encryptionStrictModeEnabled { + if c.EnableIPv6 { + log.Info("WireGuard encryption strict mode only supports IPv4. 
IPv6 traffic is not protected and can be leaked.") + } + + strictCIDR := vp.GetString(EncryptionStrictModeCIDR) + c.EncryptionStrictModeCIDR, err = netip.ParsePrefix(strictCIDR) + if err != nil { + log.WithError(err).Fatalf("Cannot parse CIDR %s from --%s option", strictCIDR, EncryptionStrictModeCIDR) + } + + if !c.EncryptionStrictModeCIDR.Addr().Is4() { + log.Fatalf("%s must be an IPv4 CIDR", EncryptionStrictModeCIDR) + } + + c.EncryptionStrictModeAllowRemoteNodeIdentities = vp.GetBool(EncryptionStrictModeAllowRemoteNodeIdentities) + c.EnableEncryptionStrictMode = encryptionStrictModeEnabled + } + ipv4NativeRoutingCIDR := vp.GetString(IPv4NativeRoutingCIDR) if ipv4NativeRoutingCIDR != "" { @@ -3218,11 +3063,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } - if c.EnableIPv4 && ipv4NativeRoutingCIDR == "" && c.EnableAutoDirectRouting { - log.Warnf("If %s is enabled, then you are recommended to also configure %s. If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+ - "which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv4NativeRoutingCIDR, IPv4NativeRoutingCIDR) - } - ipv6NativeRoutingCIDR := vp.GetString(IPv6NativeRoutingCIDR) if ipv6NativeRoutingCIDR != "" { @@ -3236,9 +3076,8 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } } - if c.EnableIPv6 && ipv6NativeRoutingCIDR == "" && c.EnableAutoDirectRouting { - log.Warnf("If %s is enabled, then you are recommended to also configure %s. If %s is not configured, this may lead to pod to pod traffic being masqueraded, "+ - "which can cause problems with performance, observability and policy", EnableAutoDirectRoutingName, IPv6NativeRoutingCIDR, IPv6NativeRoutingCIDR) + if c.DirectRoutingSkipUnreachable && !c.EnableAutoDirectRouting { + log.Fatalf("Flag %s cannot be enabled when %s is not enabled. 
As if %s is then enabled, it may lead to unexpected behaviour causing network connectivity issues.", DirectRoutingSkipUnreachableName, EnableAutoDirectRoutingName, EnableAutoDirectRoutingName) } if err := c.calculateBPFMapSizes(vp); err != nil { @@ -3272,8 +3111,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.FQDNProxyResponseMaxDelay = vp.GetDuration(FQDNProxyResponseMaxDelay) c.DNSProxyConcurrencyLimit = vp.GetInt(DNSProxyConcurrencyLimit) c.DNSProxyConcurrencyProcessingGracePeriod = vp.GetDuration(DNSProxyConcurrencyProcessingGracePeriod) + c.DNSProxyEnableTransparentMode = vp.GetBool(DNSProxyEnableTransparentMode) + c.DNSProxyInsecureSkipTransparentModeCheck = vp.GetBool(DNSProxyInsecureSkipTransparentModeCheck) c.DNSProxyLockCount = vp.GetInt(DNSProxyLockCount) c.DNSProxyLockTimeout = vp.GetDuration(DNSProxyLockTimeout) + c.DNSProxySocketLingerTimeout = vp.GetInt(DNSProxySocketLingerTimeout) + c.FQDNRejectResponse = vp.GetString(FQDNRejectResponseCode) // Convert IP strings into net.IPNet types subnets, invalid := ip.ParseCIDRs(vp.GetStringSlice(IPv4PodSubnets)) @@ -3294,8 +3137,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } c.IPv6PodSubnets = subnets - c.XDPMode = XDPModeLinkNone - err = c.populateNodePortRange(vp) if err != nil { log.WithError(err).Fatal("Failed to populate NodePortRange") @@ -3321,7 +3162,29 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.FixedIdentityMapping = m } + if m := command.GetStringMapString(vp, FixedZoneMapping); err != nil { + log.Fatalf("unable to parse %s: %s", FixedZoneMapping, err) + } else if len(m) != 0 { + forward := make(map[string]uint8, len(m)) + reverse := make(map[uint8]string, len(m)) + for k, v := range m { + bigN, _ := strconv.Atoi(v) + n := uint8(bigN) + if oldKey, ok := reverse[n]; ok && oldKey != k { + log.Fatalf("duplicate numeric ID entry for %s: %q and %q map to the same value %d", FixedZoneMapping, oldKey, k, n) + } + if oldN, ok := forward[k]; ok && oldN != n { + log.Fatalf("duplicate zone name entry for %s: %d and %d map to different values %s", FixedZoneMapping, oldN, n, k) + } + forward[k] = n + reverse[n] = k + } + c.FixedZoneMapping = forward + c.ReverseFixedZoneMapping = reverse + } + c.ConntrackGCInterval = vp.GetDuration(ConntrackGCInterval) + c.ConntrackGCMaxInterval = vp.GetDuration(ConntrackGCMaxInterval) if m, err := command.GetStringMapStringE(vp, KVStoreOpt); err != nil { log.Fatalf("unable to parse %s: %s", KVStoreOpt, err) @@ -3329,16 +3192,16 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.KVStoreOpt = m } - if m, err := command.GetStringMapStringE(vp, LogOpt); err != nil { - log.Fatalf("unable to parse %s: %s", LogOpt, err) - } else { - c.LogOpt = m - } - - if m, err := command.GetStringMapStringE(vp, APIRateLimitName); err != nil { - log.Fatalf("unable to parse %s: %s", APIRateLimitName, err) - } else { - c.APIRateLimit = m + bpfEventsDefaultRateLimit := vp.GetUint32(BPFEventsDefaultRateLimit) + bpfEventsDefaultBurstLimit := vp.GetUint32(BPFEventsDefaultBurstLimit) + switch { + case bpfEventsDefaultRateLimit > 0 && bpfEventsDefaultBurstLimit == 0: + log.Fatalf("invalid BPF events default config: burst limit must also be specified when rate limit is provided") + case bpfEventsDefaultRateLimit == 0 && bpfEventsDefaultBurstLimit > 0: + log.Fatalf("invalid BPF events default config: rate limit must also be specified when burst limit is provided") + default: + c.BPFEventsDefaultRateLimit = vp.GetUint32(BPFEventsDefaultRateLimit) + c.BPFEventsDefaultBurstLimit = 
vp.GetUint32(BPFEventsDefaultBurstLimit) } c.bpfMapEventConfigs = make(BPFEventBufferConfigs) @@ -3356,10 +3219,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.NodeEncryptionOptOutLabels = sel } - for _, option := range vp.GetStringSlice(EndpointStatus) { - c.EndpointStatus[option] = struct{}{} - } - if err := c.parseExcludedLocalAddresses(vp.GetStringSlice(ExcludeLocalAddress)); err != nil { log.WithError(err).Fatalf("Unable to parse excluded local addresses") } @@ -3371,33 +3230,38 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { EnableCiliumEndpointSlice, c.EnableCiliumEndpointSlice, DisableCiliumEndpointCRDName) } + // To support K8s NetworkPolicy + c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy) + c.PolicyCIDRMatchMode = vp.GetStringSlice(PolicyCIDRMatchMode) + c.EnableNodeSelectorLabels = vp.GetBool(EnableNodeSelectorLabels) + c.NodeLabels = vp.GetStringSlice(NodeLabels) + + c.EnableCiliumNetworkPolicy = vp.GetBool(EnableCiliumNetworkPolicy) + c.EnableCiliumClusterwideNetworkPolicy = vp.GetBool(EnableCiliumClusterwideNetworkPolicy) + c.IdentityAllocationMode = vp.GetString(IdentityAllocationMode) switch c.IdentityAllocationMode { // This is here for tests. Some call Populate without the normal init case "": c.IdentityAllocationMode = IdentityAllocationModeKVstore - case IdentityAllocationModeKVstore, IdentityAllocationModeCRD: + case IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD: // c.IdentityAllocationMode is set above default: - log.Fatalf("Invalid identity allocation mode %q. It must be one of %s or %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD) + log.Fatalf("Invalid identity allocation mode %q. It must be one of %s, %s or %s / %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD, IdentityAllocationModeDoubleWriteReadKVstore, IdentityAllocationModeDoubleWriteReadCRD) } if c.KVStore == "" { if c.IdentityAllocationMode != IdentityAllocationModeCRD { log.Warningf("Running Cilium with %q=%q requires identity allocation via CRDs. Changing %s to %q", KVStore, c.KVStore, IdentityAllocationMode, IdentityAllocationModeCRD) c.IdentityAllocationMode = IdentityAllocationModeCRD } - if c.DisableCiliumEndpointCRD { - log.Warningf("Running Cilium with %q=%q requires endpoint CRDs. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false) + if c.DisableCiliumEndpointCRD && NetworkPolicyEnabled(c) { + log.Warningf("Running Cilium with %q=%q requires endpoint CRDs when network policy enforcement system is enabled. Changing %s to %t", KVStore, c.KVStore, DisableCiliumEndpointCRDName, false) c.DisableCiliumEndpointCRD = false } - if c.K8sEventHandover { - log.Warningf("Running Cilium with %q=%q requires KVStore capability. 
Changing %s to %t", KVStore, c.KVStore, K8sEventHandover, false) - c.K8sEventHandover = false - } } switch c.IPAM { - case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool, ipamOption.IPAMClusterPoolV2: + case ipamOption.IPAMKubernetes, ipamOption.IPAMClusterPool: if c.EnableIPv4 { c.K8sRequireIPv4PodCIDR = true } @@ -3411,38 +3275,12 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { } else { c.IPAMMultiPoolPreAllocation = m } - + if len(c.IPAMMultiPoolPreAllocation) == 0 { + // Default to the same value as IPAMDefaultIPPool + c.IPAMMultiPoolPreAllocation = map[string]string{c.IPAMDefaultIPPool: "8"} + } c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr) - // Hubble options. - c.EnableHubble = vp.GetBool(EnableHubble) - c.EnableHubbleOpenMetrics = vp.GetBool(EnableHubbleOpenMetrics) - c.HubbleSocketPath = vp.GetString(HubbleSocketPath) - c.HubbleListenAddress = vp.GetString(HubbleListenAddress) - c.HubblePreferIpv6 = vp.GetBool(HubblePreferIpv6) - c.HubbleTLSDisabled = vp.GetBool(HubbleTLSDisabled) - c.HubbleTLSCertFile = vp.GetString(HubbleTLSCertFile) - c.HubbleTLSKeyFile = vp.GetString(HubbleTLSKeyFile) - c.HubbleTLSClientCAFiles = vp.GetStringSlice(HubbleTLSClientCAFiles) - c.HubbleEventBufferCapacity = vp.GetInt(HubbleEventBufferCapacity) - c.HubbleEventQueueSize = vp.GetInt(HubbleEventQueueSize) - if c.HubbleEventQueueSize == 0 { - c.HubbleEventQueueSize = getDefaultMonitorQueueSize(runtime.NumCPU()) - } - c.HubbleMetricsServer = vp.GetString(HubbleMetricsServer) - c.HubbleMetrics = vp.GetStringSlice(HubbleMetrics) - c.HubbleExportFilePath = vp.GetString(HubbleExportFilePath) - c.HubbleExportFileMaxSizeMB = vp.GetInt(HubbleExportFileMaxSizeMB) - c.HubbleExportFileMaxBackups = vp.GetInt(HubbleExportFileMaxBackups) - c.HubbleExportFileCompress = vp.GetBool(HubbleExportFileCompress) - c.EnableHubbleRecorderAPI = vp.GetBool(EnableHubbleRecorderAPI) - c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath) - c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize) - c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs) - c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents) - - c.DisableIptablesFeederRules = vp.GetStringSlice(DisableIptablesFeederRules) - // Hidden options c.CompilerFlags = vp.GetStringSlice(CompilerFlags) c.ConfigFile = vp.GetString(ConfigFile) @@ -3450,20 +3288,11 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.K8sNamespace = vp.GetString(K8sNamespaceName) c.AgentNotReadyNodeTaintKey = vp.GetString(AgentNotReadyNodeTaintKeyName) c.MaxControllerInterval = vp.GetInt(MaxCtrlIntervalName) - c.PolicyQueueSize = sanitizeIntParam(vp, PolicyQueueSize, defaults.PolicyQueueSize) c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize) - c.DisableCNPStatusUpdates = vp.GetBool(DisableCNPStatusUpdates) c.EnableICMPRules = vp.GetBool(EnableICMPRules) c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec) c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore) c.EnableK8sTerminatingEndpoint = vp.GetBool(EnableK8sTerminatingEndpoint) - c.EnableStaleCiliumEndpointCleanup = vp.GetBool(EnableStaleCiliumEndpointCleanup) - - // Disable Envoy version check if L7 proxy is disabled. 
- c.DisableEnvoyVersionCheck = vp.GetBool(DisableEnvoyVersionCheck) - if !c.EnableL7Proxy { - c.DisableEnvoyVersionCheck = true - } // VTEP integration enable option c.EnableVTEP = vp.GetBool(EnableVTEP) @@ -3471,42 +3300,40 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { // Enable BGP control plane features c.EnableBGPControlPlane = vp.GetBool(EnableBGPControlPlane) - // Envoy secrets namespaces to watch - params := []string{EnvoySecretsNamespace, IngressSecretsNamespace, GatewayAPISecretsNamespace} - var nsList = make([]string, 0, len(params)) - for _, param := range params { - ns := vp.GetString(param) - if ns != "" { - nsList = append(nsList, ns) + // Enable BGP control plane status reporting + c.EnableBGPControlPlaneStatusReport = vp.GetBool(EnableBGPControlPlaneStatusReport) + + // Support failure-mode for policy map overflow + c.EnableEndpointLockdownOnPolicyOverflow = vp.GetBool(EnableEndpointLockdownOnPolicyOverflow) + + // Parse node label patterns + nodeLabelPatterns := vp.GetStringSlice(ExcludeNodeLabelPatterns) + for _, pattern := range nodeLabelPatterns { + r, err := regexp.Compile(pattern) + if err != nil { + log.WithError(err).Errorf("Unable to compile exclude node label regex pattern %s", pattern) + continue } + c.ExcludeNodeLabelPatterns = append(c.ExcludeNodeLabelPatterns, r) } - c.EnvoySecretNamespaces = nsList - // To support K8s NetworkPolicy - c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy) -} + if c.KVStore != "" { + c.IdentityRestoreGracePeriod = defaults.IdentityRestoreGracePeriodKvstore + } -func (c *DaemonConfig) populateDevices(vp *viper.Viper) { - c.devices = vp.GetStringSlice(Devices) + c.LoadBalancerProtocolDifferentiation = vp.GetBool(LoadBalancerProtocolDifferentiation) - // Make sure that devices are unique - if len(c.devices) <= 1 { - return - } - devSet := map[string]struct{}{} - for _, dev := range c.devices { - devSet[dev] = struct{}{} - } - c.devices = make([]string, 0, len(devSet)) - for dev := range devSet { - c.devices = append(c.devices, dev) - } + c.EnableInternalTrafficPolicy = vp.GetBool(EnableInternalTrafficPolicy) + + c.EnableSourceIPVerification = vp.GetBool(EnableSourceIPVerification) } func (c *DaemonConfig) populateLoadBalancerSettings(vp *viper.Viper) { c.NodePortAcceleration = vp.GetString(LoadBalancerAcceleration) c.NodePortMode = vp.GetString(LoadBalancerMode) - c.NodePortAlg = vp.GetString(LoadBalancerAlg) + c.LoadBalancerModeAnnotation = vp.GetBool(LoadBalancerModeAnnotation) + c.NodePortAlg = vp.GetString(LoadBalancerAlgorithm) + c.LoadBalancerAlgorithmAnnotation = vp.GetBool(LoadBalancerAlgorithmAnnotation) // If old settings were explicitly set by the user, then have them // override the new ones in order to not break existing setups. if vp.IsSet(NodePortAcceleration) { @@ -3528,9 +3355,9 @@ func (c *DaemonConfig) populateLoadBalancerSettings(vp *viper.Viper) { if vp.IsSet(NodePortAlg) { prior := c.NodePortAlg c.NodePortAlg = vp.GetString(NodePortAlg) - if vp.IsSet(LoadBalancerAlg) && prior != c.NodePortAlg { + if vp.IsSet(LoadBalancerAlgorithm) && prior != c.NodePortAlg { log.Fatalf("Both --%s and --%s were set. 
Only use --%s instead.", - LoadBalancerAlg, NodePortAlg, LoadBalancerAlg) + LoadBalancerAlgorithm, NodePortAlg, LoadBalancerAlgorithm) } } } @@ -3548,11 +3375,11 @@ func (c *DaemonConfig) populateNodePortRange(vp *viper.Viper) error { c.NodePortMin, err = strconv.Atoi(nodePortRange[0]) if err != nil { - return fmt.Errorf("Unable to parse min port value for NodePort range: %s", err.Error()) + return fmt.Errorf("Unable to parse min port value for NodePort range: %w", err) } c.NodePortMax, err = strconv.Atoi(nodePortRange[1]) if err != nil { - return fmt.Errorf("Unable to parse max port value for NodePort range: %s", err.Error()) + return fmt.Errorf("Unable to parse max port value for NodePort range: %w", err) } if c.NodePortMax <= c.NodePortMin { return errors.New("NodePort range min port must be smaller than max port") @@ -3654,29 +3481,52 @@ func (c *DaemonConfig) checkMapSizeLimits() error { } func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error { - if c.GetIPv4NativeRoutingCIDR() == nil && c.EnableIPv4Masquerade && !c.TunnelingEnabled() && - c.IPAMMode() != ipamOption.IPAMENI && c.EnableIPv4 && c.IPAMMode() != ipamOption.IPAMAlibabaCloud { - return fmt.Errorf( - "native routing cidr must be configured with option --%s "+ - "in combination with --%s --%s=%s --%s=%s --%s=true", - IPv4NativeRoutingCIDR, EnableIPv4Masquerade, RoutingMode, RoutingModeNative, - IPAM, c.IPAMMode(), EnableIPv4Name) + if c.IPv4NativeRoutingCIDR != nil { + return nil + } + if !c.EnableIPv4 || !c.EnableIPv4Masquerade { + return nil + } + if c.EnableIPMasqAgent { + return nil + } + if c.TunnelingEnabled() { + return nil + } + if c.IPAMMode() == ipamOption.IPAMENI || c.IPAMMode() == ipamOption.IPAMAlibabaCloud { + return nil } - return nil + return fmt.Errorf( + "native routing cidr must be configured with option --%s "+ + "in combination with --%s=true --%s=true --%s=false --%s=%s --%s=%s", + IPv4NativeRoutingCIDR, + EnableIPv4Name, EnableIPv4Masquerade, + EnableIPMasqAgent, + RoutingMode, RoutingModeNative, + IPAM, c.IPAMMode()) } func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error { - if c.GetIPv6NativeRoutingCIDR() == nil && c.EnableIPv6Masquerade && !c.TunnelingEnabled() && - c.EnableIPv6 { - return fmt.Errorf( - "native routing cidr must be configured with option --%s "+ - "in combination with --%s --%s=%s --%s=true", - IPv6NativeRoutingCIDR, EnableIPv6Masquerade, RoutingMode, RoutingModeNative, - EnableIPv6Name) + if c.IPv6NativeRoutingCIDR != nil { + return nil } - - return nil + if !c.EnableIPv6 || !c.EnableIPv6Masquerade { + return nil + } + if c.EnableIPMasqAgent { + return nil + } + if c.TunnelingEnabled() { + return nil + } + return fmt.Errorf( + "native routing cidr must be configured with option --%s "+ + "in combination with --%s=true --%s=true --%s=false --%s=%s", + IPv6NativeRoutingCIDR, + EnableIPv6Name, EnableIPv6Masquerade, + EnableIPMasqAgent, + RoutingMode, RoutingModeNative) } func (c *DaemonConfig) checkIPAMDelegatedPlugin() error { @@ -3693,12 +3543,9 @@ func (c *DaemonConfig) checkIPAMDelegatedPlugin() error { if c.EnableEndpointHealthChecking { return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEndpointHealthChecking, IPAM, ipamOption.IPAMDelegatedPlugin) } - // Ingress controller and envoy config require cilium-agent to create an IP address - // specifically for differentiating ingress and envoy traffic, which is not possible + // envoy config (Ingress, Gateway API, ...) 
require cilium-agent to create an IP address + // specifically for differentiating envoy traffic, which is not possible // with delegated IPAM. - if c.EnableIngressController { - return fmt.Errorf("--%s must be disabled with --%s=%s", EnableIngressController, IPAM, ipamOption.IPAMDelegatedPlugin) - } if c.EnableEnvoyConfig { return fmt.Errorf("--%s must be disabled with --%s=%s", EnableEnvoyConfig, IPAM, ipamOption.IPAMDelegatedPlugin) } @@ -3716,6 +3563,7 @@ func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error { c.NATMapEntriesGlobal = vp.GetInt(NATMapEntriesGlobalName) c.NeighMapEntriesGlobal = vp.GetInt(NeighMapEntriesGlobalName) c.PolicyMapEntries = vp.GetInt(PolicyMapEntriesName) + c.PolicyMapFullReconciliationInterval = vp.GetDuration(PolicyMapFullReconciliationIntervalName) c.SockRevNatEntries = vp.GetInt(SockRevNatEntriesName) c.LBMapEntries = vp.GetInt(LBMapEntriesName) c.LBServiceMapEntries = vp.GetInt(LBServiceMapMaxEntries) @@ -3739,7 +3587,7 @@ func (c *DaemonConfig) calculateBPFMapSizes(vp *viper.Viper) error { // to 98% of the total memory being allocated for BPF maps. dynamicSizeRatio := vp.GetFloat64(MapEntriesGlobalDynamicSizeRatioName) if 0.0 < dynamicSizeRatio && dynamicSizeRatio <= 1.0 { - vms, err := mem.VirtualMemory() + vms, err := memory.Get() if err != nil || vms == nil { log.WithError(err).Fatal("Failed to get system memory") } @@ -3783,7 +3631,7 @@ func (c *DaemonConfig) calculateDynamicBPFMapSizes(vp *viper.Viper, totalMemory // 4GB 265121 132560 265121 // 16GB 1060485 530242 1060485 memoryAvailableForMaps := int(float64(totalMemory) * dynamicSizeRatio) - log.Infof("Memory available for map entries (%.3f%% of %dB): %dB", dynamicSizeRatio, totalMemory, memoryAvailableForMaps) + log.Infof("Memory available for map entries (%.3f%% of %dB): %dB", dynamicSizeRatio*100, totalMemory, memoryAvailableForMaps) totalMapMemoryDefault := CTMapEntriesGlobalTCPDefault*c.SizeofCTElement + CTMapEntriesGlobalAnyDefault*c.SizeofCTElement + NATMapEntriesGlobalDefault*c.SizeofNATElement + @@ -3918,16 +3766,18 @@ func (c *DaemonConfig) KubeProxyReplacementFullyEnabled() bool { c.EnableSessionAffinity } +var backupFileNames []string = []string{ + "agent-runtime-config.json", + "agent-runtime-config-1.json", + "agent-runtime-config-2.json", +} + // StoreInFile stores the configuration in a the given directory under the file // name 'daemon-config.json'. If this file already exists, it is renamed to // 'daemon-config-1.json', if 'daemon-config-1.json' also exists, // 'daemon-config-1.json' is renamed to 'daemon-config-2.json' +// Caller is responsible for blocking concurrent changes. func (c *DaemonConfig) StoreInFile(dir string) error { - backupFileNames := []string{ - "agent-runtime-config.json", - "agent-runtime-config-1.json", - "agent-runtime-config-2.json", - } backupFiles(dir, backupFileNames) f, err := os.Create(backupFileNames[0]) if err != nil { @@ -3936,13 +3786,92 @@ func (c *DaemonConfig) StoreInFile(dir string) error { defer f.Close() e := json.NewEncoder(f) e.SetIndent("", " ") - return e.Encode(c) + + err = e.Encode(c) + c.shaSum = c.checksum() + + return err +} + +func (c *DaemonConfig) checksum() [32]byte { + // take a shallow copy for summing + sumConfig := *c + // Ignore variable parts + sumConfig.Opts = nil + cBytes, err := json.Marshal(&sumConfig) + if err != nil { + return [32]byte{} + } + return sha256.Sum256(cBytes) +} + +// ValidateUnchanged checks that invariable parts of the config have not changed since init. 
+// Caller is responsible for blocking concurrent changes. +func (c *DaemonConfig) ValidateUnchanged() error { + sum := c.checksum() + if sum != c.shaSum { + return c.diffFromFile() + } + return nil +} + +func (c *DaemonConfig) diffFromFile() error { + f, err := os.Open(backupFileNames[0]) + if err != nil { + return err + } + + fi, err := f.Stat() + if err != nil { + return err + } + + fileBytes := make([]byte, fi.Size()) + count, err := f.Read(fileBytes) + if err != nil { + return err + } + fileBytes = fileBytes[:count] + + var config DaemonConfig + err = json.Unmarshal(fileBytes, &config) + + var diff string + if err != nil { + diff = fmt.Errorf("unmarshal failed %q: %w", string(fileBytes), err).Error() + } else { + // Ignore all unexported fields during Diff. + // from https://github.com/google/go-cmp/issues/313#issuecomment-1315651560 + opts := cmp.FilterPath(func(p cmp.Path) bool { + sf, ok := p.Index(-1).(cmp.StructField) + if !ok { + return false + } + r, _ := utf8.DecodeRuneInString(sf.Name()) + return !unicode.IsUpper(r) + }, cmp.Ignore()) + + diff = cmp.Diff(&config, c, opts, + cmpopts.IgnoreTypes(&IntOptions{}), + cmpopts.IgnoreTypes(&OptionLibrary{})) + } + return fmt.Errorf("Config differs:\n%s", diff) } func (c *DaemonConfig) BGPControlPlaneEnabled() bool { return c.EnableBGPControlPlane } +func (c *DaemonConfig) IsDualStack() bool { + return c.EnableIPv4 && c.EnableIPv6 +} + +// IsLocalRouterIP checks if provided IP address matches either LocalRouterIPv4 +// or LocalRouterIPv6 +func (c *DaemonConfig) IsLocalRouterIP(ip string) bool { + return ip != "" && (c.LocalRouterIPv4 == ip || c.LocalRouterIPv6 == ip) +} + // StoreViperInFile stores viper's configuration in a the given directory under // the file name 'viper-config.yaml'. If this file already exists, it is renamed // to 'viper-config-1.yaml', if 'viper-config-1.yaml' also exists, @@ -3989,6 +3918,52 @@ func sanitizeIntParam(vp *viper.Viper, paramName string, paramDefault int) int { return intParam } +func validateConfigMapFlag(flag *pflag.Flag, key string, value interface{}) error { + var err error + switch t := flag.Value.Type(); t { + case "bool": + _, err = cast.ToBoolE(value) + case "duration": + _, err = cast.ToDurationE(value) + case "float32": + _, err = cast.ToFloat32E(value) + case "float64": + _, err = cast.ToFloat64E(value) + case "int": + _, err = cast.ToIntE(value) + case "int8": + _, err = cast.ToInt8E(value) + case "int16": + _, err = cast.ToInt16E(value) + case "int32": + _, err = cast.ToInt32E(value) + case "int64": + _, err = cast.ToInt64E(value) + case "map": + // custom type, see pkg/option/map_options.go + err = flag.Value.Set(fmt.Sprintf("%s", value)) + case "stringSlice": + _, err = cast.ToStringSliceE(value) + case "string": + _, err = cast.ToStringE(value) + case "uint": + _, err = cast.ToUintE(value) + case "uint8": + _, err = cast.ToUint8E(value) + case "uint16": + _, err = cast.ToUint16E(value) + case "uint32": + _, err = cast.ToUint32E(value) + case "uint64": + _, err = cast.ToUint64E(value) + case "stringToString": + _, err = command.ToStringMapStringE(value) + default: + log.Warnf("Unable to validate option %s value of type %s", key, t) + } + return err +} + // validateConfigMap checks whether the flag exists and validate its value func validateConfigMap(cmd *cobra.Command, m map[string]interface{}) error { flags := cmd.Flags() @@ -3998,54 +3973,11 @@ func validateConfigMap(cmd *cobra.Command, m map[string]interface{}) error { if flag == nil { continue } - - var err error - - switch t := 
flag.Value.Type(); t { - case "bool": - _, err = cast.ToBoolE(value) - case "duration": - _, err = cast.ToDurationE(value) - case "float32": - _, err = cast.ToFloat32E(value) - case "float64": - _, err = cast.ToFloat64E(value) - case "int": - _, err = cast.ToIntE(value) - case "int8": - _, err = cast.ToInt8E(value) - case "int16": - _, err = cast.ToInt16E(value) - case "int32": - _, err = cast.ToInt32E(value) - case "int64": - _, err = cast.ToInt64E(value) - case "map": - // custom type, see pkg/option/map_options.go - err = flag.Value.Set(fmt.Sprintf("%s", value)) - case "stringSlice": - _, err = cast.ToStringSliceE(value) - case "string": - _, err = cast.ToStringE(value) - case "uint": - _, err = cast.ToUintE(value) - case "uint8": - _, err = cast.ToUint8E(value) - case "uint16": - _, err = cast.ToUint16E(value) - case "uint32": - _, err = cast.ToUint32E(value) - case "uint64": - _, err = cast.ToUint64E(value) - default: - log.Warnf("Unable to validate option %s value of type %s", key, t) - } - + err := validateConfigMapFlag(flag, key, value) if err != nil { return fmt.Errorf("option %s: %w", key, err) } } - return nil } @@ -4106,7 +4038,7 @@ func InitConfig(cmd *cobra.Command, programName, configName string, vp *viper.Vi log.WithField(logfields.Path, vp.ConfigFileUsed()). Info("Using config from file") } else if Config.ConfigFile != "" { - log.WithField(logfields.Path, Config.ConfigFile). + log.WithField(logfields.Path, Config.ConfigFile).WithError(err). Fatal("Error reading config file") } else { log.WithError(err).Debug("Skipped reading configuration file") @@ -4120,43 +4052,6 @@ func InitConfig(cmd *cobra.Command, programName, configName string, vp *viper.Vi } } -func getDefaultMonitorQueueSize(numCPU int) int { - monitorQueueSize := numCPU * defaults.MonitorQueueSizePerCPU - if monitorQueueSize > defaults.MonitorQueueSizePerCPUMaximum { - monitorQueueSize = defaults.MonitorQueueSizePerCPUMaximum - } - return monitorQueueSize -} - -// EndpointStatusValues returns all available EndpointStatus option values -func EndpointStatusValues() []string { - return []string{ - EndpointStatusControllers, - EndpointStatusHealth, - EndpointStatusLog, - EndpointStatusPolicy, - EndpointStatusState, - } -} - -// EndpointStatusValuesMap returns all EndpointStatus option values as a map -func EndpointStatusValuesMap() (values map[string]struct{}) { - values = map[string]struct{}{} - for _, v := range EndpointStatusValues() { - values[v] = struct{}{} - } - return -} - -// MightAutoDetectDevices returns true if the device auto-detection might take -// place. -func MightAutoDetectDevices() bool { - devices := Config.GetDevices() - return ((Config.EnableHostFirewall || Config.EnableWireguard || Config.EnableHighScaleIPcache) && len(devices) == 0) || - (Config.KubeProxyReplacement != KubeProxyReplacementDisabled && - (len(devices) == 0 || Config.DirectRoutingDevice == "")) -} - // BPFEventBufferConfig contains parsed configuration for a bpf map event buffer. type BPFEventBufferConfig struct { Enabled bool @@ -4215,9 +4110,27 @@ func parseBPFMapEventConfigs(confs BPFEventBufferConfigs, confMap map[string]str for name, confStr := range confMap { conf, err := ParseEventBufferTupleString(confStr) if err != nil { - return fmt.Errorf("unable to parse %s: %s", BPFMapEventBuffers, err) + return fmt.Errorf("unable to parse %s: %w", BPFMapEventBuffers, err) } confs[name] = conf } return nil } + +func (d *DaemonConfig) EnforceLXCFibLookup() bool { + // See https://github.com/cilium/cilium/issues/27343 for the symptoms. 
+ // + // We want to enforce FIB lookup if EndpointRoutes are enabled, because + // this was a config dependency change which caused different behaviour + // since v1.14.0-snapshot.2. We will remove this hack later, once we + // have auto-device detection on by default. + return d.EnableEndpointRoutes +} + +func (d *DaemonConfig) GetZone(id uint8) string { + return d.ReverseFixedZoneMapping[id] +} + +func (d *DaemonConfig) GetZoneID(zone string) uint8 { + return d.FixedZoneMapping[zone] +} diff --git a/vendor/github.com/cilium/cilium/pkg/option/daemon.go b/vendor/github.com/cilium/cilium/pkg/option/daemon.go index 3e5ffd368d..003f1cc604 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/daemon.go +++ b/vendor/github.com/cilium/cilium/pkg/option/daemon.go @@ -16,6 +16,7 @@ var ( DaemonMutableOptionLibrary = OptionLibrary{ ConntrackAccounting: &specConntrackAccounting, + PolicyAccounting: &specPolicyAccounting, ConntrackLocal: &specConntrackLocal, Debug: &specDebug, DebugLB: &specDebugLB, @@ -36,6 +37,6 @@ func init() { } // ParseDaemonOption parses a string as daemon option -func ParseDaemonOption(opt string) (string, OptionSetting, error) { - return ParseOption(opt, &DaemonOptionLibrary) +func ParseDaemonOption(opt string) (string, OptionSetting, bool, error) { + return DaemonOptionLibrary.ParseOption(opt) } diff --git a/vendor/github.com/cilium/cilium/pkg/option/endpoint.go b/vendor/github.com/cilium/cilium/pkg/option/endpoint.go index 29e579368b..b72049ebe3 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/endpoint.go +++ b/vendor/github.com/cilium/cilium/pkg/option/endpoint.go @@ -6,6 +6,7 @@ package option var ( endpointMutableOptionLibrary = OptionLibrary{ ConntrackAccounting: &specConntrackAccounting, + PolicyAccounting: &specPolicyAccounting, ConntrackLocal: &specConntrackLocal, Debug: &specDebug, DebugLB: &specDebugLB, diff --git a/vendor/github.com/cilium/cilium/pkg/option/features.go b/vendor/github.com/cilium/cilium/pkg/option/features.go new file mode 100644 index 0000000000..06543451ae --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/option/features.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package option + +import "cmp" + +// NetworkPolicyEnabled returns true if the network policy enforcement +// system is enabled for K8s, Cilium and Cilium Clusterwide network policies. +func NetworkPolicyEnabled(cfg *DaemonConfig) bool { + return cmp.Or( + cfg.EnablePolicy != NeverEnforce, + cfg.EnableK8sNetworkPolicy, + cfg.EnableCiliumNetworkPolicy, + cfg.EnableCiliumClusterwideNetworkPolicy, + !cfg.DisableCiliumEndpointCRD, + cfg.IdentityAllocationMode != IdentityAllocationModeCRD, + ) +} diff --git a/vendor/github.com/cilium/cilium/pkg/option/option.go b/vendor/github.com/cilium/cilium/pkg/option/option.go index c2ca5d5ecd..ad8e0467a3 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/option.go +++ b/vendor/github.com/cilium/cilium/pkg/option/option.go @@ -4,8 +4,9 @@ package option import ( + "encoding/json" "fmt" - "sort" + "slices" "strings" "github.com/cilium/cilium/api/v1/models" @@ -44,6 +45,9 @@ type Option struct { Format FormatFunc // Verify is called prior to applying the option Verify VerifyFunc + // Deprecated is true if this option is deprecated and a warning + // should be printed. + Deprecated bool } // OptionSetting specifies the different choices each Option has. 
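The new pkg/option/features.go helper above folds several boolean settings together with cmp.Or from the Go standard library. The following standalone sketch (not part of the vendored code) shows that behaviour: cmp.Or returns its first non-zero argument, so for booleans it acts as a logical OR, and for strings it picks the first non-empty value. It requires Go 1.22 or newer.

package main

import (
	"cmp"
	"fmt"
)

func main() {
	// false is the zero value for bool, so the first true argument wins.
	fmt.Println(cmp.Or(false, false, true)) // true
	fmt.Println(cmp.Or(false, false))       // false

	// The same generic helper works for any comparable type, for example
	// picking the first non-empty string in a fallback chain.
	fmt.Println(cmp.Or("", "fallback", "ignored")) // fallback
}
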
@@ -103,7 +107,7 @@ func NormalizeBool(value string) (OptionSetting, error) { func (l *OptionLibrary) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) { o := make(OptionMap) for k, v := range n { - _, newVal, err := ParseKeyValue(l, k, v) + _, newVal, _, err := l.parseKeyValue(k, v) if err != nil { return nil, err } @@ -148,9 +152,49 @@ func (om OptionMap) DeepCopy() OptionMap { // locking by the caller, while functions with internal access presume // the caller to have taken care of any locking needed. type IntOptions struct { - optsMU lock.RWMutex // Protects all variables from this structure below this line - Opts OptionMap `json:"map"` - Library *OptionLibrary `json:"-"` + optsMU lock.RWMutex // Protects all variables from this structure below this line + opts OptionMap + library *OptionLibrary +} + +// intOptions is only used for JSON +type intOptions struct { + Opts OptionMap `json:"map"` +} + +// ValidateConfigurationMap validates a given configuration map based on the +// option library +func (o *IntOptions) ValidateConfigurationMap(n models.ConfigurationMap) (OptionMap, error) { + return o.library.ValidateConfigurationMap(n) +} + +// Custom json marshal for unexported 'opts' while holding a read lock +func (o *IntOptions) MarshalJSON() ([]byte, error) { + o.optsMU.RLock() + defer o.optsMU.RUnlock() + return json.Marshal(&intOptions{ + Opts: o.opts, + }) +} + +// Custom json unmarshal for unexported 'opts' while holding a write lock +func (o *IntOptions) UnmarshalJSON(b []byte) error { + o.optsMU.Lock() + defer o.optsMU.Unlock() + err := json.Unmarshal(b, &intOptions{ + Opts: o.opts, + }) + if err != nil { + return err + } + // Silently discard unsupported options + for k := range o.opts { + key, _ := o.library.Lookup(k) + if key == "" { + delete(o.opts, k) + } + } + return nil } // GetImmutableModel returns the set of immutable options as a ConfigurationMap API model. @@ -163,8 +207,8 @@ func (o *IntOptions) GetImmutableModel() *models.ConfigurationMap { func (o *IntOptions) GetMutableModel() *models.ConfigurationMap { mutableCfg := make(models.ConfigurationMap) o.optsMU.RLock() - for k, v := range o.Opts { - _, config := o.Library.Lookup(k) + for k, v := range o.opts { + _, config := o.library.Lookup(k) // It's possible that an option has since been removed and thus has // no corresponding configuration; need to check if configuration is @@ -189,8 +233,8 @@ func (o *IntOptions) GetMutableModel() *models.ConfigurationMap { func (o *IntOptions) DeepCopy() *IntOptions { o.optsMU.RLock() cpy := &IntOptions{ - Opts: o.Opts.DeepCopy(), - Library: o.Library, + opts: o.opts.DeepCopy(), + library: o.library, } o.optsMU.RUnlock() return cpy @@ -198,13 +242,13 @@ func (o *IntOptions) DeepCopy() *IntOptions { func NewIntOptions(lib *OptionLibrary) *IntOptions { return &IntOptions{ - Opts: OptionMap{}, - Library: lib, + opts: OptionMap{}, + library: lib, } } func (o *IntOptions) getValue(key string) OptionSetting { - value, exists := o.Opts[key] + value, exists := o.opts[key] if !exists { return OptionDisabled } @@ -226,7 +270,7 @@ func (o *IntOptions) IsEnabled(key string) bool { // expected to have validated the input to this function. 
func (o *IntOptions) SetValidated(key string, value OptionSetting) { o.optsMU.Lock() - o.Opts[key] = value + o.opts[key] = value o.optsMU.Unlock() } @@ -237,31 +281,31 @@ func (o *IntOptions) SetBool(key string, value bool) { intValue = OptionEnabled } o.optsMU.Lock() - o.Opts[key] = intValue + o.opts[key] = intValue o.optsMU.Unlock() } func (o *IntOptions) Delete(key string) { o.optsMU.Lock() - delete(o.Opts, key) + delete(o.opts, key) o.optsMU.Unlock() } func (o *IntOptions) SetIfUnset(key string, value OptionSetting) { o.optsMU.Lock() - if _, exists := o.Opts[key]; !exists { - o.Opts[key] = value + if _, exists := o.opts[key]; !exists { + o.opts[key] = value } o.optsMU.Unlock() } func (o *IntOptions) InheritDefault(parent *IntOptions, key string) { o.optsMU.RLock() - o.Opts[key] = parent.GetValue(key) + o.opts[key] = parent.GetValue(key) o.optsMU.RUnlock() } -func ParseOption(arg string, lib *OptionLibrary) (string, OptionSetting, error) { +func (l *OptionLibrary) ParseOption(arg string) (string, OptionSetting, bool, error) { result := OptionEnabled if arg[0] == '!' { @@ -273,21 +317,21 @@ func ParseOption(arg string, lib *OptionLibrary) (string, OptionSetting, error) arg = optionSplit[0] if len(optionSplit) > 1 { if result == OptionDisabled { - return "", OptionDisabled, fmt.Errorf("invalid boolean format") + return "", OptionDisabled, false, fmt.Errorf("invalid boolean format") } - return ParseKeyValue(lib, arg, optionSplit[1]) + return l.parseKeyValue(arg, optionSplit[1]) } - return "", OptionDisabled, fmt.Errorf("invalid option format") + return "", OptionDisabled, false, fmt.Errorf("invalid option format") } -func ParseKeyValue(lib *OptionLibrary, arg, value string) (string, OptionSetting, error) { +func (l *OptionLibrary) parseKeyValue(arg, value string) (string, OptionSetting, bool, error) { var result OptionSetting - key, spec := lib.Lookup(arg) + key, spec := l.Lookup(arg) if key == "" { - return "", OptionDisabled, fmt.Errorf("unknown option %q", arg) + return "", OptionDisabled, false, fmt.Errorf("unknown option %q", arg) } var err error @@ -297,40 +341,40 @@ func ParseKeyValue(lib *OptionLibrary, arg, value string) (string, OptionSetting result, err = NormalizeBool(value) } if err != nil { - return "", OptionDisabled, err + return "", OptionDisabled, false, err } if spec.Immutable { - return "", OptionDisabled, fmt.Errorf("specified option is immutable (read-only)") + return "", OptionDisabled, spec.Deprecated, fmt.Errorf("specified option is immutable (read-only)") } - return key, result, nil + return key, result, spec.Deprecated, nil } // getFmtOpt returns #define name if option exists and is set to true in endpoint's Opts // map or #undef name if option does not exist or exists but is set to false func (o *IntOptions) getFmtOpt(name string) string { - define := o.Library.Define(name) + define := o.library.Define(name) if define == "" { return "" } value := o.getValue(name) if value != OptionDisabled { - return fmt.Sprintf("#define %s %d", o.Library.Define(name), value) + return fmt.Sprintf("#define %s %d", o.library.Define(name), value) } - return "#undef " + o.Library.Define(name) + return "#undef " + o.library.Define(name) } func (o *IntOptions) GetFmtList() string { txt := "" o.optsMU.RLock() - opts := make([]string, 0, len(o.Opts)) - for k := range o.Opts { + opts := make([]string, 0, len(o.opts)) + for k := range o.opts { opts = append(opts, k) } - sort.Strings(opts) + slices.Sort(opts) for _, k := range opts { def := o.getFmtOpt(k) @@ -349,23 +393,23 @@ func (o 
*IntOptions) Dump() { } o.optsMU.RLock() - opts := make([]string, 0, len(o.Opts)) - for k := range o.Opts { + opts := make([]string, 0, len(o.opts)) + for k := range o.opts { opts = append(opts, k) } - sort.Strings(opts) + slices.Sort(opts) for _, k := range opts { var text string - _, option := o.Library.Lookup(k) + _, option := o.library.Lookup(k) if option == nil || option.Format == nil { - if o.Opts[k] == OptionDisabled { + if o.opts[k] == OptionDisabled { text = "Disabled" } else { text = "Enabled" } } else { - text = option.Format(o.Opts[k]) + text = option.Format(o.opts[k]) } fmt.Printf("%-24s %s\n", k, text) @@ -378,17 +422,17 @@ func (o *IntOptions) Validate(n models.ConfigurationMap) error { o.optsMU.RLock() defer o.optsMU.RUnlock() for k, v := range n { - _, newVal, err := ParseKeyValue(o.Library, k, v) + _, newVal, _, err := o.library.parseKeyValue(k, v) if err != nil { return err } // Ignore validation if value is identical - if oldVal, ok := o.Opts[k]; ok && oldVal == newVal { + if oldVal, ok := o.opts[k]; ok && oldVal == newVal { continue } - if err := o.Library.Validate(k, v); err != nil { + if err := o.library.Validate(k, v); err != nil { return err } } @@ -401,35 +445,35 @@ type ChangedFunc func(key string, value OptionSetting, data interface{}) // enable enables the option `name` with all its dependencies func (o *IntOptions) enable(name string) { - if o.Library != nil { - if _, opt := o.Library.Lookup(name); opt != nil { + if o.library != nil { + if _, opt := o.library.Lookup(name); opt != nil { for _, dependency := range opt.Requires { o.enable(dependency) } } } - o.Opts[name] = OptionEnabled + o.opts[name] = OptionEnabled } // set enables the option `name` with all its dependencies, and sets the // integer level of the option to `value`. func (o *IntOptions) set(name string, value OptionSetting) { o.enable(name) - o.Opts[name] = value + o.opts[name] = value } // disable disables the option `name`. All options which depend on the option // to be disabled will be disabled. Options which have previously been enabled // as a dependency will not be automatically disabled. 
func (o *IntOptions) disable(name string) { - o.Opts[name] = OptionDisabled + o.opts[name] = OptionDisabled - if o.Library != nil { + if o.library != nil { // Disable all options which have a dependency on the option // that was just disabled - for key, opt := range *o.Library { - if opt.RequiresOption(name) && o.Opts[key] != OptionDisabled { + for key, opt := range *o.library { + if opt.RequiresOption(name) && o.opts[key] != OptionDisabled { o.disable(key) } } @@ -453,7 +497,7 @@ func (o *IntOptions) ApplyValidated(n OptionMap, changed ChangedFunc, data inter o.optsMU.Lock() for k, optVal := range n { - val, ok := o.Opts[k] + val, ok := o.opts[k] if optVal == OptionDisabled { /* Only disable if enabled already */ diff --git a/vendor/github.com/cilium/cilium/pkg/option/runtime_options.go b/vendor/github.com/cilium/cilium/pkg/option/runtime_options.go index 07cb16dde4..e872b55646 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/runtime_options.go +++ b/vendor/github.com/cilium/cilium/pkg/option/runtime_options.go @@ -15,6 +15,7 @@ const ( TraceSockNotify = "TraceSockNotification" PolicyVerdictNotify = "PolicyVerdictNotification" PolicyAuditMode = "PolicyAuditMode" + PolicyAccounting = "PolicyAccounting" MonitorAggregation = "MonitorAggregationLevel" SourceIPVerification = "SourceIPVerification" AlwaysEnforce = "always" @@ -33,6 +34,7 @@ var ( Define: "CONNTRACK_LOCAL", Description: "Use endpoint dedicated tracking table instead of global one", Requires: nil, + Deprecated: true, } specDebug = Option{ @@ -60,6 +62,11 @@ var ( Description: "Enable trace notifications", } + specPolicyAccounting = Option{ + Define: "POLICY_ACCOUNTING", + Description: "Enable policy accounting ", + } + specPolicyVerdictNotify = Option{ Define: "POLICY_VERDICT_NOTIFY", Description: "Enable policy verdict notifications", diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go index 5e70c3cd62..e30a00b91f 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/cidr.go @@ -4,33 +4,28 @@ package api import ( - "net" "net/netip" "strings" - "github.com/cilium/cilium/pkg/ip" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/cilium/cilium/pkg/labels" - cidrpkg "github.com/cilium/cilium/pkg/labels/cidr" + "github.com/cilium/cilium/pkg/option" ) -// +kubebuilder:validation:Pattern=`^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\/([0-9]|[1-2][0-9]|3[0-2])$|^s*((([0-9A-Fa-f]{1,4}:){7}(:|([0-9A-Fa-f]{1,4})))|(([0-9A-Fa-f]{1,4}:){6}:([0-9A-Fa-f]{1,4})?)|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){0,1}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){0,2}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){0,3}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){0,4}):([0-9A-Fa-f]{1,4})?))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){0,5}):([0-9A-Fa-f]{1,4})?))|(:(:|((:[0-9A-Fa-f]{1,4}){1,7}))))(%.+)?s*/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$` - // CIDR specifies a block of IP addresses. // Example: 192.0.2.1/32 +// +// +kubebuilder:validation:Format=cidr type CIDR string -// CIDRMatchAll is a []CIDR that matches everything -var CIDRMatchAll = []CIDR{CIDR("0.0.0.0/0"), CIDR("::/0")} +var ( + ipv4All = CIDR("0.0.0.0/0") + ipv6All = CIDR("::/0") -// MatchesAll determines whether the CIDR matches all traffic. 
-func (c CIDR) MatchesAll() bool { - for _, wildcard := range CIDRMatchAll { - if c == wildcard { - return true - } - } - return false -} + worldLabelNonDualStack = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorld} + worldLabelV4 = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv4} + worldLabelV6 = labels.Label{Source: labels.LabelSourceReserved, Key: labels.IDNameWorldIPv6} +) // CIDRRule is a rule that specifies a CIDR prefix to/from which outside // communication is allowed, along with an optional list of subnets within that @@ -43,11 +38,18 @@ type CIDRRule struct { // CIDRGroupRef is a reference to a CiliumCIDRGroup object. // A CiliumCIDRGroup contains a list of CIDRs that the endpoint, subject to - // the rule, can (Ingress) or cannot (IngressDeny) receive connections from. + // the rule, can (Ingress/Egress) or cannot (IngressDeny/EgressDeny) receive + // connections from. // // +kubebuilder:validation:OneOf CIDRGroupRef CIDRGroupRef `json:"cidrGroupRef,omitempty"` + // CIDRGroupSelector selects CiliumCIDRGroups by their labels, + // rather than by name. + // + // +kubebuilder:validation:OneOf + CIDRGroupSelector *slim_metav1.LabelSelector `json:"cidrGroupSelector,omitempty"` + // ExceptCIDRs is a list of IP blocks which the endpoint subject to the rule // is not allowed to initiate connections to. These CIDR prefixes should be // contained within Cidr, using ExceptCIDRs together with CIDRGroupRef is not @@ -83,20 +85,39 @@ func (s CIDRSlice) GetAsEndpointSelectors() EndpointSelectorSlice { // If multiple CIDRs representing reserved:world are in this CIDRSlice, // we only have to add the EndpointSelector representing reserved:world // once. - var hasWorldBeenAdded bool + var hasIPv4AllBeenAdded, hasIPv6AllBeenAdded bool slice := EndpointSelectorSlice{} for _, cidr := range s { - if cidr.MatchesAll() && !hasWorldBeenAdded { - hasWorldBeenAdded = true - slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + if cidr == ipv4All { + hasIPv4AllBeenAdded = true + } + if cidr == ipv6All { + hasIPv6AllBeenAdded = true } - lbl, err := cidrpkg.IPStringToLabel(string(cidr)) + lbl, err := labels.IPStringToLabel(string(cidr)) if err == nil { slice = append(slice, NewESFromLabels(lbl)) } // TODO: Log the error? } + if option.Config.IsDualStack() { + // If Cilium is in dual-stack mode then world-ipv4 and + // world-ipv6 need to be distinguished from one another. + if hasIPv4AllBeenAdded && hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } + if hasIPv4AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv4]) + } + if hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorldIPv6]) + } + } else if option.Config.EnableIPv4 && hasIPv4AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } else if option.Config.EnableIPv6 && hasIPv6AllBeenAdded { + slice = append(slice, ReservedEndpointSelectors[labels.IDNameWorld]) + } return slice } @@ -124,43 +145,78 @@ type CIDRRuleSlice []CIDRRule // GetAsEndpointSelectors returns the provided CIDRRule slice as a slice of // endpoint selectors +// +// The ExceptCIDRs block is inserted as a negative match. Specifically, the +// DoesNotExist qualifier. For example, the CIDRRule +// +// cidr: 1.1.1.0/24 +// exceptCIDRs: ["1.1.1.1/32"] +// +// results in the selector equivalent to "cidr:1.1.1.0/24 !cidr:1.1.1.1/32". 
+// +// This works because the label selectors will select numeric identities belonging only +// to the shorter prefixes. However, longer prefixes will have a different numeric +// identity, as the bpf ipcache is an LPM lookup. This essentially acts as a +// "carve-out", using the LPM mechanism to exlude subsets of a larger prefix. func (s CIDRRuleSlice) GetAsEndpointSelectors() EndpointSelectorSlice { - cidrs := ComputeResultantCIDRSet(s) - return cidrs.GetAsEndpointSelectors() -} + ces := make(EndpointSelectorSlice, 0, len(s)) -// StringSlice returns the CIDRRuleSlice as a slice of strings. -func (s CIDRRuleSlice) StringSlice() []string { - result := make([]string, 0, len(s)) - for _, c := range s { - result = append(result, c.String()) - } - return result -} + for _, r := range s { + ls := slim_metav1.LabelSelector{ + MatchExpressions: make([]slim_metav1.LabelSelectorRequirement, 0, 1+len(r.ExceptCIDRs)), + } -// ComputeResultantCIDRSet converts a slice of CIDRRules into a slice of -// individual CIDRs. This expands the cidr defined by each CIDRRule, applies -// the CIDR exceptions defined in "ExceptCIDRs", and forms a minimal set of -// CIDRs that cover all of the CIDRRules. -// -// Assumes no error checking is necessary as CIDRRule.Sanitize already does this. -func ComputeResultantCIDRSet(cidrs CIDRRuleSlice) CIDRSlice { - var allResultantAllowedCIDRs CIDRSlice - for _, s := range cidrs { - _, allowNet, _ := net.ParseCIDR(string(s.Cidr)) - - var removeSubnets []*net.IPNet - for _, t := range s.ExceptCIDRs { - _, removeSubnet, _ := net.ParseCIDR(string(t)) - removeSubnets = append(removeSubnets, removeSubnet) + // add the "main" label: + // either a CIDR, CIDRGroupRef, or CIDRGroupSelector + if r.Cidr != "" { + var lbl labels.Label + switch r.Cidr { + case ipv4All: + if option.Config.IsDualStack() { + lbl = worldLabelV4 + } else { + lbl = worldLabelNonDualStack + } + case ipv6All: + if option.Config.IsDualStack() { + lbl = worldLabelV6 + } else { + lbl = worldLabelNonDualStack + } + default: + lbl, _ = labels.IPStringToLabel(string(r.Cidr)) + } + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpExists, + }) + } else if r.CIDRGroupRef != "" { + lbl := LabelForCIDRGroupRef(string(r.CIDRGroupRef)) + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpExists, + }) + } else if r.CIDRGroupSelector != nil { + ls = *NewESFromK8sLabelSelector(labels.LabelSourceCIDRGroupKeyPrefix, r.CIDRGroupSelector).LabelSelector + } else { + // should never be hit, but paranoia + continue } - resultantAllowedCIDRs := ip.RemoveCIDRs([]*net.IPNet{allowNet}, removeSubnets) - for _, u := range resultantAllowedCIDRs { - allResultantAllowedCIDRs = append(allResultantAllowedCIDRs, CIDR(u.String())) + // exclude any excepted CIDRs. + // Do so by inserting a "DoesNotExist" requirement for the given prefix key + for _, exceptCIDR := range r.ExceptCIDRs { + lbl, _ := labels.IPStringToLabel(string(exceptCIDR)) + ls.MatchExpressions = append(ls.MatchExpressions, slim_metav1.LabelSelectorRequirement{ + Key: lbl.GetExtendedKey(), + Operator: slim_metav1.LabelSelectorOpDoesNotExist, + }) } + + ces = append(ces, NewESFromK8sLabelSelector("", &ls)) } - return allResultantAllowedCIDRs + + return ces } // addrsToCIDRRules generates CIDRRules for the IPs passed in. 
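The rewritten CIDRRuleSlice.GetAsEndpointSelectors above encodes the exceptCIDRs carve-out as a DoesNotExist label requirement instead of expanding CIDRs. Purely as an illustration of the intended semantics (not the vendored implementation, which works on labels and the ipcache LPM), the sketch below checks an address against an allow prefix minus a set of excepted prefixes; the helper name allowedByCIDRRule is hypothetical.

package main

import (
	"fmt"
	"net/netip"
)

// allowedByCIDRRule reports whether addr falls inside the allowed prefix and
// inside none of the excepted prefixes, i.e. the
// "cidr:1.1.1.0/24 !cidr:1.1.1.1/32" style carve-out described above.
func allowedByCIDRRule(addr netip.Addr, allow netip.Prefix, excepts []netip.Prefix) bool {
	if !allow.Contains(addr) {
		return false
	}
	for _, except := range excepts {
		if except.Contains(addr) {
			return false
		}
	}
	return true
}

func main() {
	allow := netip.MustParsePrefix("1.1.1.0/24")
	excepts := []netip.Prefix{netip.MustParsePrefix("1.1.1.1/32")}

	fmt.Println(allowedByCIDRRule(netip.MustParseAddr("1.1.1.2"), allow, excepts)) // true
	fmt.Println(allowedByCIDRRule(netip.MustParseAddr("1.1.1.1"), allow, excepts)) // false
}
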
@@ -187,3 +243,18 @@ func addrsToCIDRRules(addrs []netip.Addr) []CIDRRule { // A CIDR Group is a list of CIDRs whose IP addresses should be considered as a // same entity when applying fromCIDRGroupRefs policies on incoming network traffic. type CIDRGroupRef string + +const LabelPrefixGroupName = "io.cilium.policy.cidrgroupname" + +func LabelForCIDRGroupRef(ref string) labels.Label { + var key strings.Builder + key.Grow(len(LabelPrefixGroupName) + len(ref) + 1) + key.WriteString(LabelPrefixGroupName) + key.WriteString("/") + key.WriteString(ref) + return labels.NewLabel( + key.String(), + "", + labels.LabelSourceCIDRGroup, + ) +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go index 2af43d19df..ca7f82a56d 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/egress.go @@ -7,11 +7,14 @@ import ( "context" slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/slices" ) // EgressCommonRule is a rule that shares some of its fields across the // EgressRule and EgressDenyRule. It's publicly exported so the code generators // can generate code for this structure. +// +// +deepequal-gen:private-method=true type EgressCommonRule struct { // ToEndpoints is a list of endpoints identified by an EndpointSelector to // which the endpoints subject to the rule are allowed to communicate. @@ -77,12 +80,7 @@ type EgressCommonRule struct { // ToServices is a list of services to which the endpoint subject // to the rule is allowed to initiate connections. - // Currently Cilium only supports toServices for K8s services without - // selectors. - // - // Example: - // Any endpoint with the label "app=backend-app" is allowed to - // initiate connections to all cidrs backing the "external-service" service + // Currently Cilium only supports toServices for K8s services. // // +kubebuilder:validation:Optional ToServices []Service `json:"toServices,omitempty"` @@ -98,13 +96,40 @@ type EgressCommonRule struct { // - 'sg-XXXXXXXXXXXXX' // // +kubebuilder:validation:Optional - ToGroups []ToGroups `json:"toGroups,omitempty"` + ToGroups []Groups `json:"toGroups,omitempty"` + + // ToNodes is a list of nodes identified by an + // EndpointSelector to which endpoints subject to the rule is allowed to communicate. + // + // +kubebuilder:validation:Optional + ToNodes []EndpointSelector `json:"toNodes,omitempty"` // TODO: Move this to the policy package // (https://github.com/cilium/cilium/issues/8353) aggregatedSelectors EndpointSelectorSlice `json:"-"` } +// DeepEqual returns true if both EgressCommonRule are deep equal. +// The semantic of a nil slice in one of its fields is different from the semantic +// of an empty non-nil slice, thus it explicitly checks for that case before calling +// the autogenerated method. +func (in *EgressCommonRule) DeepEqual(other *EgressCommonRule) bool { + if slices.XorNil(in.ToEndpoints, other.ToEndpoints) { + return false + } + if slices.XorNil(in.ToCIDR, other.ToCIDR) { + return false + } + if slices.XorNil(in.ToCIDRSet, other.ToCIDRSet) { + return false + } + if slices.XorNil(in.ToEntities, other.ToEntities) { + return false + } + + return in.deepEqual(other) +} + // EgressRule contains all rule types which can be applied at egress, i.e. // network traffic that originates inside the endpoint and exits the endpoint // selected by the endpointSelector. 
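The DeepEqual wrapper added to EgressCommonRule above treats a nil slice and an empty non-nil slice as different values before falling back to the generated comparison. The snippet below is a self-contained sketch of that nil/empty distinction; xorNil is a stand-in for the internal pkg/slices.XorNil helper and is assumed, not taken from the vendored code.

package main

import "fmt"

// xorNil reports whether exactly one of the two slices is nil, the case in
// which two rules must be considered unequal even though both have length zero.
func xorNil[T any](a, b []T) bool {
	return (a == nil) != (b == nil)
}

func main() {
	var unset []string  // nil: the field was omitted from the rule
	empty := []string{} // non-nil but empty: the field was set to an empty list

	fmt.Println(xorNil(unset, empty)) // true  -> rules differ
	fmt.Println(xorNil(empty, empty)) // false -> defer to the generated deepEqual
}

This distinction matters for policy semantics: as the getAggregatedSelectors comments later in this file note, an explicitly empty selector list selects nothing, while an omitted one simply does not constrain the rule.
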
@@ -148,15 +173,6 @@ type EgressRule struct { // ToFQDN rule will not apply to that IP. // Note: ToFQDN cannot occur in the same policy as other To* rules. // - // The current implementation has a number of limitations: - // - The DNS resolution originates from cilium-agent, and not from the pods. - // Differences between the responses seen by cilium agent and a particular - // pod will whitelist the incorrect IP. - // - DNS TTLs are ignored, and cilium-agent will repoll on a short interval - // (5 seconds). Each change to the DNS data will trigger a policy - // regeneration. This may result in delayed updates to the policy for an - // endpoint when the data changes often or the system is under load. - // // +kubebuilder:validation:Optional ToFQDNs FQDNSelectorSlice `json:"toFQDNs,omitempty"` @@ -228,6 +244,13 @@ type EgressDenyRule struct { // ToEndpoints is not aggregated due to requirement folding in // GetDestinationEndpointSelectorsWithRequirements() func (e *EgressCommonRule) getAggregatedSelectors() EndpointSelectorSlice { + // explicitly check for empty non-nil slices, it should not result in any identity being selected. + if (e.ToEntities != nil && len(e.ToEntities) == 0) || + (e.ToCIDR != nil && len(e.ToCIDR) == 0) || + (e.ToCIDRSet != nil && len(e.ToCIDRSet) == 0) { + return nil + } + res := make(EndpointSelectorSlice, 0, len(e.ToEntities)+len(e.ToCIDR)+len(e.ToCIDRSet)) res = append(res, e.ToEntities.GetAsEndpointSelectors()...) res = append(res, e.ToCIDR.GetAsEndpointSelectors()...) @@ -291,7 +314,13 @@ func (e *EgressCommonRule) getDestinationEndpointSelectorsWithRequirements( requirements []slim_metav1.LabelSelectorRequirement, ) EndpointSelectorSlice { - res := make(EndpointSelectorSlice, 0, len(e.ToEndpoints)+len(e.aggregatedSelectors)) + // explicitly check for empty non-nil slices, it should not result in any identity being selected. + if e.aggregatedSelectors == nil || (e.ToEndpoints != nil && len(e.ToEndpoints) == 0) || + (e.ToNodes != nil && len(e.ToNodes) == 0) { + return nil + } + + res := make(EndpointSelectorSlice, 0, len(e.ToEndpoints)+len(e.aggregatedSelectors)+len(e.ToNodes)) if len(requirements) > 0 && len(e.ToEndpoints) > 0 { for idx := range e.ToEndpoints { @@ -305,6 +334,7 @@ func (e *EgressCommonRule) getDestinationEndpointSelectorsWithRequirements( } } else { res = append(res, e.ToEndpoints...) + res = append(res, e.ToNodes...) } return append(res, e.aggregatedSelectors...) } @@ -328,6 +358,19 @@ func (e *EgressCommonRule) RequiresDerivative() bool { return len(e.ToGroups) > 0 } +func (e *EgressCommonRule) IsL3() bool { + if e == nil { + return false + } + return len(e.ToEndpoints) > 0 || + len(e.ToRequires) > 0 || + len(e.ToCIDR) > 0 || + len(e.ToCIDRSet) > 0 || + len(e.ToEntities) > 0 || + len(e.ToGroups) > 0 || + len(e.ToNodes) > 0 +} + // CreateDerivative will return a new rule based on the data gathered by the // rules that creates a new derivative policy. // In the case of ToGroups will call outside using the groups callback and this @@ -338,16 +381,11 @@ func (e *EgressRule) CreateDerivative(ctx context.Context) (*EgressRule, error) return newRule, nil } newRule.ToCIDRSet = make(CIDRRuleSlice, 0, len(e.ToGroups)) - for _, group := range e.ToGroups { - cidrSet, err := group.GetCidrSet(ctx) - if err != nil { - return &EgressRule{}, err - } - if len(cidrSet) == 0 { - return &EgressRule{}, nil - } - newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...) 
+ cidrSet, err := ExtractCidrSet(ctx, e.ToGroups) + if err != nil { + return &EgressRule{}, err } + newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...) newRule.ToGroups = nil e.SetAggregatedSelectors() return newRule, nil @@ -363,16 +401,11 @@ func (e *EgressDenyRule) CreateDerivative(ctx context.Context) (*EgressDenyRule, return newRule, nil } newRule.ToCIDRSet = make(CIDRRuleSlice, 0, len(e.ToGroups)) - for _, group := range e.ToGroups { - cidrSet, err := group.GetCidrSet(ctx) - if err != nil { - return &EgressDenyRule{}, err - } - if len(cidrSet) == 0 { - return &EgressDenyRule{}, nil - } - newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...) + cidrSet, err := ExtractCidrSet(ctx, e.ToGroups) + if err != nil { + return &EgressDenyRule{}, err } + newRule.ToCIDRSet = append(e.ToCIDRSet, cidrSet...) newRule.ToGroups = nil e.SetAggregatedSelectors() return newRule, nil diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go index a12d12e934..733ec71c3b 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/entity.go @@ -23,6 +23,16 @@ const ( // endpoint's cluster EntityWorld Entity = "world" + // EntityWorldIPv4 is an entity that represents traffic external to + // endpoint's cluster, specifically an IPv4 endpoint, to distinguish + // it from IPv6 in dual-stack mode. + EntityWorldIPv4 Entity = "world-ipv4" + + // EntityWorldIPv6 is an entity that represents traffic external to + // endpoint's cluster, specifically an IPv6 endpoint, to distinguish + // it from IPv4 in dual-stack mode. + EntityWorldIPv6 Entity = "world-ipv6" + // EntityCluster is an entity that represents traffic within the // endpoint's cluster, to endpoints not managed by cilium EntityCluster Entity = "cluster" @@ -55,6 +65,10 @@ const ( var ( endpointSelectorWorld = NewESFromLabels(labels.NewLabel(labels.IDNameWorld, "", labels.LabelSourceReserved)) + endpointSelectorWorldIPv4 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv4, "", labels.LabelSourceReserved)) + + endpointSelectorWorldIPv6 = NewESFromLabels(labels.NewLabel(labels.IDNameWorldIPv6, "", labels.LabelSourceReserved)) + endpointSelectorHost = NewESFromLabels(labels.NewLabel(labels.IDNameHost, "", labels.LabelSourceReserved)) endpointSelectorInit = NewESFromLabels(labels.NewLabel(labels.IDNameInit, "", labels.LabelSourceReserved)) @@ -73,9 +87,13 @@ var ( // EntitySelectorMapping maps special entity names that come in // policies to selectors + // If you add an entry here, you must also update the CRD + // validation above. 
EntitySelectorMapping = map[Entity]EndpointSelectorSlice{ EntityAll: {WildcardEndpointSelector}, - EntityWorld: {endpointSelectorWorld}, + EntityWorld: {endpointSelectorWorld, endpointSelectorWorldIPv4, endpointSelectorWorldIPv6}, + EntityWorldIPv4: {endpointSelectorWorldIPv4}, + EntityWorldIPv6: {endpointSelectorWorldIPv6}, EntityHost: {endpointSelectorHost}, EntityInit: {endpointSelectorInit}, EntityIngress: {endpointSelectorIngress}, @@ -114,7 +132,7 @@ func (s EntitySlice) GetAsEndpointSelectors() EndpointSelectorSlice { } // InitEntities is called to initialize the policy API layer -func InitEntities(clusterName string, treatRemoteNodeAsHost bool) { +func InitEntities(clusterName string) { EntitySelectorMapping[EntityCluster] = EndpointSelectorSlice{ endpointSelectorHost, endpointSelectorRemoteNode, @@ -125,11 +143,4 @@ func InitEntities(clusterName string, treatRemoteNodeAsHost bool) { endpointSelectorKubeAPIServer, NewESFromLabels(labels.NewLabel(k8sapi.PolicyLabelCluster, clusterName, labels.LabelSourceK8s)), } - - hostSelectors := make(EndpointSelectorSlice, 0, 2) - hostSelectors = append(hostSelectors, endpointSelectorHost) - if treatRemoteNodeAsHost { - hostSelectors = append(hostSelectors, endpointSelectorRemoteNode) - } - EntitySelectorMapping[EntityHost] = hostSelectors } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go b/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go index 1d015975fe..765ca48cf1 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/fqdn.go @@ -10,6 +10,7 @@ import ( "github.com/cilium/cilium/pkg/fqdn/dns" "github.com/cilium/cilium/pkg/fqdn/matchpattern" + "github.com/cilium/cilium/pkg/labels" ) var ( @@ -37,7 +38,9 @@ type FQDNSelector struct { // MatchName matches literal DNS names. A trailing "." is automatically added // when missing. // + // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_]+[.]?)+$` + // +kubebuilder:validation:OneOf MatchName string `json:"matchName,omitempty"` // MatchPattern allows using wildcards to match DNS names. All wildcards are @@ -58,7 +61,9 @@ type FQDNSelector struct { // sub.cilium.io and subdomain.cilium.io match, www.cilium.io, // blog.cilium.io, cilium.io and google.com do not // + // +kubebuilder:validation:MaxLength=255 // +kubebuilder:validation:Pattern=`^([-a-zA-Z0-9_*]+[.]?)+$` + // +kubebuilder:validation:OneOf MatchPattern string `json:"matchPattern,omitempty"` } @@ -74,6 +79,19 @@ func (s *FQDNSelector) String() string { return str.String() } +// IdentityLabel returns the label which needs to be added to each identity +// selected by this selector. The identity label is based on the MatchName +// if set, otherwise on the MatchPattern. This matches the behavior of the +// ToRegex function +func (s *FQDNSelector) IdentityLabel() labels.Label { + match := s.MatchPattern + if s.MatchName != "" { + match = s.MatchName + } + + return labels.NewLabel(match, "", labels.LabelSourceFQDN) +} + // sanitize for FQDNSelector is a little wonky. While we do more processing // when using MatchName the basic requirement is that is a valid regexp. We // test that it can compile here. 
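The FQDNSelector changes above distinguish MatchName (a literal DNS name) from MatchPattern (a wildcard pattern). The vendored code delegates pattern handling to its matchpattern package; purely to illustrate the wildcard semantics documented above, the standalone sketch below converts such a pattern into an anchored regular expression. The helper name patternToRegexp, the character class, and the zero-or-more reading of "*" are assumptions for illustration, and trailing-dot normalization is ignored.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// patternToRegexp turns a DNS wildcard pattern into an anchored regexp where
// "*" stands for zero or more DNS-label characters.
func patternToRegexp(pattern string) (*regexp.Regexp, error) {
	escaped := regexp.QuoteMeta(strings.ToLower(pattern))
	// QuoteMeta escapes "*" to `\*`; swap it for a label character class.
	expr := "^" + strings.ReplaceAll(escaped, `\*`, `[-a-z0-9_]*`) + "$"
	return regexp.Compile(expr)
}

func main() {
	re, err := patternToRegexp("sub*.cilium.io")
	if err != nil {
		panic(err)
	}
	fmt.Println(re.MatchString("sub.cilium.io"))       // true
	fmt.Println(re.MatchString("subdomain.cilium.io")) // true
	fmt.Println(re.MatchString("www.cilium.io"))       // false
}

This mirrors the examples in the doc comment above: names whose subdomain component begins with "sub" match, while other subdomains and the bare zone do not.
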
diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go index a9ad79e6ad..9dd95a50de 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/groups.go @@ -7,9 +7,9 @@ import ( "context" "fmt" "net/netip" - "sync" "github.com/cilium/cilium/pkg/ip" + "github.com/cilium/cilium/pkg/lock" ) const ( @@ -17,16 +17,16 @@ const ( ) var ( - providers = sync.Map{} // map with the list of providers to callback to retrieve info from. + providers lock.Map[string, GroupProviderFunc] // map with the list of providers to callback to retrieve info from. ) // GroupProviderFunc is a func that need to be register to be able to // register a new provider in the platform. -type GroupProviderFunc func(context.Context, *ToGroups) ([]netip.Addr, error) +type GroupProviderFunc func(context.Context, *Groups) ([]netip.Addr, error) -// ToGroups structure to store all kinds of new integrations that needs a new +// Groups structure to store all kinds of new integrations that needs a new // derivative policy. -type ToGroups struct { +type Groups struct { AWS *AWSGroup `json:"aws,omitempty"` } @@ -46,22 +46,18 @@ func RegisterToGroupsProvider(providerName string, callback GroupProviderFunc) { // GetCidrSet will return the CIDRRule for the rule using the callbacks that // are register in the platform. -func (group *ToGroups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) { +func (group *Groups) GetCidrSet(ctx context.Context) ([]CIDRRule, error) { var addrs []netip.Addr // Get per provider CIDRSet if group.AWS != nil { - callbackInterface, ok := providers.Load(AWSProvider) + callback, ok := providers.Load(AWSProvider) if !ok { return nil, fmt.Errorf("Provider %s is not registered", AWSProvider) } - callback, ok := callbackInterface.(GroupProviderFunc) - if !ok { - return nil, fmt.Errorf("Provider callback for %s is not a valid instance", AWSProvider) - } awsAddrs, err := callback(ctx, group) if err != nil { return nil, fmt.Errorf( - "Cannot retrieve data from %s provider: %s", + "Cannot retrieve data from %s provider: %w", AWSProvider, err) } addrs = append(addrs, awsAddrs...) diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/http.go b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go index fa7ee9173b..2239420a1e 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/http.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/http.go @@ -35,6 +35,8 @@ type HeaderMatch struct { Mismatch MismatchAction `json:"mismatch,omitempty"` // Name identifies the header. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 Name string `json:"name"` // Secret refers to a secret that contains the value to be matched against. @@ -80,7 +82,10 @@ type PortRuleHTTP struct { Method string `json:"method,omitempty"` // Host is an extended POSIX regex matched against the host header of a - // request, e.g. "foo.com" + // request. Examples: + // + // - foo.bar.com will match the host fooXbar.com or foo-bar.com + // - foo\.bar\.com will only match the host foo.bar.com // // If omitted or empty, the value of the host header is ignored. 
// diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go b/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go index c2a6847295..a40bb882af 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/icmp.go @@ -3,13 +3,65 @@ package api -import "strconv" +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/util/intstr" +) const ( IPv4Family = "IPv4" IPv6Family = "IPv6" ) +var icmpIpv4TypeNameToCode = map[string]string{ + "EchoReply": "0", + "DestinationUnreachable": "3", + "Redirect": "5", + "Echo": "8", + "EchoRequest": "8", + "RouterAdvertisement": "9", + "RouterSelection": "10", + "TimeExceeded": "11", + "ParameterProblem": "12", + "Timestamp": "13", + "TimestampReply": "14", + "Photuris": "40", + "ExtendedEchoRequest": "42", + "ExtendedEchoReply": "43", +} + +var icmpIpv6TypeNameToCode = map[string]string{ + "DestinationUnreachable": "1", + "PacketTooBig": "2", + "TimeExceeded": "3", + "ParameterProblem": "4", + "EchoRequest": "128", + "EchoReply": "129", + "MulticastListenerQuery": "130", + "MulticastListenerReport": "131", + "MulticastListenerDone": "132", + "RouterSolicitation": "133", + "RouterAdvertisement": "134", + "NeighborSolicitation": "135", + "NeighborAdvertisement": "136", + "RedirectMessage": "137", + "RouterRenumbering": "138", + "ICMPNodeInformationQuery": "139", + "ICMPNodeInformationResponse": "140", + "InverseNeighborDiscoverySolicitation": "141", + "InverseNeighborDiscoveryAdvertisement": "142", + "HomeAgentAddressDiscoveryRequest": "144", + "HomeAgentAddressDiscoveryReply": "145", + "MobilePrefixSolicitation": "146", + "MobilePrefixAdvertisement": "147", + "DuplicateAddressRequestCodeSuffix": "157", + "DuplicateAddressConfirmationCodeSuffix": "158", + "ExtendedEchoRequest": "160", + "ExtendedEchoReply": "161", +} + type ICMPRules []ICMPRule // ICMPRule is a list of ICMP fields. @@ -17,10 +69,14 @@ type ICMPRule struct { // Fields is a list of ICMP fields. // // +kubebuilder:validation:Optional + // +kubebuilder:validation:MaxItems=40 Fields []ICMPField `json:"fields,omitempty"` } // ICMPField is a ICMP field. +// +// +deepequal-gen=true +// +deepequal-gen:private-method=true type ICMPField struct { // Family is a IP address version. // Currently, we support `IPv4` and `IPv6`. @@ -32,11 +88,69 @@ type ICMPField struct { Family string `json:"family,omitempty"` // Type is a ICMP-type. - // It should be 0-255 (8bit). + // It should be an 8bit code (0-255), or it's CamelCase name (for example, "EchoReply"). 
+ // Allowed ICMP types are: + // Ipv4: EchoReply | DestinationUnreachable | Redirect | Echo | EchoRequest | + // RouterAdvertisement | RouterSelection | TimeExceeded | ParameterProblem | + // Timestamp | TimestampReply | Photuris | ExtendedEcho Request | ExtendedEcho Reply + // Ipv6: DestinationUnreachable | PacketTooBig | TimeExceeded | ParameterProblem | + // EchoRequest | EchoReply | MulticastListenerQuery| MulticastListenerReport | + // MulticastListenerDone | RouterSolicitation | RouterAdvertisement | NeighborSolicitation | + // NeighborAdvertisement | RedirectMessage | RouterRenumbering | ICMPNodeInformationQuery | + // ICMPNodeInformationResponse | InverseNeighborDiscoverySolicitation | InverseNeighborDiscoveryAdvertisement | + // HomeAgentAddressDiscoveryRequest | HomeAgentAddressDiscoveryReply | MobilePrefixSolicitation | + // MobilePrefixAdvertisement | DuplicateAddressRequestCodeSuffix | DuplicateAddressConfirmationCodeSuffix | + // ExtendedEchoRequest | ExtendedEchoReply // - // +kubebuilder:validation:Maximum=255 - // +kubebuilder:validation:Minimum=0 - Type uint8 `json:"type"` + // +deepequal-gen=false + // +kubebuilder:validation:XIntOrString + // +kubebuilder:validation:Pattern="^([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]|EchoReply|DestinationUnreachable|Redirect|Echo|RouterAdvertisement|RouterSelection|TimeExceeded|ParameterProblem|Timestamp|TimestampReply|Photuris|ExtendedEchoRequest|ExtendedEcho Reply|PacketTooBig|ParameterProblem|EchoRequest|MulticastListenerQuery|MulticastListenerReport|MulticastListenerDone|RouterSolicitation|RouterAdvertisement|NeighborSolicitation|NeighborAdvertisement|RedirectMessage|RouterRenumbering|ICMPNodeInformationQuery|ICMPNodeInformationResponse|InverseNeighborDiscoverySolicitation|InverseNeighborDiscoveryAdvertisement|HomeAgentAddressDiscoveryRequest|HomeAgentAddressDiscoveryReply|MobilePrefixSolicitation|MobilePrefixAdvertisement|DuplicateAddressRequestCodeSuffix|DuplicateAddressConfirmationCodeSuffix)$" + Type *intstr.IntOrString `json:"type"` +} + +func (i *ICMPField) DeepEqual(o *ICMPField) bool { + if i == nil { + return o == nil + } + + if i.Type.String() != o.Type.String() { + return false + } + + return i.deepEqual(o) +} + +// UnmarshalJSON unmarshals the ICMPField from the byte array and check if the Type matches with IP version. +func (i *ICMPField) UnmarshalJSON(value []byte) error { + var t struct { + Family string `json:"family,omitempty"` + Type *intstr.IntOrString `json:"type"` + } + + if err := json.Unmarshal(value, &t); err != nil { + return err + } + + // If i.Type is ICMP type name, the value should be checked if it belongs to the map for the given family. + if t.Type.String() != "0" && t.Type.IntValue() == 0 { + name := t.Type.String() + var nameToCode map[string]string + switch t.Family { + case IPv6Family: + nameToCode = icmpIpv6TypeNameToCode + default: + nameToCode = icmpIpv4TypeNameToCode + } + + if _, ok := nameToCode[name]; !ok { + return fmt.Errorf("ICMP type %s not found in %s", name, t.Family) + } + } + + i.Family = t.Family + i.Type = t.Type + + return nil } // Iterate iterates over all elements of ICMPRules. @@ -80,16 +194,25 @@ func (ir ICMPRule) GetPortRule() *PortRule { // PortProtocol translates ICMPType to PortProtocol. 
func (i ICMPField) PortProtocol() *PortProtocol { var proto L4Proto + var nameToCode map[string]string - typeStr := strconv.Itoa(int(i.Type)) - if i.Family == IPv6Family { + switch i.Family { + case IPv6Family: proto = ProtoICMPv6 - } else { + nameToCode = icmpIpv6TypeNameToCode + + default: proto = ProtoICMP + nameToCode = icmpIpv4TypeNameToCode + } + + port := i.Type.String() + if name, ok := nameToCode[port]; ok { + port = name } pr := PortProtocol{ - Port: typeStr, + Port: port, Protocol: proto, } return &pr diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go b/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go index a727d9df20..ffc10688de 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/ingress.go @@ -4,12 +4,17 @@ package api import ( + "context" + slim_metav1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/cilium/cilium/pkg/slices" ) // IngressCommonRule is a rule that shares some of its fields across the // IngressRule and IngressDenyRule. It's publicly exported so the code // generators can generate code for this structure. +// +// +deepequal-gen:private-method=true type IngressCommonRule struct { // FromEndpoints is a list of endpoints identified by an // EndpointSelector which are allowed to communicate with the endpoint @@ -72,11 +77,52 @@ type IngressCommonRule struct { // +kubebuilder:validation:Optional FromEntities EntitySlice `json:"fromEntities,omitempty"` + // FromGroups is a directive that allows the integration with multiple outside + // providers. Currently, only AWS is supported, and the rule can select by + // multiple sub directives: + // + // Example: + // FromGroups: + // - aws: + // securityGroupsIds: + // - 'sg-XXXXXXXXXXXXX' + // + // +kubebuilder:validation:Optional + FromGroups []Groups `json:"fromGroups,omitempty"` + + // FromNodes is a list of nodes identified by an + // EndpointSelector which are allowed to communicate with the endpoint + // subject to the rule. + // + // +kubebuilder:validation:Optional + FromNodes []EndpointSelector `json:"fromNodes,omitempty"` + // TODO: Move this to the policy package // (https://github.com/cilium/cilium/issues/8353) aggregatedSelectors EndpointSelectorSlice `json:"-"` } +// DeepEqual returns true if both IngressCommonRule are deep equal. +// The semantic of a nil slice in one of its fields is different from the semantic +// of an empty non-nil slice, thus it explicitly checks for that case before calling +// the autogenerated method. +func (in *IngressCommonRule) DeepEqual(other *IngressCommonRule) bool { + if slices.XorNil(in.FromEndpoints, other.FromEndpoints) { + return false + } + if slices.XorNil(in.FromCIDR, other.FromCIDR) { + return false + } + if slices.XorNil(in.FromCIDRSet, other.FromCIDRSet) { + return false + } + if slices.XorNil(in.FromEntities, other.FromEntities) { + return false + } + + return in.deepEqual(other) +} + // IngressRule contains all rule types which can be applied at ingress, // i.e. network traffic that originates outside of the endpoint and // is entering the endpoint selected by the endpointSelector. @@ -135,7 +181,7 @@ type IngressRule struct { // the effects of any Requires field in any rule will apply to all other // rules as well. // -// - FromEndpoints, FromCIDR, FromCIDRSet and FromEntities are mutually +// - FromEndpoints, FromCIDR, FromCIDRSet, FromGroups and FromEntities are mutually // exclusive. 
Only one of these members may be present within an individual // rule. type IngressDenyRule struct { @@ -175,12 +221,21 @@ type IngressDenyRule struct { // FromEndpoints is not aggregated due to requirement folding in // GetSourceEndpointSelectorsWithRequirements() func (i *IngressCommonRule) SetAggregatedSelectors() { + // Goroutines can race setting i.aggregatedSelectors, but they will all compute the same result, so it does not matter. + + // explicitly check for empty non-nil slices, it should not result in any identity being selected. + if (i.FromCIDR != nil && len(i.FromCIDR) == 0) || + (i.FromCIDRSet != nil && len(i.FromCIDRSet) == 0) || + (i.FromEntities != nil && len(i.FromEntities) == 0) { + i.aggregatedSelectors = nil + return + } + res := make(EndpointSelectorSlice, 0, len(i.FromEntities)+len(i.FromCIDR)+len(i.FromCIDRSet)) res = append(res, i.FromEntities.GetAsEndpointSelectors()...) res = append(res, i.FromCIDR.GetAsEndpointSelectors()...) res = append(res, i.FromCIDRSet.GetAsEndpointSelectors()...) - // Goroutines can race setting this, but they will all compute - // the same result, so it does not matter. + i.aggregatedSelectors = res } @@ -190,7 +245,14 @@ func (i *IngressCommonRule) GetSourceEndpointSelectorsWithRequirements(requireme if i.aggregatedSelectors == nil { i.SetAggregatedSelectors() } - res := make(EndpointSelectorSlice, 0, len(i.FromEndpoints)+len(i.aggregatedSelectors)) + + // explicitly check for empty non-nil slices, it should not result in any identity being selected. + if i.aggregatedSelectors == nil || (i.FromEndpoints != nil && len(i.FromEndpoints) == 0) || + (i.FromNodes != nil && len(i.FromNodes) == 0) { + return nil + } + + res := make(EndpointSelectorSlice, 0, len(i.FromEndpoints)+len(i.aggregatedSelectors)+len(i.FromNodes)) if len(requirements) > 0 && len(i.FromEndpoints) > 0 { for idx := range i.FromEndpoints { sel := *i.FromEndpoints[idx].DeepCopy() @@ -203,6 +265,7 @@ func (i *IngressCommonRule) GetSourceEndpointSelectorsWithRequirements(requireme } } else { res = append(res, i.FromEndpoints...) + res = append(res, i.FromNodes...) } return append(res, i.aggregatedSelectors...) @@ -213,3 +276,65 @@ func (i *IngressCommonRule) GetSourceEndpointSelectorsWithRequirements(requireme func (i *IngressCommonRule) AllowsWildcarding() bool { return len(i.FromRequires) == 0 } + +// RequiresDerivative returns true when the EgressCommonRule contains sections +// that need a derivative policy created in order to be enforced +// (e.g. FromGroups). +func (e *IngressCommonRule) RequiresDerivative() bool { + return len(e.FromGroups) > 0 +} + +// IsL3 returns true if the IngressCommonRule contains at least a rule that +// affects L3 policy enforcement. +func (in *IngressCommonRule) IsL3() bool { + if in == nil { + return false + } + return len(in.FromEndpoints) > 0 || + len(in.FromRequires) > 0 || + len(in.FromCIDR) > 0 || + len(in.FromCIDRSet) > 0 || + len(in.FromEntities) > 0 || + len(in.FromGroups) > 0 || + len(in.FromNodes) > 0 +} + +// CreateDerivative will return a new rule based on the data gathered by the +// rules that creates a new derivative policy. +// In the case of FromGroups will call outside using the groups callback and this +// function can take a bit of time. 
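
Because FromGroups entries only name a cloud provider construct (an AWS security group id such as 'sg-XXXXXXXXXXXXX'), they cannot be matched directly; the CreateDerivative implementation that follows resolves them into FromCIDRSet entries through the registered group callback. A hedged sketch of how a caller might drive that expansion: the AWSGroup field name is inferred from the securityGroupsIds key shown above, and without a registered provider callback the call would simply return an error.

package main

import (
    "context"
    "fmt"

    "github.com/cilium/cilium/pkg/policy/api"
)

// expandGroups is a hypothetical helper mirroring what the policy layer does:
// group-based rules are expanded into concrete CIDR sets before enforcement.
func expandGroups(ctx context.Context, rule *api.IngressRule) (*api.IngressRule, error) {
    if !rule.RequiresDerivative() {
        return rule, nil // nothing to resolve
    }
    // CreateDerivative (below) calls out to the group provider and rewrites
    // FromGroups into FromCIDRSet entries on a copy of the rule.
    return rule.CreateDerivative(ctx)
}

func main() {
    rule := &api.IngressRule{
        IngressCommonRule: api.IngressCommonRule{
            FromGroups: []api.Groups{
                {AWS: &api.AWSGroup{SecurityGroupsIds: []string{"sg-XXXXXXXXXXXXX"}}},
            },
        },
    }
    derived, err := expandGroups(context.Background(), rule)
    fmt.Println(derived, err)
}
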
+func (e *IngressRule) CreateDerivative(ctx context.Context) (*IngressRule, error) { + newRule := e.DeepCopy() + if !e.RequiresDerivative() { + return newRule, nil + } + newRule.FromCIDRSet = make(CIDRRuleSlice, 0, len(e.FromGroups)) + cidrSet, err := ExtractCidrSet(ctx, e.FromGroups) + if err != nil { + return &IngressRule{}, err + } + newRule.FromCIDRSet = append(e.FromCIDRSet, cidrSet...) + newRule.FromGroups = nil + e.SetAggregatedSelectors() + return newRule, nil +} + +// CreateDerivative will return a new rule based on the data gathered by the +// rules that creates a new derivative policy. +// In the case of FromGroups will call outside using the groups callback and this +// function can take a bit of time. +func (e *IngressDenyRule) CreateDerivative(ctx context.Context) (*IngressDenyRule, error) { + newRule := e.DeepCopy() + if !e.RequiresDerivative() { + return newRule, nil + } + newRule.FromCIDRSet = make(CIDRRuleSlice, 0, len(e.FromGroups)) + cidrSet, err := ExtractCidrSet(ctx, e.FromGroups) + if err != nil { + return &IngressDenyRule{}, err + } + newRule.FromCIDRSet = append(e.FromCIDRSet, cidrSet...) + newRule.FromGroups = nil + e.SetAggregatedSelectors() + return newRule, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go index d41adf1b63..348fc047a7 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/l4.go @@ -23,18 +23,32 @@ const ( PortProtocolAny = "0/ANY" ) +// IsAny returns true if an L4Proto represents ANY protocol +func (l4 L4Proto) IsAny() bool { + return l4 == ProtoAny || string(l4) == "" +} + +// SupportedProtocols returns the currently supported protocols in the policy +// engine, excluding "ANY". +func SupportedProtocols() []L4Proto { + return []L4Proto{ProtoTCP, ProtoUDP, ProtoSCTP} +} + // PortProtocol specifies an L4 port with an optional transport protocol type PortProtocol struct { - // Port is an L4 port number. For now the string will be strictly - // parsed as a single uint16. In the future, this field may support - // ranges in the form "1024-2048 - // Port can also be a port name, which must contain at least one [a-z], - // and may also contain [0-9] and '-' anywhere except adjacent to another - // '-' or in the beginning or the end. + // Port can be an L4 port number, or a name in the form of "http" + // or "http-8080". // // +kubebuilder:validation:Pattern=`^(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|6[0-4][0-9]{3}|[1-5][0-9]{4}|[0-9]{1,4})|([a-zA-Z0-9]-?)*[a-zA-Z](-?[a-zA-Z0-9])*$` Port string `json:"port"` + // EndPort can only be an L4 port number. + // + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + EndPort int32 `json:"endPort,omitempty"` + // Protocol is the L4 protocol. If omitted or empty, any protocol // matches. Accepted values: "TCP", "UDP", "SCTP", "ANY" // @@ -56,7 +70,7 @@ func (p PortProtocol) Covers(other PortProtocol) bool { return false } if p.Protocol != other.Protocol { - return p.Protocol == "" || p.Protocol == ProtoAny + return p.Protocol.IsAny() } return true } @@ -134,7 +148,7 @@ type EnvoyConfig struct { // Listener defines a reference to an Envoy listener specified in a CEC or CCEC resource. type Listener struct { - // EnvoyConfig is a reference to the CEC or CCNP resource in which + // EnvoyConfig is a reference to the CEC or CCEC resource in which // the listener is defined. 
// // +kubebuilder:validation:Required @@ -145,6 +159,14 @@ type Listener struct { // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:Required Name string `json:"name"` + + // Priority for this Listener that is used when multiple rules would apply different + // listeners to a policy map entry. Behavior of this is implementation dependent. + // + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=100 + // +kubebuilder:validation:Optional + Priority uint8 `json:"priority"` } // PortRule is a list of ports/protocol combinations with optional Layer 7 @@ -153,6 +175,7 @@ type PortRule struct { // Ports is a list of L4 port/protocol // // +kubebuilder:validation:Optional + // +kubebuilder:validation:MaxItems=40 Ports []PortProtocol `json:"ports,omitempty"` // TerminatingTLS is the TLS context for the connection terminated by @@ -233,21 +256,25 @@ type L7Rules struct { // HTTP specific rules. // // +kubebuilder:validation:Optional + // +kubebuilder:validation:OneOf HTTP []PortRuleHTTP `json:"http,omitempty"` // Kafka-specific rules. // // +kubebuilder:validation:Optional + // +kubebuilder:validation:OneOf Kafka []kafka.PortRule `json:"kafka,omitempty"` // DNS-specific rules. // // +kubebuilder:validation:Optional + // +kubebuilder:validation:OneOf DNS []PortRuleDNS `json:"dns,omitempty"` // Name of the L7 protocol for which the Key-value pair rules apply. // // +kubebuilder:validation:Optional + // +kubebuilder:validation:OneOf L7Proto string `json:"l7proto,omitempty"` // Key-value pair rules. diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go index d83d0b0f30..c77b131f15 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule.go @@ -29,7 +29,21 @@ type Authentication struct { Mode AuthenticationMode `json:"mode"` } -// +kubebuilder:validation:Type=object +// DefaultDenyConfig expresses a policy's desired default mode for the subject +// endpoints. +type DefaultDenyConfig struct { + // Whether or not the endpoint should have a default-deny rule applied + // to ingress traffic. + // + // +kubebuilder:validation:Optional + Ingress *bool `json:"ingress,omitempty"` + + // Whether or not the endpoint should have a default-deny rule applied + // to egress traffic. + // + // +kubebuilder:validation:Optional + Egress *bool `json:"egress,omitempty"` +} // Rule is a policy rule which must be applied to all endpoints which match the // labels contained in the endpointSelector @@ -62,7 +76,7 @@ type Rule struct { // Ingress is a list of IngressRule which are enforced at ingress. // If omitted or empty, this rule does not apply at ingress. // - // +kubebuilder:validation:Optional + // +kubebuilder:validation:AnyOf Ingress []IngressRule `json:"ingress,omitempty"` // IngressDeny is a list of IngressDenyRule which are enforced at ingress. @@ -70,13 +84,13 @@ type Rule struct { // rules in the 'ingress' field. // If omitted or empty, this rule does not apply at ingress. // - // +kubebuilder:validation:Optional + // +kubebuilder:validation:AnyOf IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"` // Egress is a list of EgressRule which are enforced at egress. // If omitted or empty, this rule does not apply at egress. // - // +kubebuilder:validation:Optional + // +kubebuilder:validation:AnyOf Egress []EgressRule `json:"egress,omitempty"` // EgressDeny is a list of EgressDenyRule which are enforced at egress. 
@@ -84,7 +98,7 @@ type Rule struct { // rules in the 'egress' field. // If omitted or empty, this rule does not apply at egress. // - // +kubebuilder:validation:Optional + // +kubebuilder:validation:AnyOf EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"` // Labels is a list of optional strings which can be used to @@ -95,6 +109,25 @@ type Rule struct { // +kubebuilder:validation:Optional Labels labels.LabelArray `json:"labels,omitempty"` + // EnableDefaultDeny determines whether this policy configures the + // subject endpoint(s) to have a default deny mode. If enabled, + // this causes all traffic not explicitly allowed by a network policy + // to be dropped. + // + // If not specified, the default is true for each traffic direction + // that has rules, and false otherwise. For example, if a policy + // only has Ingress or IngressDeny rules, then the default for + // ingress is true and egress is false. + // + // If multiple policies apply to an endpoint, that endpoint's default deny + // will be enabled if any policy requests it. + // + // This is useful for creating broad-based network policies that will not + // cause endpoints to enter default-deny mode. + // + // +kubebuilder:validation:Optional + EnableDefaultDeny DefaultDenyConfig `json:"enableDefaultDeny,omitempty"` + // Description is a free form string, it can be used by the creator of // the rule to store human readable explanation of the purpose of this // rule. Rules cannot be identified by comment. @@ -107,12 +140,13 @@ type Rule struct { // enforce omitempty on the EndpointSelector nested structures. func (r *Rule) MarshalJSON() ([]byte, error) { type common struct { - Ingress []IngressRule `json:"ingress,omitempty"` - IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"` - Egress []EgressRule `json:"egress,omitempty"` - EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"` - Labels labels.LabelArray `json:"labels,omitempty"` - Description string `json:"description,omitempty"` + Ingress []IngressRule `json:"ingress,omitempty"` + IngressDeny []IngressDenyRule `json:"ingressDeny,omitempty"` + Egress []EgressRule `json:"egress,omitempty"` + EgressDeny []EgressDenyRule `json:"egressDeny,omitempty"` + Labels labels.LabelArray `json:"labels,omitempty"` + EnableDefaultDeny *DefaultDenyConfig `json:"enableDefaultDeny,omitempty"` + Description string `json:"description,omitempty"` } var a interface{} @@ -125,6 +159,11 @@ func (r *Rule) MarshalJSON() ([]byte, error) { Description: r.Description, } + // TODO: convert this to `omitzero` when Go v1.24 is released + if r.EnableDefaultDeny.Egress != nil || r.EnableDefaultDeny.Ingress != nil { + ruleCommon.EnableDefaultDeny = &r.EnableDefaultDeny + } + // Only one of endpointSelector or nodeSelector is permitted. switch { case r.EndpointSelector.LabelSelector != nil: @@ -193,6 +232,12 @@ func (r *Rule) WithEgressDenyRules(rules []EgressDenyRule) *Rule { return r } +// WithEnableDefaultDeny configures the Rule to enable default deny. +func (r *Rule) WithEnableDefaultDeny(ingress, egress bool) *Rule { + r.EnableDefaultDeny = DefaultDenyConfig{&ingress, &egress} + return r +} + // WithLabels configures the Rule with the specified labels metadata. 
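
EnableDefaultDeny makes it possible to write broad, additive allow policies that do not flip the selected endpoints into default-deny mode. A hedged sketch using the builder helpers from this file (NewRule, WithEndpointSelector and WithIngressRules are assumed to exist as in earlier releases; the selector label is invented):

package main

import (
    "log"

    "github.com/cilium/cilium/pkg/labels"
    "github.com/cilium/cilium/pkg/policy/api"
)

func main() {
    // Allow ingress from remote nodes without enabling default deny in either
    // direction for the selected endpoints.
    rule := api.NewRule().
        WithEndpointSelector(api.NewESFromLabels(labels.ParseSelectLabel("app=metrics"))).
        WithIngressRules([]api.IngressRule{{
            IngressCommonRule: api.IngressCommonRule{
                FromEntities: api.EntitySlice{api.EntityRemoteNode},
            },
        }}).
        WithEnableDefaultDeny(false, false)

    // Sanitize (rewritten below) fills EnableDefaultDeny in when it is left
    // unset, so setting it explicitly here is what keeps the endpoints out of
    // default-deny mode.
    if err := rule.Sanitize(); err != nil {
        log.Fatal(err)
    }
}
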
func (r *Rule) WithLabels(labels labels.LabelArray) *Rule { r.Labels = labels @@ -217,6 +262,16 @@ func (r *Rule) RequiresDerivative() bool { return true } } + for _, rule := range r.Ingress { + if rule.RequiresDerivative() { + return true + } + } + for _, rule := range r.IngressDeny { + if rule.RequiresDerivative() { + return true + } + } return false } @@ -226,6 +281,8 @@ func (r *Rule) CreateDerivative(ctx context.Context) (*Rule, error) { newRule := r.DeepCopy() newRule.Egress = []EgressRule{} newRule.EgressDeny = []EgressDenyRule{} + newRule.Ingress = []IngressRule{} + newRule.IngressDeny = []IngressDenyRule{} for _, egressRule := range r.Egress { derivativeEgressRule, err := egressRule.CreateDerivative(ctx) @@ -242,5 +299,39 @@ func (r *Rule) CreateDerivative(ctx context.Context) (*Rule, error) { } newRule.EgressDeny = append(newRule.EgressDeny, *derivativeEgressDenyRule) } + + for _, ingressRule := range r.Ingress { + derivativeIngressRule, err := ingressRule.CreateDerivative(ctx) + if err != nil { + return newRule, err + } + newRule.Ingress = append(newRule.Ingress, *derivativeIngressRule) + } + + for _, ingressDenyRule := range r.IngressDeny { + derivativeDenyIngressRule, err := ingressDenyRule.CreateDerivative(ctx) + if err != nil { + return newRule, err + } + newRule.IngressDeny = append(newRule.IngressDeny, *derivativeDenyIngressRule) + } return newRule, nil } + +type PolicyMetrics interface { + AddRule(r Rule) + DelRule(r Rule) +} + +type policyMetricsNoop struct { +} + +func (p *policyMetricsNoop) AddRule(Rule) { +} + +func (p *policyMetricsNoop) DelRule(Rule) { +} + +func NewPolicyMetricsNoop() PolicyMetrics { + return &policyMetricsNoop{} +} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go index 3828334b51..57f61f279b 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/rule_validation.go @@ -6,11 +6,12 @@ package api import ( "errors" "fmt" - "net" + "net/netip" "strconv" "strings" "github.com/cilium/cilium/pkg/iana" + "github.com/cilium/cilium/pkg/labels" "github.com/cilium/cilium/pkg/option" ) @@ -19,17 +20,50 @@ const ( maxICMPFields = 40 ) -type exists struct{} +var ( + ErrFromToNodesRequiresNodeSelectorOption = fmt.Errorf("FromNodes/ToNodes rules can only be applied when the %q flag is set", option.EnableNodeSelectorLabels) + + errUnsupportedICMPWithToPorts = errors.New("the ICMPs block may only be present without ToPorts. Define a separate rule to use ToPorts") + errEmptyServerName = errors.New("empty server name is not allowed") + + enableDefaultDenyDefault = true +) // Sanitize validates and sanitizes a policy rule. Minor edits such as // capitalization of the protocol name are automatically fixed up. More // fundamental violations will cause an error to be returned. -func (r Rule) Sanitize() error { +// +// Note: this function is called from both the operator and the agent; +// make sure any configuration flags are bound in **both** binaries. +func (r *Rule) Sanitize() error { + if len(r.Ingress) == 0 && len(r.IngressDeny) == 0 && len(r.Egress) == 0 && len(r.EgressDeny) == 0 { + return fmt.Errorf("rule must have at least one of Ingress, IngressDeny, Egress, EgressDeny") + } + + if option.Config.EnableNonDefaultDenyPolicies { + // Fill in the default traffic posture of this Rule. 
+ // Default posture is per-direction (ingress or egress), + // if there is a peer selector for that direction, the + // default is deny, else allow. + if r.EnableDefaultDeny.Egress == nil { + x := len(r.Egress) > 0 || len(r.EgressDeny) > 0 + r.EnableDefaultDeny.Egress = &x + } + if r.EnableDefaultDeny.Ingress == nil { + x := len(r.Ingress) > 0 || len(r.IngressDeny) > 0 + r.EnableDefaultDeny.Ingress = &x + } + } else { + // Since Non Default Deny Policies is disabled by flag, set EnableDefaultDeny to true + r.EnableDefaultDeny.Egress = &enableDefaultDenyDefault + r.EnableDefaultDeny.Ingress = &enableDefaultDenyDefault + } + if r.EndpointSelector.LabelSelector == nil && r.NodeSelector.LabelSelector == nil { - return fmt.Errorf("rule must have one of EndpointSelector or NodeSelector") + return errors.New("rule must have one of EndpointSelector or NodeSelector") } if r.EndpointSelector.LabelSelector != nil && r.NodeSelector.LabelSelector != nil { - return fmt.Errorf("rule cannot have both EndpointSelector and NodeSelector") + return errors.New("rule cannot have both EndpointSelector and NodeSelector") } if r.EndpointSelector.LabelSelector != nil { @@ -47,24 +81,26 @@ func (r Rule) Sanitize() error { } for i := range r.Ingress { - if err := r.Ingress[i].sanitize(); err != nil { + if err := r.Ingress[i].sanitize(hostPolicy); err != nil { return err } - if hostPolicy { - if len(countL7Rules(r.Ingress[i].ToPorts)) > 0 { - return fmt.Errorf("host policies do not support L7 rules yet") - } + } + + for i := range r.IngressDeny { + if err := r.IngressDeny[i].sanitize(); err != nil { + return err } } for i := range r.Egress { - if err := r.Egress[i].sanitize(); err != nil { + if err := r.Egress[i].sanitize(hostPolicy); err != nil { return err } - if hostPolicy { - if len(countL7Rules(r.Egress[i].ToPorts)) > 0 { - return fmt.Errorf("host policies do not support L7 rules yet") - } + } + + for i := range r.EgressDeny { + if err := r.EgressDeny[i].sanitize(); err != nil { + return err } } @@ -83,13 +119,7 @@ func countL7Rules(ports []PortRule) map[string]int { return result } -func (i *IngressRule) sanitize() error { - l3Members := map[string]int{ - "FromEndpoints": len(i.FromEndpoints), - "FromCIDR": len(i.FromCIDR), - "FromCIDRSet": len(i.FromCIDRSet), - "FromEntities": len(i.FromEntities), - } +func (i *IngressRule) sanitize(hostPolicy bool) error { l7Members := countL7Rules(i.ToPorts) l7IngressSupport := map[string]bool{ "DNS": false, @@ -97,12 +127,12 @@ func (i *IngressRule) sanitize() error { "HTTP": true, } - for m1 := range l3Members { - for m2 := range l3Members { - if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 { - return fmt.Errorf("Combining %s and %s is not supported yet", m1, m2) - } - } + if err := i.IngressCommonRule.sanitize(); err != nil { + return err + } + + if hostPolicy && len(l7Members) > 0 { + return errors.New("L7 policy is not supported on host ingress yet") } if len(l7Members) > 0 && !option.Config.EnableL7Proxy { @@ -119,23 +149,41 @@ func (i *IngressRule) sanitize() error { } if len(i.ICMPs) > 0 && len(i.ToPorts) > 0 { - return fmt.Errorf("The ICMPs block may only be present without ToPorts. 
Define a separate rule to use ToPorts.") + return errUnsupportedICMPWithToPorts } - for _, es := range i.FromEndpoints { - if err := es.sanitize(); err != nil { + for n := range i.ToPorts { + if err := i.ToPorts[n].sanitize(true); err != nil { return err } } - for _, es := range i.FromRequires { - if err := es.sanitize(); err != nil { + for n := range i.ICMPs { + if err := i.ICMPs[n].verify(); err != nil { return err } } + i.SetAggregatedSelectors() + + return nil +} + +func (i *IngressDenyRule) sanitize() error { + if err := i.IngressCommonRule.sanitize(); err != nil { + return err + } + + if len(i.ICMPs) > 0 && !option.Config.EnableICMPRules { + return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules) + } + + if len(i.ICMPs) > 0 && len(i.ToPorts) > 0 { + return errUnsupportedICMPWithToPorts + } + for n := range i.ToPorts { - if err := i.ToPorts[n].sanitize(true); err != nil { + if err := i.ToPorts[n].sanitize(); err != nil { return err } } @@ -146,71 +194,126 @@ func (i *IngressRule) sanitize() error { } } - prefixLengths := map[int]exists{} + i.SetAggregatedSelectors() + + return nil +} + +func (i *IngressCommonRule) sanitize() error { + l3Members := map[string]int{ + "FromEndpoints": len(i.FromEndpoints), + "FromCIDR": len(i.FromCIDR), + "FromCIDRSet": len(i.FromCIDRSet), + "FromEntities": len(i.FromEntities), + "FromNodes": len(i.FromNodes), + "FromGroups": len(i.FromGroups), + } + + for m1 := range l3Members { + for m2 := range l3Members { + if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 { + return fmt.Errorf("combining %s and %s is not supported yet", m1, m2) + } + } + } + + var retErr error + + if len(i.FromNodes) > 0 && !option.Config.EnableNodeSelectorLabels { + retErr = ErrFromToNodesRequiresNodeSelectorOption + } + + for _, es := range i.FromEndpoints { + if err := es.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + + for _, es := range i.FromRequires { + if err := es.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + + for _, ns := range i.FromNodes { + if err := ns.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + for n := range i.FromCIDR { - prefixLength, err := i.FromCIDR[n].sanitize() - if err != nil { - return err + if err := i.FromCIDR[n].sanitize(); err != nil { + return errors.Join(err, retErr) } - prefixLengths[prefixLength] = exists{} } for n := range i.FromCIDRSet { - prefixLength, err := i.FromCIDRSet[n].sanitize() - if err != nil { - return err + if err := i.FromCIDRSet[n].sanitize(); err != nil { + return errors.Join(err, retErr) } - prefixLengths[prefixLength] = exists{} } for _, fromEntity := range i.FromEntities { _, ok := EntitySelectorMapping[fromEntity] if !ok { - return fmt.Errorf("unsupported entity: %s", fromEntity) + return errors.Join(fmt.Errorf("unsupported entity: %s", fromEntity), retErr) } } - i.SetAggregatedSelectors() + return retErr +} - return nil +// countNonGeneratedRules counts the number of CIDRRule items which are not +// `Generated`, i.e. were directly provided by the user. +// The `Generated` field is currently only set by the `ToServices` +// implementation, which extracts service endpoints and translates them as +// ToCIDRSet rules before the CNP is passed to the policy repository. +// Therefore, we want to allow the combination of ToCIDRSet and ToServices +// rules, if (and only if) the ToCIDRSet only contains `Generated` entries. 
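
In other words, the "one L3 selector kind per rule" restriction is evaluated against what the user actually wrote, not against selectors injected by the ToServices translation. A small, package-internal illustration with invented values (countNonGeneratedCIDRRules is defined next and is unexported, so this fragment only compiles inside the api package):

// Written by the user vs. injected by the ToServices translation.
rules := CIDRRuleSlice{
    {Cidr: "10.0.0.0/8"},
    {Cidr: "192.0.2.10/32", Generated: true},
}
// Only the first entry counts toward the L3-member combination check.
n := countNonGeneratedCIDRRules(rules) // n == 1
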
+func countNonGeneratedCIDRRules(s CIDRRuleSlice) int { + n := 0 + for _, c := range s { + if !c.Generated { + n++ + } + } + return n } -func (e *EgressRule) sanitize() error { - l3Members := map[string]int{ - "ToCIDR": len(e.ToCIDR), - "ToCIDRSet": len(e.ToCIDRSet), - "ToEndpoints": len(e.ToEndpoints), - "ToEntities": len(e.ToEntities), - "ToServices": len(e.ToServices), - "ToFQDNs": len(e.ToFQDNs), - "ToGroups": len(e.ToGroups), - } - l3DependentL4Support := map[interface{}]bool{ - "ToCIDR": true, - "ToCIDRSet": true, - "ToEndpoints": true, - "ToEntities": true, - "ToServices": false, // see https://github.com/cilium/cilium/issues/20067 - "ToFQDNs": true, - "ToGroups": true, - } +// countNonGeneratedEndpoints counts the number of EndpointSelector items which are not +// `Generated`, i.e. were directly provided by the user. +// The `Generated` field is currently only set by the `ToServices` +// implementation, which extracts service endpoints and translates them as +// ToEndpoints rules before the CNP is passed to the policy repository. +// Therefore, we want to allow the combination of ToEndpoints and ToServices +// rules, if (and only if) the ToEndpoints only contains `Generated` entries. +func countNonGeneratedEndpoints(s []EndpointSelector) int { + n := 0 + for _, c := range s { + if !c.Generated { + n++ + } + } + return n +} + +func (e *EgressRule) sanitize(hostPolicy bool) error { + l3Members := e.l3Members() + l3DependentL4Support := e.l3DependentL4Support() l7Members := countL7Rules(e.ToPorts) l7EgressSupport := map[string]bool{ "DNS": true, - "Kafka": true, - "HTTP": true, + "Kafka": !hostPolicy, + "HTTP": !hostPolicy, } - for m1 := range l3Members { - for m2 := range l3Members { - if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 { - return fmt.Errorf("Combining %s and %s is not supported yet", m1, m2) - } - } + if err := e.EgressCommonRule.sanitize(l3Members); err != nil { + return err } + for member := range l3Members { if l3Members[member] > 0 && len(e.ToPorts) > 0 && !l3DependentL4Support[member] { - return fmt.Errorf("Combining %s and ToPorts is not supported yet", member) + return fmt.Errorf("combining %s and ToPorts is not supported yet", member) } } @@ -219,7 +322,11 @@ func (e *EgressRule) sanitize() error { } for member := range l7Members { if l7Members[member] > 0 && !l7EgressSupport[member] { - return fmt.Errorf("L7 protocol %s is not supported on egress yet", member) + where := "" + if hostPolicy { + where = "host " + } + return fmt.Errorf("L7 protocol %s is not supported on %segress yet", member, where) } } @@ -228,23 +335,69 @@ func (e *EgressRule) sanitize() error { } if len(e.ICMPs) > 0 && len(e.ToPorts) > 0 { - return fmt.Errorf("The ICMPs block may only be present without ToPorts. 
Define a separate rule to use ToPorts.") + return errUnsupportedICMPWithToPorts } - for _, es := range e.ToEndpoints { - if err := es.sanitize(); err != nil { + for i := range e.ToPorts { + if err := e.ToPorts[i].sanitize(false); err != nil { return err } } - for _, es := range e.ToRequires { - if err := es.sanitize(); err != nil { + for n := range e.ICMPs { + if err := e.ICMPs[n].verify(); err != nil { return err } } + for i := range e.ToFQDNs { + err := e.ToFQDNs[i].sanitize() + if err != nil { + return err + } + } + + e.SetAggregatedSelectors() + + return nil +} + +func (e *EgressRule) l3Members() map[string]int { + l3Members := e.EgressCommonRule.l3Members() + l3Members["ToFQDNs"] = len(e.ToFQDNs) + return l3Members +} + +func (e *EgressRule) l3DependentL4Support() map[string]bool { + l3DependentL4Support := e.EgressCommonRule.l3DependentL4Support() + l3DependentL4Support["ToFQDNs"] = true + return l3DependentL4Support +} + +func (e *EgressDenyRule) sanitize() error { + l3Members := e.l3Members() + l3DependentL4Support := e.l3DependentL4Support() + + if err := e.EgressCommonRule.sanitize(l3Members); err != nil { + return err + } + + for member := range l3Members { + if l3Members[member] > 0 && len(e.ToPorts) > 0 && !l3DependentL4Support[member] { + return fmt.Errorf("combining %s and ToPorts is not supported yet", member) + } + } + + if len(e.ICMPs) > 0 && !option.Config.EnableICMPRules { + return fmt.Errorf("ICMP rules can only be applied when the %q flag is set", option.EnableICMPRules) + } + + if len(e.ICMPs) > 0 && len(e.ToPorts) > 0 { + return errUnsupportedICMPWithToPorts + } + for i := range e.ToPorts { - if err := e.ToPorts[i].sanitize(false); err != nil { + if err := e.ToPorts[i].sanitize(); err != nil { return err } } @@ -255,39 +408,95 @@ func (e *EgressRule) sanitize() error { } } - prefixLengths := map[int]exists{} + e.SetAggregatedSelectors() + + return nil +} + +func (e *EgressDenyRule) l3Members() map[string]int { + return e.EgressCommonRule.l3Members() +} + +func (e *EgressDenyRule) l3DependentL4Support() map[string]bool { + return e.EgressCommonRule.l3DependentL4Support() +} + +func (e *EgressCommonRule) sanitize(l3Members map[string]int) error { + for m1 := range l3Members { + for m2 := range l3Members { + if m2 != m1 && l3Members[m1] > 0 && l3Members[m2] > 0 { + return fmt.Errorf("combining %s and %s is not supported yet", m1, m2) + } + } + } + + var retErr error + + if len(e.ToNodes) > 0 && !option.Config.EnableNodeSelectorLabels { + retErr = ErrFromToNodesRequiresNodeSelectorOption + } + + for _, es := range e.ToEndpoints { + if err := es.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + + for _, es := range e.ToRequires { + if err := es.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + + for _, ns := range e.ToNodes { + if err := ns.sanitize(); err != nil { + return errors.Join(err, retErr) + } + } + for i := range e.ToCIDR { - prefixLength, err := e.ToCIDR[i].sanitize() - if err != nil { - return err + if err := e.ToCIDR[i].sanitize(); err != nil { + return errors.Join(err, retErr) } - prefixLengths[prefixLength] = exists{} } for i := range e.ToCIDRSet { - prefixLength, err := e.ToCIDRSet[i].sanitize() - if err != nil { - return err + if err := e.ToCIDRSet[i].sanitize(); err != nil { + return errors.Join(err, retErr) } - prefixLengths[prefixLength] = exists{} } for _, toEntity := range e.ToEntities { _, ok := EntitySelectorMapping[toEntity] if !ok { - return fmt.Errorf("unsupported entity: %s", toEntity) + return 
errors.Join(fmt.Errorf("unsupported entity: %s", toEntity), retErr) } } - for i := range e.ToFQDNs { - err := e.ToFQDNs[i].sanitize() - if err != nil { - return err - } - } + return retErr +} - e.SetAggregatedSelectors() +func (e *EgressCommonRule) l3Members() map[string]int { + return map[string]int{ + "ToCIDR": len(e.ToCIDR), + "ToCIDRSet": countNonGeneratedCIDRRules(e.ToCIDRSet), + "ToEndpoints": countNonGeneratedEndpoints(e.ToEndpoints), + "ToEntities": len(e.ToEntities), + "ToServices": len(e.ToServices), + "ToGroups": len(e.ToGroups), + "ToNodes": len(e.ToNodes), + } +} - return nil +func (e *EgressCommonRule) l3DependentL4Support() map[string]bool { + return map[string]bool{ + "ToCIDR": true, + "ToCIDRSet": true, + "ToEndpoints": true, + "ToEntities": true, + "ToServices": true, + "ToGroups": true, + "ToNodes": true, + } } func (pr *L7Rules) sanitize(ports []PortProtocol) error { @@ -315,7 +524,7 @@ func (pr *L7Rules) sanitize(ports []PortProtocol) error { // Forthcoming TPROXY redirection restricts DNS proxy to the standard DNS port (53). // Require the port 53 be explicitly configured, and disallow other port numbers. if len(ports) == 0 { - return fmt.Errorf("Port 53 must be specified for DNS rules") + return errors.New("port 53 must be specified for DNS rules") } nTypes++ @@ -344,6 +553,10 @@ func (pr *L7Rules) sanitize(ports []PortProtocol) error { return nil } +// It is not allowed to configure an ingress listener, but we still +// have some unit tests relying on this. So, allow overriding this check in the unit tests. +var TestAllowIngressListener = false + func (pr *PortRule) sanitize(ingress bool) error { hasDNSRules := pr.Rules != nil && len(pr.Rules.DNS) > 0 if ingress && hasDNSRules { @@ -355,7 +568,7 @@ func (pr *PortRule) sanitize(ingress bool) error { } for _, sn := range pr.ServerNames { if sn == "" { - return fmt.Errorf("Empty server name is not allowed") + return errEmptyServerName } } @@ -366,7 +579,7 @@ func (pr *PortRule) sanitize(ingress bool) error { for i := range pr.Ports { var isZero bool var err error - if isZero, err = pr.Ports[i].sanitize(); err != nil { + if isZero, err = pr.Ports[i].sanitize(hasDNSRules); err != nil { return err } if isZero { @@ -386,7 +599,7 @@ func (pr *PortRule) sanitize(ingress bool) error { // For now we have only tested custom listener support on the egress path. TODO // (jrajahalme): Lift this limitation in follow-up work once proper testing has been // done on the ingress path. - if ingress { + if ingress && !TestAllowIngressListener { return fmt.Errorf("Listener is not allowed on ingress (%s)", listener.Name) } // There is no quarantee that Listener will support Cilium policy enforcement. 
Even @@ -401,7 +614,7 @@ func (pr *PortRule) sanitize(ingress bool) error { // Sanitize L7 rules if !pr.Rules.IsEmpty() { if haveZeroPort { - return fmt.Errorf("L7 rules can not be used when a port is 0") + return errors.New("L7 rules can not be used when a port is 0") } if err := pr.Rules.sanitize(pr.Ports); err != nil { @@ -411,9 +624,22 @@ func (pr *PortRule) sanitize(ingress bool) error { return nil } -func (pp *PortProtocol) sanitize() (isZero bool, err error) { +func (pr *PortDenyRule) sanitize() error { + if len(pr.Ports) > maxPorts { + return fmt.Errorf("too many ports, the max is %d", maxPorts) + } + for i := range pr.Ports { + if _, err := pr.Ports[i].sanitize(false); err != nil { + return err + } + } + + return nil +} + +func (pp *PortProtocol) sanitize(hasDNSRules bool) (isZero bool, err error) { if pp.Port == "" { - return isZero, fmt.Errorf("Port must be specified") + return isZero, errors.New("Port must be specified") } // Port names are formatted as IANA Service Names. This means that @@ -424,9 +650,12 @@ func (pp *PortProtocol) sanitize() (isZero bool, err error) { } else { p, err := strconv.ParseUint(pp.Port, 0, 16) if err != nil { - return isZero, fmt.Errorf("Unable to parse port: %s", err) + return isZero, fmt.Errorf("unable to parse port: %w", err) } isZero = p == 0 + if hasDNSRules && pp.EndPort > int32(p) { + return isZero, errors.New("DNS rules do not support port ranges") + } } pp.Protocol, err = ParseL4Proto(string(pp.Protocol)) @@ -447,68 +676,87 @@ func (ir *ICMPRule) verify() error { return nil } -// sanitize the given CIDR. If successful, returns the prefixLength specified -// in the cidr and nil. Otherwise, returns (0, nil). -func (c CIDR) sanitize() (prefixLength int, err error) { +// sanitize the given CIDR. +func (c CIDR) sanitize() error { strCIDR := string(c) if strCIDR == "" { - return 0, fmt.Errorf("IP must be specified") + return fmt.Errorf("IP must be specified") } - _, ipnet, err := net.ParseCIDR(strCIDR) - if err == nil { - var bits int - prefixLength, bits = ipnet.Mask.Size() - if prefixLength == 0 && bits == 0 { - return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", - ipnet.Mask.String()) - } - } else { - // Try to parse as a fully masked IP or an IP subnetwork - ip := net.ParseIP(strCIDR) - if ip == nil { - return 0, fmt.Errorf("Unable to parse CIDR: %s", err) + prefix, err := netip.ParsePrefix(strCIDR) + if err != nil { + _, err := netip.ParseAddr(strCIDR) + if err != nil { + return fmt.Errorf("unable to parse CIDR: %w", err) } + return nil + } + prefixLength := prefix.Bits() + if prefixLength < 0 { + return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix) } - return prefixLength, nil + return nil } // sanitize validates a CIDRRule by checking that the CIDR prefix itself is // valid, and ensuring that all of the exception CIDR prefixes are contained // within the allowed CIDR prefix. 
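
The CIDR validation now relies on net/netip instead of net, so prefixes are parsed and compared without manual mask arithmetic. A small standalone sketch of the same containment check performed by the rewritten sanitize below (containsException is an invented name, used only for illustration):

package main

import (
    "fmt"
    "net/netip"
)

// containsException mirrors the check in CIDRRule.sanitize: every exception
// prefix must sit inside the allowed prefix, which also forces both prefixes
// to share an address family.
func containsException(allow, except string) (bool, error) {
    allowPfx, err := netip.ParsePrefix(allow)
    if err != nil {
        return false, fmt.Errorf("unable to parse CIDR %q: %w", allow, err)
    }
    exceptPfx, err := netip.ParsePrefix(except)
    if err != nil {
        return false, err
    }
    return allowPfx.Contains(exceptPfx.Addr()), nil
}

func main() {
    fmt.Println(containsException("10.0.0.0/8", "10.96.0.0/12"))   // true <nil>
    fmt.Println(containsException("10.0.0.0/8", "192.168.0.0/16")) // false <nil>
}
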
-func (c *CIDRRule) sanitize() (prefixLength int, err error) { +func (c *CIDRRule) sanitize() error { + // Exactly one of CIDR, CIDRGroupRef, or CIDRGroupSelector must be set + cnt := 0 + if len(c.CIDRGroupRef) > 0 { + cnt++ + } + if len(c.Cidr) > 0 { + cnt++ + } + if c.CIDRGroupSelector != nil { + cnt++ + es := NewESFromK8sLabelSelector(labels.LabelSourceCIDRGroupKeyPrefix, c.CIDRGroupSelector) + if err := es.sanitize(); err != nil { + return fmt.Errorf("failed to parse cidrGroupSelector %v: %w", c.CIDRGroupSelector.String(), err) + } + } + if cnt == 0 { + return fmt.Errorf("one of cidr, cidrGroupRef, or cidrGroupSelector is required") + } + if cnt > 1 { + return fmt.Errorf("more than one of cidr, cidrGroupRef, or cidrGroupSelector may not be set") + } + + if len(c.CIDRGroupRef) > 0 || c.CIDRGroupSelector != nil { + return nil // these are selectors; + } // Only allow notation /. Note that this differs from // the logic in api.CIDR.Sanitize(). - _, cidrNet, err := net.ParseCIDR(string(c.Cidr)) + prefix, err := netip.ParsePrefix(string(c.Cidr)) if err != nil { - return 0, fmt.Errorf("Unable to parse CIDRRule %q: %s", c.Cidr, err) + return fmt.Errorf("unable to parse CIDRRule %q: %w", c.Cidr, err) } - var bits int - prefixLength, bits = cidrNet.Mask.Size() - if prefixLength == 0 && bits == 0 { - return 0, fmt.Errorf("CIDR cannot specify non-contiguous mask %s", - cidrNet.Mask.String()) + prefixLength := prefix.Bits() + if prefixLength < 0 { + return fmt.Errorf("CIDR cannot specify non-contiguous mask %s", prefix) } // Ensure that each provided exception CIDR prefix is formatted correctly, // and is contained within the CIDR prefix to/from which we want to allow // traffic. for _, p := range c.ExceptCIDRs { - exceptCIDRAddr, _, err := net.ParseCIDR(string(p)) + except, err := netip.ParsePrefix(string(p)) if err != nil { - return 0, err + return err } // Note: this also checks that the allow CIDR prefix and the exception // CIDR prefixes are part of the same address family. - if !cidrNet.Contains(exceptCIDRAddr) { - return 0, fmt.Errorf("allow CIDR prefix %s does not contain "+ + if !prefix.Contains(except.Addr()) { + return fmt.Errorf("allow CIDR prefix %s does not contain "+ "exclude CIDR prefix %s", c.Cidr, p) } } - return prefixLength, nil + return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go index 6da00c9a32..7dc6c987fb 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/selector.go @@ -34,6 +34,10 @@ type EndpointSelector struct { // EndpointSelectors are created via `NewESFromMatchRequirements`. It is // immutable after its creation. cachedLabelSelectorString string `json:"-"` + + // Generated indicates whether the rule was generated based on other rules + // or provided by user + Generated bool `json:"-"` } // LabelSelectorString returns a user-friendly string representation of @@ -164,7 +168,6 @@ func (n EndpointSelector) GetMatch(key string) ([]string, bool) { func labelSelectorToRequirements(labelSelector *slim_metav1.LabelSelector) *k8sLbls.Requirements { selector, err := slim_metav1.LabelSelectorAsSelector(labelSelector) if err != nil { - metrics.PolicyImportErrorsTotal.Inc() // Deprecated in Cilium 1.14, to be removed in 1.15. 
metrics.PolicyChangeTotal.WithLabelValues(metrics.LabelValueOutcomeFail).Inc() log.WithError(err).WithField(logfields.EndpointLabelSelector, logfields.Repr(labelSelector)).Error("unable to construct selector in label selector") @@ -234,6 +237,8 @@ var ( labels.IDNameHost: newReservedEndpointSelector(labels.IDNameHost), labels.IDNameRemoteNode: newReservedEndpointSelector(labels.IDNameRemoteNode), labels.IDNameWorld: newReservedEndpointSelector(labels.IDNameWorld), + labels.IDNameWorldIPv4: newReservedEndpointSelector(labels.IDNameWorldIPv4), + labels.IDNameWorldIPv6: newReservedEndpointSelector(labels.IDNameWorldIPv6), } ) @@ -342,9 +347,9 @@ func (n *EndpointSelector) ConvertToLabelSelectorRequirementSlice() []slim_metav // sanitize returns an error if the EndpointSelector's LabelSelector is invalid. func (n *EndpointSelector) sanitize() error { - errList := validation.ValidateLabelSelector(n.LabelSelector, nil) + errList := validation.ValidateLabelSelector(n.LabelSelector, validation.LabelSelectorValidationOptions{AllowInvalidLabelValueInSelector: false}, nil) if len(errList) > 0 { - return fmt.Errorf("invalid label selector: %s", errList.ToAggregate().Error()) + return fmt.Errorf("invalid label selector: %w", errList.ToAggregate()) } return nil } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/service.go b/vendor/github.com/cilium/cilium/pkg/policy/api/service.go index 57868a367a..4c8debda91 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/service.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/service.go @@ -6,7 +6,10 @@ package api // ServiceSelector is a label selector for k8s services type ServiceSelector EndpointSelector -// Service wraps around selectors for services +// Service selects policy targets that are bundled as part of a +// logical load-balanced service. +// +// Currently only Kubernetes-based Services are supported. type Service struct { // K8sServiceSelector selects services by k8s labels and namespace K8sServiceSelector *K8sServiceSelectorNamespace `json:"k8sServiceSelector,omitempty"` @@ -14,13 +17,13 @@ type Service struct { K8sService *K8sServiceNamespace `json:"k8sService,omitempty"` } -// K8sServiceNamespace is an abstraction for the k8s service + namespace types. +// K8sServiceNamespace selects services by name and, optionally, namespace. type K8sServiceNamespace struct { ServiceName string `json:"serviceName,omitempty"` Namespace string `json:"namespace,omitempty"` } -// K8sServiceSelectorNamespace wraps service selector with namespace +// K8sServiceSelectorNamespace selects services by labels. 
type K8sServiceSelectorNamespace struct { // +kubebuilder:validation:Required Selector ServiceSelector `json:"selector"` diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go index 7424d34005..f6019b3234 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/utils.go @@ -4,6 +4,7 @@ package api import ( + "context" "fmt" "strings" ) @@ -146,7 +147,7 @@ const ( ForceNamespace Option = iota ) -func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) string { +func ResourceQualifiedName(namespace, cecName, resourceName string, options ...Option) (name string, updated bool) { forceNamespace := false for _, option := range options { switch option { @@ -157,7 +158,7 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options ...O idx := strings.IndexRune(resourceName, '/') if resourceName == "" || idx >= 0 && (!forceNamespace || (idx == len(namespace) && strings.HasPrefix(resourceName, namespace))) { - return resourceName + return resourceName, false } var sb strings.Builder @@ -168,5 +169,29 @@ func ResourceQualifiedName(namespace, cecName, resourceName string, options ...O sb.WriteRune('/') sb.WriteString(resourceName) - return sb.String() + return sb.String(), true +} + +// ParseQualifiedName returns the namespace, name, and the resource name of a name qualified with ResourceQualifiedName() +func ParseQualifiedName(qualifiedName string) (namespace, name, resourceName string) { + parts := strings.SplitN(qualifiedName, "/", 3) + if len(parts) < 3 { + return "", "", qualifiedName + } + return parts[0], parts[1], parts[2] +} + +// ExtractCidrSet abstracts away some of the logic from the CreateDerivative methods +func ExtractCidrSet(ctx context.Context, groups []Groups) ([]CIDRRule, error) { + var cidrSet []CIDRRule + for _, group := range groups { + c, err := group.GetCidrSet(ctx) + if err != nil { + return cidrSet, err + } + if len(c) > 0 { + cidrSet = append(cidrSet, c...) + } + } + return cidrSet, nil } diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go index 7d1b0b70ba..d00bcda93c 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepcopy.go @@ -12,6 +12,7 @@ import ( labels "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/labels" v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" kafka "github.com/cilium/proxy/pkg/policy/api/kafka" + intstr "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -66,6 +67,11 @@ func (in *Authentication) DeepCopy() *Authentication { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CIDRRule) DeepCopyInto(out *CIDRRule) { *out = *in + if in.CIDRGroupSelector != nil { + in, out := &in.CIDRGroupSelector, &out.CIDRGroupSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.ExceptCIDRs != nil { in, out := &in.ExceptCIDRs, &out.ExceptCIDRs *out = make([]CIDR, len(*in)) @@ -126,6 +132,32 @@ func (in CIDRSlice) DeepCopy() CIDRSlice { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
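
Since ResourceQualifiedName now reports whether it actually qualified the name, and ParseQualifiedName inverts the transformation, callers can round-trip listener references. A hedged sketch with invented namespace and resource values:

package main

import (
    "fmt"

    "github.com/cilium/cilium/pkg/policy/api"
)

func main() {
    // Unqualified names are prefixed with "<namespace>/<cec-name>/".
    qualified, updated := api.ResourceQualifiedName("kube-system", "my-cec", "my-listener")
    fmt.Println(qualified, updated) // kube-system/my-cec/my-listener true

    // ParseQualifiedName splits the result back into its three parts.
    ns, cec, listener := api.ParseQualifiedName(qualified)
    fmt.Println(ns, cec, listener) // kube-system my-cec my-listener

    // Already-qualified names are passed through untouched.
    same, changed := api.ResourceQualifiedName("kube-system", "my-cec", qualified)
    fmt.Println(same, changed) // kube-system/my-cec/my-listener false
}
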
+func (in *DefaultDenyConfig) DeepCopyInto(out *DefaultDenyConfig) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(bool) + **out = **in + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultDenyConfig. +func (in *DefaultDenyConfig) DeepCopy() *DefaultDenyConfig { + if in == nil { + return nil + } + out := new(DefaultDenyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EgressCommonRule) DeepCopyInto(out *EgressCommonRule) { *out = *in @@ -169,7 +201,14 @@ func (in *EgressCommonRule) DeepCopyInto(out *EgressCommonRule) { } if in.ToGroups != nil { in, out := &in.ToGroups, &out.ToGroups - *out = make([]ToGroups, len(*in)) + *out = make([]Groups, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ToNodes != nil { + in, out := &in.ToNodes, &out.ToNodes + *out = make([]EndpointSelector, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -392,6 +431,27 @@ func (in FQDNSelectorSlice) DeepCopy() FQDNSelectorSlice { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Groups) DeepCopyInto(out *Groups) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSGroup) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Groups. +func (in *Groups) DeepCopy() *Groups { + if in == nil { + return nil + } + out := new(Groups) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HeaderMatch) DeepCopyInto(out *HeaderMatch) { *out = *in @@ -416,6 +476,11 @@ func (in *HeaderMatch) DeepCopy() *HeaderMatch { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ICMPField) DeepCopyInto(out *ICMPField) { *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(intstr.IntOrString) + **out = **in + } return } @@ -435,7 +500,9 @@ func (in *ICMPRule) DeepCopyInto(out *ICMPRule) { if in.Fields != nil { in, out := &in.Fields, &out.Fields *out = make([]ICMPField, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -506,6 +573,20 @@ func (in *IngressCommonRule) DeepCopyInto(out *IngressCommonRule) { *out = make(EntitySlice, len(*in)) copy(*out, *in) } + if in.FromGroups != nil { + in, out := &in.FromGroups, &out.FromGroups + *out = make([]Groups, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FromNodes != nil { + in, out := &in.FromNodes, &out.FromNodes + *out = make([]EndpointSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.aggregatedSelectors != nil { in, out := &in.aggregatedSelectors, &out.aggregatedSelectors *out = make(EndpointSelectorSlice, len(*in)) @@ -924,6 +1005,7 @@ func (in *Rule) DeepCopyInto(out *Rule) { } } out.Labels = in.Labels.DeepCopy() + in.EnableDefaultDeny.DeepCopyInto(&out.EnableDefaultDeny) return } @@ -1057,24 +1139,3 @@ func (in *TLSContext) DeepCopy() *TLSContext { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ToGroups) DeepCopyInto(out *ToGroups) { - *out = *in - if in.AWS != nil { - in, out := &in.AWS, &out.AWS - *out = new(AWSGroup) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ToGroups. -func (in *ToGroups) DeepCopy() *ToGroups { - if in == nil { - return nil - } - out := new(ToGroups) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go index a6573025d3..8b28910702 100644 --- a/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go +++ b/vendor/github.com/cilium/cilium/pkg/policy/api/zz_generated.deepequal.go @@ -104,6 +104,14 @@ func (in *CIDRRule) DeepEqual(other *CIDRRule) bool { if in.CIDRGroupRef != other.CIDRGroupRef { return false } + if (in.CIDRGroupSelector == nil) != (other.CIDRGroupSelector == nil) { + return false + } else if in.CIDRGroupSelector != nil { + if !in.CIDRGroupSelector.DeepEqual(other.CIDRGroupSelector) { + return false + } + } + if ((in.ExceptCIDRs != nil) && (other.ExceptCIDRs != nil)) || ((in.ExceptCIDRs == nil) != (other.ExceptCIDRs == nil)) { in, other := &in.ExceptCIDRs, &other.ExceptCIDRs if other == nil { @@ -170,7 +178,33 @@ func (in *CIDRSlice) DeepEqual(other *CIDRSlice) bool { // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
-func (in *EgressCommonRule) DeepEqual(other *EgressCommonRule) bool { +func (in *DefaultDenyConfig) DeepEqual(other *DefaultDenyConfig) bool { + if other == nil { + return false + } + + if (in.Ingress == nil) != (other.Ingress == nil) { + return false + } else if in.Ingress != nil { + if *in.Ingress != *other.Ingress { + return false + } + } + + if (in.Egress == nil) != (other.Egress == nil) { + return false + } else if in.Egress != nil { + if *in.Egress != *other.Egress { + return false + } + } + + return true +} + +// deepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *EgressCommonRule) deepEqual(other *EgressCommonRule) bool { if other == nil { return false } @@ -264,6 +298,23 @@ func (in *EgressCommonRule) DeepEqual(other *EgressCommonRule) bool { } } + if ((in.ToNodes != nil) && (other.ToNodes != nil)) || ((in.ToNodes == nil) != (other.ToNodes == nil)) { + in, other := &in.ToNodes, &other.ToNodes + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) { in, other := &in.aggregatedSelectors, &other.aggregatedSelectors if other == nil || !in.DeepEqual(other) { @@ -371,6 +422,9 @@ func (in *EndpointSelector) DeepEqual(other *EndpointSelector) bool { if in.cachedLabelSelectorString != other.cachedLabelSelectorString { return false } + if in.Generated != other.Generated { + return false + } return true } @@ -469,6 +523,24 @@ func (in *FQDNSelectorSlice) DeepEqual(other *FQDNSelectorSlice) bool { return true } +// DeepEqual is an autogenerated deepequal function, deeply comparing the +// receiver with other. in must be non-nil. +func (in *Groups) DeepEqual(other *Groups) bool { + if other == nil { + return false + } + + if (in.AWS == nil) != (other.AWS == nil) { + return false + } else if in.AWS != nil { + if !in.AWS.DeepEqual(other.AWS) { + return false + } + } + + return true +} + // DeepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool { @@ -497,9 +569,9 @@ func (in *HeaderMatch) DeepEqual(other *HeaderMatch) bool { return true } -// DeepEqual is an autogenerated deepequal function, deeply comparing the +// deepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. -func (in *ICMPField) DeepEqual(other *ICMPField) bool { +func (in *ICMPField) deepEqual(other *ICMPField) bool { if other == nil { return false } @@ -507,9 +579,6 @@ func (in *ICMPField) DeepEqual(other *ICMPField) bool { if in.Family != other.Family { return false } - if in.Type != other.Type { - return false - } return true } @@ -561,9 +630,9 @@ func (in *ICMPRules) DeepEqual(other *ICMPRules) bool { return true } -// DeepEqual is an autogenerated deepequal function, deeply comparing the +// deepEqual is an autogenerated deepequal function, deeply comparing the // receiver with other. in must be non-nil. 
-func (in *IngressCommonRule) DeepEqual(other *IngressCommonRule) bool { +func (in *IngressCommonRule) deepEqual(other *IngressCommonRule) bool { if other == nil { return false } @@ -623,6 +692,40 @@ func (in *IngressCommonRule) DeepEqual(other *IngressCommonRule) bool { } } + if ((in.FromGroups != nil) && (other.FromGroups != nil)) || ((in.FromGroups == nil) != (other.FromGroups == nil)) { + in, other := &in.FromGroups, &other.FromGroups + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + + if ((in.FromNodes != nil) && (other.FromNodes != nil)) || ((in.FromNodes == nil) != (other.FromNodes == nil)) { + in, other := &in.FromNodes, &other.FromNodes + if other == nil { + return false + } + + if len(*in) != len(*other) { + return false + } else { + for i, inElement := range *in { + if !inElement.DeepEqual(&(*other)[i]) { + return false + } + } + } + } + if ((in.aggregatedSelectors != nil) && (other.aggregatedSelectors != nil)) || ((in.aggregatedSelectors == nil) != (other.aggregatedSelectors == nil)) { in, other := &in.aggregatedSelectors, &other.aggregatedSelectors if other == nil || !in.DeepEqual(other) { @@ -831,6 +934,9 @@ func (in *Listener) DeepEqual(other *Listener) bool { if in.Name != other.Name { return false } + if in.Priority != other.Priority { + return false + } return true } @@ -892,6 +998,9 @@ func (in *PortProtocol) DeepEqual(other *PortProtocol) bool { if in.Port != other.Port { return false } + if in.EndPort != other.EndPort { + return false + } if in.Protocol != other.Protocol { return false } @@ -1179,6 +1288,10 @@ func (in *Rule) deepEqual(other *Rule) bool { } } + if !in.EnableDefaultDeny.DeepEqual(&other.EnableDefaultDeny) { + return false + } + if in.Description != other.Description { return false } @@ -1275,6 +1388,9 @@ func (in *ServiceSelector) DeepEqual(other *ServiceSelector) bool { if in.cachedLabelSelectorString != other.cachedLabelSelectorString { return false } + if in.Generated != other.Generated { + return false + } return true } @@ -1306,21 +1422,3 @@ func (in *TLSContext) DeepEqual(other *TLSContext) bool { return true } - -// DeepEqual is an autogenerated deepequal function, deeply comparing the -// receiver with other. in must be non-nil. -func (in *ToGroups) DeepEqual(other *ToGroups) bool { - if other == nil { - return false - } - - if (in.AWS == nil) != (other.AWS == nil) { - return false - } else if in.AWS != nil { - if !in.AWS.DeepEqual(other.AWS) { - return false - } - } - - return true -} diff --git a/vendor/github.com/cilium/cilium/pkg/promise/promise.go b/vendor/github.com/cilium/cilium/pkg/promise/promise.go index 1164b9cf13..d6a7a5f4e0 100644 --- a/vendor/github.com/cilium/cilium/pkg/promise/promise.go +++ b/vendor/github.com/cilium/cilium/pkg/promise/promise.go @@ -88,13 +88,15 @@ func (p *promise[T]) Reject(err error) { // Await blocks until the promise has been resolved, rejected or context cancelled. func (p *promise[T]) Await(ctx context.Context) (value T, err error) { - // Fork off a goroutine to wait for cancellation and wake up. - ctx, cancel := context.WithCancel(ctx) - defer cancel() - go func() { - <-ctx.Done() + // Wake up the for-loop below if the context is cancelled. 
+ // See https://pkg.go.dev/context#AfterFunc for a more detailed + // explanation of this pattern + cleanupCancellation := context.AfterFunc(ctx, func() { + p.Lock() + defer p.Unlock() p.cond.Broadcast() - }() + }) + defer cleanupCancellation() p.Lock() defer p.Unlock() @@ -130,3 +132,14 @@ func Map[A, B any](p Promise[A], transform func(A) B) Promise[B] { return transform(v), nil }) } + +// MapError transforms the error of a rejected promise with the provided function. +func MapError[A any](p Promise[A], transform func(error) error) Promise[A] { + return wrappedPromise[A](func(ctx context.Context) (out A, err error) { + v, err := p.Await(ctx) + if err != nil { + err = transform(err) + } + return v, err + }) +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/error.go b/vendor/github.com/cilium/cilium/pkg/resiliency/error.go new file mode 100644 index 0000000000..cd348fc00e --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/error.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +// retryableErr tracks errors that could be retried. +type retryableErr struct { + error +} + +// Retryable returns a new instance. +func Retryable(e error) retryableErr { + return retryableErr{error: e} +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go b/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go new file mode 100644 index 0000000000..9ac59e72d2 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/errorset.go @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "errors" + "fmt" +) + +type tuple struct { + index int + err error +} + +// ErrorSet tracks a collection of unique errors. +type ErrorSet struct { + total, failed int + msg string + errs map[string]tuple +} + +// NewErrorSet returns a new instance. +func NewErrorSet(msg string, c int) *ErrorSet { + return &ErrorSet{ + msg: msg, + total: c, + errs: make(map[string]tuple), + } +} + +// Add adds one or more errors to the set. +func (e *ErrorSet) Add(errs ...error) { + for _, err := range errs { + if err == nil { + continue + } + if _, ok := e.errs[err.Error()]; ok { + continue + } + e.errs[err.Error()] = tuple{index: e.failed, err: err} + e.failed++ + } +} + +// Error returns a list of unique errors or nil. +func (e *ErrorSet) Errors() []error { + if len(e.errs) == 0 { + return nil + } + errs := make([]error, len(e.errs)+1) + errs[0] = fmt.Errorf("%s (%d/%d) failed", e.msg, e.failed, e.total) + for _, t := range e.errs { + errs[t.index+1] = t.err + } + + return errs +} + +// Error returns a new composite error or nil. +func (e *ErrorSet) Error() error { + return errors.Join(e.Errors()...) +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go b/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go new file mode 100644 index 0000000000..0cd5d9cb8a --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/helpers.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// RetryFunc tracks resiliency retry calls. +type RetryFunc func(ctx context.Context, retries int) (bool, error) + +// Retry retries the provided call using exponential retries given an initial duration for up to max retries count. 
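The new resiliency package introduced above couples three small pieces: `Retryable` tags an error, `IsRetryable` (in retry.go a little further down) probes for that tag via `errors.As`, and `Retry` — whose signature follows this comment — drives a `RetryFunc` under k8s.io/apimachinery's `wait.ExponentialBackoffWithContext`. A hedged usage sketch; the probe callback and its error messages are hypothetical:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cilium/cilium/pkg/resiliency"
)

func main() {
	ctx := context.Background()

	attempts := 0
	// RetryFunc contract: return (true, nil) to stop, (false, nil) to back off
	// and retry, or a non-nil error to abort immediately.
	probe := func(ctx context.Context, retries int) (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // not ready yet, try again
		}
		return true, nil
	}

	if err := resiliency.Retry(ctx, 200*time.Millisecond, 5, probe); err != nil {
		fmt.Println("gave up:", err)
	}

	// Tag an error as retryable so callers can distinguish it later.
	transient := resiliency.Retryable(errors.New("endpoint not ready"))
	fmt.Println(resiliency.IsRetryable(transient))          // true
	fmt.Println(resiliency.IsRetryable(errors.New("boom"))) // false
}
```

Worth noting from the implementation that follows: the `wait.Backoff` is configured with `Factor: 1` and 10% jitter, so the delay between attempts stays close to the initial duration rather than growing geometrically.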
+func Retry(ctx context.Context, duration time.Duration, maxRetries int, fn RetryFunc) error { + bo := wait.Backoff{ + Duration: duration, + Factor: 1, + Jitter: 0.1, + Steps: maxRetries, + } + + var retries int + f := func(ctx context.Context) (bool, error) { + retries++ + return fn(ctx, retries) + } + + return wait.ExponentialBackoffWithContext(ctx, bo, f) +} diff --git a/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go b/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go new file mode 100644 index 0000000000..a6f1bc6344 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/resiliency/retry.go @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package resiliency + +import ( + "errors" +) + +// IsRetryable checks if an error can be retried. +func IsRetryable(e error) bool { + return errors.As(e, new(retryableErr)) +} diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go index 99240e6262..cc00c25ac5 100644 --- a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go +++ b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go @@ -5,11 +5,11 @@ package safetime import ( "runtime" - "time" "github.com/sirupsen/logrus" "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/cilium/pkg/time" ) // TimeSinceSafe returns the duration since t. If the duration is negative, diff --git a/vendor/github.com/cilium/cilium/pkg/slices/slices.go b/vendor/github.com/cilium/cilium/pkg/slices/slices.go index 243a00819e..9e81fb9551 100644 --- a/vendor/github.com/cilium/cilium/pkg/slices/slices.go +++ b/vendor/github.com/cilium/cilium/pkg/slices/slices.go @@ -4,10 +4,8 @@ package slices import ( - "sort" - - "golang.org/x/exp/constraints" - "golang.org/x/exp/slices" + "cmp" + "slices" ) // Unique deduplicates the elements in the input slice, preserving their ordering and @@ -46,6 +44,7 @@ func Unique[S ~[]T, T comparable](s S) S { } } + clear(s[last:]) // zero out obsolete elements for GC return s[:last] } @@ -69,41 +68,22 @@ func UniqueFunc[S ~[]T, T any, K comparable](s S, key func(i int) K) S { last++ } + clear(s[last:]) // zero out obsolete elements for GC return s[:last] } // SortedUnique sorts and dedup the input slice in place. // It uses the < operator to compare the elements in the slice and thus requires // the elements to satisfies contraints.Ordered. -func SortedUnique[S ~[]T, T constraints.Ordered](s S) S { +func SortedUnique[S ~[]T, T cmp.Ordered](s S) S { if len(s) < 2 { return s } - sort.Slice(s, func(i, j int) bool { - return s[i] < s[j] - }) + slices.Sort(s) return slices.Compact(s) } -// SortedUniqueFunc is like SortedUnique but allows the user to specify custom functions -// for ordering (less function) and comparing (eq function) the elements in the slice. -// This is useful in all the cases where SortedUnique cannot be used: -// - for types that do not satisfy constraints.Ordered (e.g: composite types) -// - when the user wants to customize how elements are compared (e.g: user wants to enforce reverse ordering) -func SortedUniqueFunc[S ~[]T, T any]( - s S, - less func(i, j int) bool, - eq func(a, b T) bool, -) S { - if len(s) < 2 { - return s - } - - sort.Slice(s, less) - return slices.CompactFunc(s, eq) -} - // Diff returns a slice of elements which is the difference of a and b. // The returned slice keeps the elements in the same order found in the "a" slice. 
// Both input slices are considered as sets, that is, all elements are considered as @@ -145,3 +125,21 @@ func SubsetOf[S ~[]T, T comparable](a, b S) (bool, []T) { d := Diff(a, b) return len(d) == 0, d } + +// XorNil returns true if one of the two slices is nil while the other is not. +func XorNil[T any](s1, s2 []T) bool { + return s1 == nil && s2 != nil || + s1 != nil && s2 == nil +} + +// AllMatch returns true if pred is true for each element in s, false otherwise. +// May not evaluate on all elements if not necessary for determining the result. +// If the slice is empty then true is returned and predicate is not evaluated. +func AllMatch[T any](s []T, pred func(v T) bool) bool { + for _, v := range s { + if !pred(v) { + return false + } + } + return true +} diff --git a/vendor/github.com/cilium/cilium/pkg/source/source.go b/vendor/github.com/cilium/cilium/pkg/source/source.go new file mode 100644 index 0000000000..0406349f65 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/source/source.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package source + +import ( + "github.com/cilium/hive/cell" +) + +// Source describes the source of a definition +type Source string + +const ( + // Unspec is used when the source is unspecified + Unspec Source = "unspec" + + // KubeAPIServer is the source used for state which represents the + // kube-apiserver, such as the IPs associated with it. This is not to be + // confused with the Kubernetes source. + // KubeAPIServer state has the strongest ownership and can only be + // overwritten by itself. + KubeAPIServer Source = "kube-apiserver" + + // Local is the source used for state derived from local agent state. + // Local state has the second strongest ownership, behind KubeAPIServer. + Local Source = "local" + + // KVStore is the source used for state derived from a key value store. + // State in the key value stored takes precedence over orchestration + // system state such as Kubernetes. + KVStore Source = "kvstore" + + // CustomResource is the source used for state derived from Kubernetes + // custom resources + CustomResource Source = "custom-resource" + + // Kubernetes is the source used for state derived from Kubernetes + Kubernetes Source = "k8s" + + // ClusterMesh is the source used for state derived from remote clusters + ClusterMesh Source = "clustermesh" + + // LocalAPI is the source used for state derived from the API served + // locally on the node. + LocalAPI Source = "api" + + // Generated is the source used for generated state which can be + // overwritten by all other sources, except for restored (and unspec). + Generated Source = "generated" + + // Restored is the source used for restored state from data left behind + // by the previous agent instance. Can be overwritten by all other + // sources (except for unspec). + Restored Source = "restored" + + // Directory is the source used for watching and reading + // cilium network policy files from specific directory. + Directory Source = "directory" + + // Please remember to add your source to defaultSources below. +) + +// Sources is a priority-sorted slice of sources. 
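pkg/source, added here, encodes a strict precedence between the places Cilium learns state from; the `AllowOverwrite` switch a little further down spells the rules out pairwise. A few illustrative calls, with the expected results read off that switch (a sketch only, not taken from Cilium's tests):

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/source"
)

func main() {
	// kube-apiserver state is the strongest owner: only it may replace itself.
	fmt.Println(source.AllowOverwrite(source.KubeAPIServer, source.Local)) // false

	// Kubernetes-derived state may be replaced by custom-resource state...
	fmt.Println(source.AllowOverwrite(source.Kubernetes, source.CustomResource)) // true

	// ...but not by generated or restored state.
	fmt.Println(source.AllowOverwrite(source.Kubernetes, source.Generated)) // false
}
```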
+type Sources []Source + +var defaultSources Sources = []Source{ + KubeAPIServer, + Local, + KVStore, + CustomResource, + Kubernetes, + ClusterMesh, + LocalAPI, + Generated, + Restored, + Unspec, +} + +// AllowOverwrite returns true if new state from a particular source is allowed +// to overwrite existing state from another source +func AllowOverwrite(existing, new Source) bool { + switch existing { + + // KubeAPIServer state can only be overwritten by other kube-apiserver + // state. + case KubeAPIServer: + return new == KubeAPIServer + + // Local state can only be overwritten by other local state or + // kube-apiserver state. + case Local: + return new == Local || new == KubeAPIServer + + // KVStore can be overwritten by other kvstore, local state, or + // kube-apiserver state. + case KVStore: + return new == KVStore || new == Local || new == KubeAPIServer + + // Custom-resource state can be overwritten by other CRD, kvstore, + // local or kube-apiserver state. + case CustomResource: + return new == CustomResource || new == KVStore || new == Local || new == KubeAPIServer + + // Kubernetes state can be overwritten by everything except clustermesh, + // local API, generated, restored and unspecified state. + case Kubernetes: + return new != ClusterMesh && new != LocalAPI && new != Generated && new != Restored && new != Unspec + + // ClusterMesh state can be overwritten by everything except local API, + // generated, restored and unspecified state. + case ClusterMesh: + return new != LocalAPI && new != Generated && new != Restored && new != Unspec + + // Local API state can be overwritten by everything except restored, + // generated and unspecified state + case LocalAPI: + return new != Generated && new != Restored && new != Unspec + + // Generated can be overwritten by everything except by Restored and + // Unspecified + case Generated: + return new != Restored && new != Unspec + + // Restored can be overwritten by everything except by Unspecified + case Restored: + return new != Unspec + + // Unspecified state can be overwritten by everything + case Unspec: + return true + } + return true +} + +var Cell = cell.Module( + "source", + "Definitions and priorities of data sources", + cell.Provide(NewSources), +) + +// NewSources returns sources ordered from the most preferred. +func NewSources() Sources { + return defaultSources +} diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go index e3f7dbb6cd..2557233533 100644 --- a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go +++ b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go @@ -4,12 +4,11 @@ package spanstat import ( - "time" - "github.com/cilium/cilium/pkg/lock" "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/safetime" + "github.com/cilium/cilium/pkg/time" ) var ( diff --git a/vendor/github.com/cilium/cilium/pkg/time/time.go b/vendor/github.com/cilium/cilium/pkg/time/time.go new file mode 100644 index 0000000000..925088acd7 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/time/time.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// package time is a wrapper for the stdlib time library that aliases most +// underlying types, but allows overrides for testing purposes. +// +// Synced to go-1.20.7. 
+package time + +import ( + "time" +) + +const ( + Layout = time.Layout + ANSIC = time.ANSIC + UnixDate = time.UnixDate + RubyDate = time.RubyDate + RFC822 = time.RFC822 + RFC822Z = time.RFC822Z + RFC850 = time.RFC850 + RFC1123 = time.RFC1123 + RFC1123Z = time.RFC1123Z + RFC3339 = time.RFC3339 + RFC3339Nano = time.RFC3339Nano + Kitchen = time.Kitchen + Stamp = time.Stamp + StampMilli = time.StampMilli + StampMicro = time.StampMicro + StampNano = time.StampNano + DateTime = time.DateTime + DateOnly = time.DateOnly + TimeOnly = time.TimeOnly + + Nanosecond = time.Nanosecond + Microsecond = time.Microsecond + Millisecond = time.Millisecond + Second = time.Second + Minute = time.Minute + Hour = time.Hour +) + +var ( + ParseDuration = time.ParseDuration + Since = time.Since + Until = time.Until + FixedZone = time.FixedZone + LoadLocation = time.LoadLocation + LoadLocationFromTZData = time.LoadLocationFromTZData + Date = time.Date + Now = time.Now + Parse = time.Parse + ParseInLocation = time.ParseInLocation + UTC = time.UTC + Unix = time.Unix + UnixMicro = time.UnixMicro + UnixMilli = time.UnixMilli +) + +type ( + Duration = time.Duration + Location = time.Location + Month = time.Month + ParseError = time.ParseError + Ticker = time.Ticker + Time = time.Time + Timer = time.Timer + Weekday = time.Weekday +) + +var ( + MaxInternalTimerDelay time.Duration +) + +// After overrides the stdlib time.After to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func After(d Duration) <-chan Time { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.After(d) +} + +// Sleep overrides the stdlib time.Sleep to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func Sleep(d time.Duration) { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + time.Sleep(d) +} + +// Tick overrides the stdlib time.Tick to enforce maximum sleepiness via +// option.MaxInternalTimerDelay. +func Tick(d Duration) <-chan time.Time { + return NewTicker(d).C +} + +// NewTicker overrides the stdlib time.NewTicker to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. +func NewTicker(d Duration) *time.Ticker { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.NewTicker(d) +} + +// NewTimer overrides the stdlib time.NewTimer to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. +func NewTimer(d Duration) *time.Timer { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.NewTimer(d) +} + +// NewTimerWithoutMaxDelay returns a time.NewTimer without enforcing maximum +// sleepiness. This function should only be used in cases where the timer firing +// early impacts correctness. If in doubt, you probably should use NewTimer. +func NewTimerWithoutMaxDelay(d Duration) *time.Timer { + return time.NewTimer(d) +} + +// AfterFunc overrides the stdlib time.AfterFunc to enforce maximum sleepiness +// via option.MaxInternalTimerDelay. 
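The wrapper package above re-exports the stdlib time API but lets a single variable, `MaxInternalTimerDelay`, cap every internal timer and sleep; the `AfterFunc` override that follows applies the same cap. A minimal sketch of the effect — the 10ms/5min values are arbitrary, and in the agent the cap is normally populated from option.MaxInternalTimerDelay as the comments above note:

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/time"
)

func main() {
	t := time.Now()

	// With the cap unset the wrappers behave exactly like the stdlib.
	// Once set, long waits fire early, which is how timer-heavy code
	// can be exercised quickly under test.
	time.MaxInternalTimerDelay = 10 * time.Millisecond
	<-time.After(5 * time.Minute) // returns after ~10ms, not 5 minutes

	fmt.Println("woke up after", time.Since(t))
}
```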
+func AfterFunc(d Duration, f func()) *time.Timer { + if MaxInternalTimerDelay > 0 && d > MaxInternalTimerDelay { + d = MaxInternalTimerDelay + } + return time.AfterFunc(d, f) +} diff --git a/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go new file mode 100644 index 0000000000..988d11e077 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/u8proto/u8proto.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package u8proto + +import ( + "fmt" + "strconv" + "strings" +) + +// These definitions must contain and be compatible with the string +// values defined for pkg/pollicy/api/L4Proto + +const ( + // ANY represents all protocols. + ANY U8proto = 0 + ICMP U8proto = 1 + TCP U8proto = 6 + UDP U8proto = 17 + ICMPv6 U8proto = 58 + SCTP U8proto = 132 +) + +var protoNames = map[U8proto]string{ + 0: "ANY", + 1: "ICMP", + 6: "TCP", + 17: "UDP", + 58: "ICMPv6", + 132: "SCTP", +} + +var ProtoIDs = map[string]U8proto{ + "all": 0, + "any": 0, + "none": 0, + "icmp": 1, + "tcp": 6, + "udp": 17, + "icmpv6": 58, + "sctp": 132, +} + +type U8proto uint8 + +func (p U8proto) String() string { + if _, ok := protoNames[p]; ok { + return protoNames[p] + } + return strconv.Itoa(int(p)) +} + +func ParseProtocol(proto string) (U8proto, error) { + if u, ok := ProtoIDs[strings.ToLower(proto)]; ok { + return u, nil + } + return 0, fmt.Errorf("unknown protocol '%s'", proto) +} + +func FromNumber(proto uint8) (U8proto, error) { + _, ok := protoNames[U8proto(proto)] + if !ok { + return 0, fmt.Errorf("unknown protocol %d", proto) + } + return U8proto(proto), nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/version/version_unix.go b/vendor/github.com/cilium/cilium/pkg/version/version_unix.go index 09e72b8882..b4d5454e97 100644 --- a/vendor/github.com/cilium/cilium/pkg/version/version_unix.go +++ b/vendor/github.com/cilium/cilium/pkg/version/version_unix.go @@ -18,23 +18,33 @@ import ( func parseKernelVersion(ver string) (semver.Version, error) { verStrs := strings.Split(ver, ".") - switch { - case len(verStrs) < 2: + + // We are assuming the kernel version will be one of the following: + // 4.9.17-040917-generic or 4.9-040917-generic or 4-generic + // So as observed, the kernel value is N.N.N-m or N.N-m or N-m + // This implies the len(verStrs) should be between 1 and 3 + + if len(verStrs) < 1 || len(verStrs) > 3 { return semver.Version{}, fmt.Errorf("unable to get kernel version from %q", ver) - case len(verStrs) < 3: - verStrs = append(verStrs, "0") } - // We are assuming the kernel version will be something as: - // 4.9.17-040917-generic - // If verStrs is []string{ "4", "9", "17-040917-generic" } - // then we need to retrieve patch number. 
- patch := regexp.MustCompilePOSIX(`^[0-9]+`).FindString(verStrs[2]) + // Given the observations, we use regular expression to extract + // the patch number from the last element of the verStrs array and + // append "0" to the verStrs array in case the until its length is + // 3 as in all cases we want to return from this function : + // Major.Minor.PatchNumber + + patch := regexp.MustCompilePOSIX(`^[0-9]+`).FindString(verStrs[len(verStrs)-1]) if patch == "" { - verStrs[2] = "0" + verStrs[len(verStrs)-1] = "0" } else { - verStrs[2] = patch + verStrs[len(verStrs)-1] = patch } + + for len(verStrs) < 3 { + verStrs = append(verStrs, "0") + } + return versioncheck.Version(strings.Join(verStrs[:3], ".")) } diff --git a/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go b/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go index 6b5e34534f..88474cfa7e 100644 --- a/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go +++ b/vendor/github.com/cilium/cilium/pkg/versioncheck/check.go @@ -20,7 +20,7 @@ import ( func MustCompile(constraint string) semver.Range { verCheck, err := Compile(constraint) if err != nil { - panic(fmt.Errorf("cannot compile go-version constraint '%s' %s", constraint, err)) + panic(fmt.Errorf("cannot compile go-version constraint '%s': %w", constraint, err)) } return verCheck } @@ -36,7 +36,7 @@ func Compile(constraint string) (semver.Range, error) { func MustVersion(version string) semver.Version { ver, err := Version(version) if err != nil { - panic(fmt.Errorf("cannot compile go-version version '%s' %s", version, err)) + panic(fmt.Errorf("cannot compile go-version version '%s': %w", version, err)) } return ver } diff --git a/vendor/github.com/cilium/ebpf/.clang-format b/vendor/github.com/cilium/ebpf/.clang-format new file mode 100644 index 0000000000..0ff4257606 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.clang-format @@ -0,0 +1,25 @@ +--- +Language: Cpp +BasedOnStyle: LLVM +AlignAfterOpenBracket: DontAlign +AlignConsecutiveAssignments: true +AlignEscapedNewlines: DontAlign +# mkdocs annotations in source code are written as trailing comments +# and alignment pushes these really far away from the content. +AlignTrailingComments: false +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: false +BreakBeforeBraces: Attach +IndentWidth: 4 +KeepEmptyLinesAtTheStartOfBlocks: false +TabWidth: 4 +UseTab: ForContinuationAndIndentation +ColumnLimit: 1000 +# Go compiler comments need to stay unindented. +CommentPragmas: '^go:.*' +# linux/bpf.h needs to be included before bpf/bpf_helpers.h for types like __u64 +# and sorting makes this impossible. +SortIncludes: false +... 
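Two things change in the version-handling code above: parseKernelVersion now normalizes kernel strings of the form N-m, N.N-m or N.N.N-m to a full Major.Minor.Patch before validation, and versioncheck's panic messages wrap the underlying error with %w so it stays reachable via errors.Is/errors.As. The exported versioncheck API itself is unchanged; a small sketch of how it is used (the ">=5.9.0" constraint is an arbitrary example):

```go
package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/versioncheck"
)

func main() {
	// Compile returns a semver.Range, i.e. a predicate over versions.
	atLeast59, err := versioncheck.Compile(">=5.9.0")
	if err != nil {
		panic(err)
	}

	for _, v := range []string{"5.9.0", "6.8.0", "4.9.17"} {
		ver := versioncheck.MustVersion(v)
		fmt.Printf("%s satisfies >=5.9.0: %t\n", v, atLeast59(ver))
	}
}
```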
diff --git a/vendor/github.com/cilium/ebpf/.gitattributes b/vendor/github.com/cilium/ebpf/.gitattributes new file mode 100644 index 0000000000..113f97b980 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitattributes @@ -0,0 +1 @@ +internal/sys/types.go linguist-generated=false diff --git a/vendor/github.com/cilium/ebpf/.gitignore b/vendor/github.com/cilium/ebpf/.gitignore new file mode 100644 index 0000000000..b46162b8ec --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.o +!*_bpf*.o + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/cilium/ebpf/.golangci.yaml b/vendor/github.com/cilium/ebpf/.golangci.yaml new file mode 100644 index 0000000000..366d4893f2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.golangci.yaml @@ -0,0 +1,19 @@ +--- +linters: + disable-all: true + enable: + - goimports + - gosimple + - govet + - ineffassign + - misspell + - staticcheck + - typecheck + - unused + - gofmt +linters-settings: + goimports: + # A comma-separated list of prefixes, which, if set, checks import paths + # with the given prefixes are grouped after 3rd-party packages. + # Default: "" + local-prefixes: github.com/cilium/ebpf diff --git a/vendor/github.com/cilium/ebpf/.vimto.toml b/vendor/github.com/cilium/ebpf/.vimto.toml new file mode 100644 index 0000000000..49a12dbc09 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/.vimto.toml @@ -0,0 +1,12 @@ +kernel="ghcr.io/cilium/ci-kernels:stable" +smp="cpus=2" +memory="1G" +user="root" +setup=[ + "mount -t cgroup2 -o nosuid,noexec,nodev cgroup2 /sys/fs/cgroup", + "/bin/sh -c 'modprobe bpf_testmod || true'", + "dmesg --clear", +] +teardown=[ + "dmesg --read-clear", +] diff --git a/vendor/github.com/cilium/ebpf/CODEOWNERS b/vendor/github.com/cilium/ebpf/CODEOWNERS new file mode 100644 index 0000000000..0f76dce85c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODEOWNERS @@ -0,0 +1,13 @@ +* @cilium/ebpf-lib-maintainers + +features/ @rgo3 +link/ @mmat11 + +perf/ @florianl +ringbuf/ @florianl + +btf/ @dylandreimerink + +cmd/bpf2go/ @mejedi + +docs/ @ti-mo diff --git a/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..8e42838c5a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at nathanjsweet at gmail dot com or i at lmb dot io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/cilium/ebpf/CONTRIBUTING.md b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md new file mode 100644 index 0000000000..673a9ac290 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contributing to ebpf-go + +Want to contribute to ebpf-go? There are a few things you need to know. + +We wrote a [contribution guide](https://ebpf-go.dev/contributing/) to help you get started. 
diff --git a/vendor/github.com/cilium/ebpf/LICENSE b/vendor/github.com/cilium/ebpf/LICENSE new file mode 100644 index 0000000000..c637ae99c2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2017 Nathan Sweet +Copyright (c) 2018, 2019 Cloudflare +Copyright (c) 2019 Authors of Cilium + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/cilium/ebpf/MAINTAINERS.md b/vendor/github.com/cilium/ebpf/MAINTAINERS.md new file mode 100644 index 0000000000..a56a03e394 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/MAINTAINERS.md @@ -0,0 +1,3 @@ +# Maintainers + +Maintainers can be found in the [Cilium Maintainers file](https://github.com/cilium/community/blob/main/roles/Maintainers.md) diff --git a/vendor/github.com/cilium/ebpf/Makefile b/vendor/github.com/cilium/ebpf/Makefile new file mode 100644 index 0000000000..e0fe974920 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/Makefile @@ -0,0 +1,114 @@ +# The development version of clang is distributed as the 'clang' binary, +# while stable/released versions have a version number attached. +# Pin the default clang to a stable version. +CLANG ?= clang-17 +STRIP ?= llvm-strip-17 +OBJCOPY ?= llvm-objcopy-17 +CFLAGS := -O2 -g -Wall -Werror $(CFLAGS) + +CI_KERNEL_URL ?= https://github.com/cilium/ci-kernels/raw/master/ + +# Obtain an absolute path to the directory of the Makefile. +# Assume the Makefile is in the root of the repository. +REPODIR := $(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) +UIDGID := $(shell stat -c '%u:%g' ${REPODIR}) + +# Prefer podman if installed, otherwise use docker. +# Note: Setting the var at runtime will always override. 
+CONTAINER_ENGINE ?= $(if $(shell command -v podman), podman, docker) +CONTAINER_RUN_ARGS ?= $(if $(filter ${CONTAINER_ENGINE}, podman), --log-driver=none, --user "${UIDGID}") + +IMAGE := $(shell cat ${REPODIR}/testdata/docker/IMAGE) +VERSION := $(shell cat ${REPODIR}/testdata/docker/VERSION) + +TARGETS := \ + testdata/loader-clang-11 \ + testdata/loader-clang-14 \ + testdata/loader-$(CLANG) \ + testdata/manyprogs \ + testdata/btf_map_init \ + testdata/invalid_map \ + testdata/raw_tracepoint \ + testdata/invalid_map_static \ + testdata/invalid_btf_map_init \ + testdata/strings \ + testdata/freplace \ + testdata/fentry_fexit \ + testdata/iproute2_map_compat \ + testdata/map_spin_lock \ + testdata/subprog_reloc \ + testdata/fwd_decl \ + testdata/kconfig \ + testdata/ksym \ + testdata/kfunc \ + testdata/invalid-kfunc \ + testdata/kfunc-kmod \ + testdata/constants \ + testdata/errors \ + testdata/variables \ + btf/testdata/relocs \ + btf/testdata/relocs_read \ + btf/testdata/relocs_read_tgt \ + btf/testdata/relocs_enum \ + btf/testdata/tags \ + cmd/bpf2go/testdata/minimal + +.PHONY: all clean container-all container-shell generate + +.DEFAULT_TARGET = container-all + +# Build all ELF binaries using a containerized LLVM toolchain. +container-all: + +${CONTAINER_ENGINE} run --rm -ti ${CONTAINER_RUN_ARGS} \ + -v "${REPODIR}":/ebpf -w /ebpf --env MAKEFLAGS \ + --env HOME="/tmp" \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" \ + make all + +# (debug) Drop the user into a shell inside the container as root. +# Set BPF2GO_ envs to make 'make generate' just work. +container-shell: + ${CONTAINER_ENGINE} run --rm -ti \ + -v "${REPODIR}":/ebpf -w /ebpf \ + --env BPF2GO_CC="$(CLANG)" \ + --env BPF2GO_FLAGS="-fdebug-prefix-map=/ebpf=. $(CFLAGS)" \ + "${IMAGE}:${VERSION}" + +clean: + find "$(CURDIR)" -name "*.elf" -delete + find "$(CURDIR)" -name "*.o" -delete + +format: + find . -type f -name "*.c" | xargs clang-format -i + +all: format $(addsuffix -el.elf,$(TARGETS)) $(addsuffix -eb.elf,$(TARGETS)) generate + ln -srf testdata/loader-$(CLANG)-el.elf testdata/loader-el.elf + ln -srf testdata/loader-$(CLANG)-eb.elf testdata/loader-eb.elf + +generate: + go generate -run "internal/cmd/gentypes" ./... + go generate -skip "internal/cmd/gentypes" ./... + +testdata/loader-%-el.elf: testdata/loader.c + $* $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +testdata/loader-%-eb.elf: testdata/loader.c + $* $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +%-el.elf: %.c + $(CLANG) $(CFLAGS) -target bpfel -c $< -o $@ + $(STRIP) -g $@ + +%-eb.elf : %.c + $(CLANG) $(CFLAGS) -target bpfeb -c $< -o $@ + $(STRIP) -g $@ + +.PHONY: update-kernel-deps +update-kernel-deps: export KERNEL_VERSION?=6.8 +update-kernel-deps: + ./testdata/sh/update-kernel-deps.sh + $(MAKE) container-all diff --git a/vendor/github.com/cilium/ebpf/README.md b/vendor/github.com/cilium/ebpf/README.md new file mode 100644 index 0000000000..8238256c8e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/README.md @@ -0,0 +1,73 @@ +# eBPF + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/cilium/ebpf)](https://pkg.go.dev/github.com/cilium/ebpf) + +![HoneyGopher](docs/ebpf/ebpf-go.png) + +ebpf-go is a pure Go library that provides utilities for loading, compiling, and +debugging eBPF programs. It has minimal external dependencies and is intended to +be used in long running processes. + +See [ebpf.io](https://ebpf.io) for complementary projects from the wider eBPF +ecosystem. 
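To make the README's claim concrete — loading eBPF from pure Go — here is a minimal sketch using the asm and ebpf packages: a hand-assembled "return 0" program. It is illustrative only (loading it requires root or CAP_BPF and a reasonably recent kernel):

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	// Hand-assemble the smallest possible program: "return 0".
	spec := &ebpf.ProgramSpec{
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0), // r0 = 0, the program's return value
			asm.Return(),
		},
		License: "MIT",
	}

	prog, err := ebpf.NewProgram(spec) // verifies and loads into the kernel
	if err != nil {
		panic(err)
	}
	defer prog.Close()

	fmt.Println("loaded:", prog)
}
```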
+ +## Getting Started + +Please take a look at our [Getting Started] guide. + +[Contributions](https://ebpf-go.dev/contributing) are highly encouraged, as they highlight certain use cases of +eBPF and the library, and help shape the future of the project. + +## Getting Help + +The community actively monitors our [GitHub Discussions](https://github.com/cilium/ebpf/discussions) page. +Please search for existing threads before starting a new one. Refrain from +opening issues on the bug tracker if you're just starting out or if you're not +sure if something is a bug in the library code. + +Alternatively, [join](https://ebpf.io/slack) the +[#ebpf-go](https://cilium.slack.com/messages/ebpf-go) channel on Slack if you +have other questions regarding the project. Note that this channel is ephemeral +and has its history erased past a certain point, which is less helpful for +others running into the same problem later. + +## Packages + +This library includes the following packages: + +* [asm](https://pkg.go.dev/github.com/cilium/ebpf/asm) contains a basic + assembler, allowing you to write eBPF assembly instructions directly + within your Go code. (You don't need to use this if you prefer to write your eBPF program in C.) +* [cmd/bpf2go](https://pkg.go.dev/github.com/cilium/ebpf/cmd/bpf2go) allows + compiling and embedding eBPF programs written in C within Go code. As well as + compiling the C code, it auto-generates Go code for loading and manipulating + the eBPF program and map objects. +* [link](https://pkg.go.dev/github.com/cilium/ebpf/link) allows attaching eBPF + to various hooks +* [perf](https://pkg.go.dev/github.com/cilium/ebpf/perf) allows reading from a + `PERF_EVENT_ARRAY` +* [ringbuf](https://pkg.go.dev/github.com/cilium/ebpf/ringbuf) allows reading from a + `BPF_MAP_TYPE_RINGBUF` map +* [features](https://pkg.go.dev/github.com/cilium/ebpf/features) implements the equivalent + of `bpftool feature probe` for discovering BPF-related kernel features using native Go. +* [rlimit](https://pkg.go.dev/github.com/cilium/ebpf/rlimit) provides a convenient API to lift + the `RLIMIT_MEMLOCK` constraint on kernels before 5.11. +* [btf](https://pkg.go.dev/github.com/cilium/ebpf/btf) allows reading the BPF Type Format. +* [pin](https://pkg.go.dev/github.com/cilium/ebpf/pin) provides APIs for working with pinned objects on bpffs. + +## Requirements + +* A version of Go that is [supported by + upstream](https://golang.org/doc/devel/release.html#policy) +* CI is run against kernel.org LTS releases. >= 4.4 should work but EOL'ed versions + are not supported. + +## License + +MIT + +### eBPF Gopher + +The eBPF honeygopher is based on the Go gopher designed by Renee French. + +[Getting Started]: https://ebpf-go.dev/guides/getting-started/ diff --git a/vendor/github.com/cilium/ebpf/asm/alu.go b/vendor/github.com/cilium/ebpf/asm/alu.go new file mode 100644 index 0000000000..282233d327 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu.go @@ -0,0 +1,180 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output alu_string.go -type=Source,Endianness,ALUOp + +// Source of ALU / ALU64 / Branch operations +// +// msb lsb +// +------------+-+---+ +// | op |S|cls| +// +------------+-+---+ +type Source uint16 + +const sourceMask OpCode = 0x0008 + +// Source bitmask +const ( + // InvalidSource is returned by getters when invoked + // on non ALU / branch OpCodes. 
+ InvalidSource Source = 0xffff + // ImmSource src is from constant + ImmSource Source = 0x0000 + // RegSource src is from register + RegSource Source = 0x0008 +) + +// The Endianness of a byte swap instruction. +type Endianness uint8 + +const endianMask = sourceMask + +// Endian flags +const ( + InvalidEndian Endianness = 0xff + // Convert to little endian + LE Endianness = 0x00 + // Convert to big endian + BE Endianness = 0x08 +) + +// ALUOp are ALU / ALU64 operations +// +// msb lsb +// +-------+----+-+---+ +// | EXT | OP |s|cls| +// +-------+----+-+---+ +type ALUOp uint16 + +const aluMask OpCode = 0x3ff0 + +const ( + // InvalidALUOp is returned by getters when invoked + // on non ALU OpCodes + InvalidALUOp ALUOp = 0xffff + // Add - addition + Add ALUOp = 0x0000 + // Sub - subtraction + Sub ALUOp = 0x0010 + // Mul - multiplication + Mul ALUOp = 0x0020 + // Div - division + Div ALUOp = 0x0030 + // SDiv - signed division + SDiv ALUOp = Div + 0x0100 + // Or - bitwise or + Or ALUOp = 0x0040 + // And - bitwise and + And ALUOp = 0x0050 + // LSh - bitwise shift left + LSh ALUOp = 0x0060 + // RSh - bitwise shift right + RSh ALUOp = 0x0070 + // Neg - sign/unsign signing bit + Neg ALUOp = 0x0080 + // Mod - modulo + Mod ALUOp = 0x0090 + // SMod - signed modulo + SMod ALUOp = Mod + 0x0100 + // Xor - bitwise xor + Xor ALUOp = 0x00a0 + // Mov - move value from one place to another + Mov ALUOp = 0x00b0 + // MovSX8 - move lower 8 bits, sign extended upper bits of target + MovSX8 ALUOp = Mov + 0x0100 + // MovSX16 - move lower 16 bits, sign extended upper bits of target + MovSX16 ALUOp = Mov + 0x0200 + // MovSX32 - move lower 32 bits, sign extended upper bits of target + MovSX32 ALUOp = Mov + 0x0300 + // ArSh - arithmetic shift + ArSh ALUOp = 0x00c0 + // Swap - endian conversions + Swap ALUOp = 0x00d0 +) + +// HostTo converts from host to another endianness. +func HostTo(endian Endianness, dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALUClass).SetALUOp(Swap).SetSource(Source(endian)), + Dst: dst, + Constant: imm, + } +} + +// BSwap unconditionally reverses the order of bytes in a register. +func BSwap(dst Register, size Size) Instruction { + var imm int64 + switch size { + case Half: + imm = 16 + case Word: + imm = 32 + case DWord: + imm = 64 + default: + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: OpCode(ALU64Class).SetALUOp(Swap), + Dst: dst, + Constant: imm, + } +} + +// Op returns the OpCode for an ALU operation with a given source. +func (op ALUOp) Op(source Source) OpCode { + return OpCode(ALU64Class).SetALUOp(op).SetSource(source) +} + +// Reg emits `dst (op) src`. +func (op ALUOp) Reg(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm emits `dst (op) value`. +func (op ALUOp) Imm(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op(ImmSource), + Dst: dst, + Constant: int64(value), + } +} + +// Op32 returns the OpCode for a 32-bit ALU operation with a given source. +func (op ALUOp) Op32(source Source) OpCode { + return OpCode(ALUClass).SetALUOp(op).SetSource(source) +} + +// Reg32 emits `dst (op) src`, zeroing the upper 32 bit of dst. 
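Each ALUOp defined in this file gets 64-bit (Reg/Imm) and 32-bit (Reg32/Imm32) emitters, and HostTo/BSwap build the byte-swap forms. A short sketch of the instructions they produce — register and value choices here are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	insns := asm.Instructions{
		asm.Mov.Imm(asm.R0, 42),              // r0 = 42
		asm.Add.Reg(asm.R0, asm.R1),          // r0 += r1 (64-bit)
		asm.LSh.Imm32(asm.R2, 4),             // w2 <<= 4 (32-bit, upper half of r2 zeroed)
		asm.HostTo(asm.BE, asm.R3, asm.Half), // convert low 16 bits of r3 to big endian
	}
	for _, ins := range insns {
		fmt.Println(ins)
	}
}
```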
+func (op ALUOp) Reg32(dst, src Register) Instruction { + return Instruction{ + OpCode: op.Op32(RegSource), + Dst: dst, + Src: src, + } +} + +// Imm32 emits `dst (op) value`, zeroing the upper 32 bit of dst. +func (op ALUOp) Imm32(dst Register, value int32) Instruction { + return Instruction{ + OpCode: op.Op32(ImmSource), + Dst: dst, + Constant: int64(value), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/alu_string.go b/vendor/github.com/cilium/ebpf/asm/alu_string.go new file mode 100644 index 0000000000..35b406bf3f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/alu_string.go @@ -0,0 +1,117 @@ +// Code generated by "stringer -output alu_string.go -type=Source,Endianness,ALUOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSource-65535] + _ = x[ImmSource-0] + _ = x[RegSource-8] +} + +const ( + _Source_name_0 = "ImmSource" + _Source_name_1 = "RegSource" + _Source_name_2 = "InvalidSource" +) + +func (i Source) String() string { + switch { + case i == 0: + return _Source_name_0 + case i == 8: + return _Source_name_1 + case i == 65535: + return _Source_name_2 + default: + return "Source(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidEndian-255] + _ = x[LE-0] + _ = x[BE-8] +} + +const ( + _Endianness_name_0 = "LE" + _Endianness_name_1 = "BE" + _Endianness_name_2 = "InvalidEndian" +) + +func (i Endianness) String() string { + switch { + case i == 0: + return _Endianness_name_0 + case i == 8: + return _Endianness_name_1 + case i == 255: + return _Endianness_name_2 + default: + return "Endianness(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidALUOp-65535] + _ = x[Add-0] + _ = x[Sub-16] + _ = x[Mul-32] + _ = x[Div-48] + _ = x[SDiv-304] + _ = x[Or-64] + _ = x[And-80] + _ = x[LSh-96] + _ = x[RSh-112] + _ = x[Neg-128] + _ = x[Mod-144] + _ = x[SMod-400] + _ = x[Xor-160] + _ = x[Mov-176] + _ = x[MovSX8-432] + _ = x[MovSX16-688] + _ = x[MovSX32-944] + _ = x[ArSh-192] + _ = x[Swap-208] +} + +const _ALUOp_name = "AddSubMulDivOrAndLShRShNegModXorMovArShSwapSDivSModMovSX8MovSX16MovSX32InvalidALUOp" + +var _ALUOp_map = map[ALUOp]string{ + 0: _ALUOp_name[0:3], + 16: _ALUOp_name[3:6], + 32: _ALUOp_name[6:9], + 48: _ALUOp_name[9:12], + 64: _ALUOp_name[12:14], + 80: _ALUOp_name[14:17], + 96: _ALUOp_name[17:20], + 112: _ALUOp_name[20:23], + 128: _ALUOp_name[23:26], + 144: _ALUOp_name[26:29], + 160: _ALUOp_name[29:32], + 176: _ALUOp_name[32:35], + 192: _ALUOp_name[35:39], + 208: _ALUOp_name[39:43], + 304: _ALUOp_name[43:47], + 400: _ALUOp_name[47:51], + 432: _ALUOp_name[51:57], + 688: _ALUOp_name[57:64], + 944: _ALUOp_name[64:71], + 65535: _ALUOp_name[71:83], +} + +func (i ALUOp) String() string { + if str, ok := _ALUOp_map[i]; ok { + return str + } + return "ALUOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/doc.go b/vendor/github.com/cilium/ebpf/asm/doc.go new file mode 100644 index 0000000000..7031bdc276 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/doc.go @@ -0,0 +1,2 @@ +// Package asm is an assembler for eBPF bytecode. +package asm diff --git a/vendor/github.com/cilium/ebpf/asm/func.go b/vendor/github.com/cilium/ebpf/asm/func.go new file mode 100644 index 0000000000..84a40b2277 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func.go @@ -0,0 +1,250 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output func_string.go -type=BuiltinFunc + +// BuiltinFunc is a built-in eBPF function. +type BuiltinFunc int32 + +func (_ BuiltinFunc) Max() BuiltinFunc { + return maxBuiltinFunc - 1 +} + +// eBPF built-in functions +// +// You can regenerate this list using the following gawk script: +// +// /FN\(.+\),/ { +// match($1, /\(([a-z_0-9]+),/, r) +// split(r[1], p, "_") +// printf "Fn" +// for (i in p) { +// printf "%s%s", toupper(substr(p[i], 1, 1)), substr(p[i], 2) +// } +// print "" +// } +// +// The script expects include/uapi/linux/bpf.h as it's input. 
+const ( + FnUnspec BuiltinFunc = iota + FnMapLookupElem + FnMapUpdateElem + FnMapDeleteElem + FnProbeRead + FnKtimeGetNs + FnTracePrintk + FnGetPrandomU32 + FnGetSmpProcessorId + FnSkbStoreBytes + FnL3CsumReplace + FnL4CsumReplace + FnTailCall + FnCloneRedirect + FnGetCurrentPidTgid + FnGetCurrentUidGid + FnGetCurrentComm + FnGetCgroupClassid + FnSkbVlanPush + FnSkbVlanPop + FnSkbGetTunnelKey + FnSkbSetTunnelKey + FnPerfEventRead + FnRedirect + FnGetRouteRealm + FnPerfEventOutput + FnSkbLoadBytes + FnGetStackid + FnCsumDiff + FnSkbGetTunnelOpt + FnSkbSetTunnelOpt + FnSkbChangeProto + FnSkbChangeType + FnSkbUnderCgroup + FnGetHashRecalc + FnGetCurrentTask + FnProbeWriteUser + FnCurrentTaskUnderCgroup + FnSkbChangeTail + FnSkbPullData + FnCsumUpdate + FnSetHashInvalid + FnGetNumaNodeId + FnSkbChangeHead + FnXdpAdjustHead + FnProbeReadStr + FnGetSocketCookie + FnGetSocketUid + FnSetHash + FnSetsockopt + FnSkbAdjustRoom + FnRedirectMap + FnSkRedirectMap + FnSockMapUpdate + FnXdpAdjustMeta + FnPerfEventReadValue + FnPerfProgReadValue + FnGetsockopt + FnOverrideReturn + FnSockOpsCbFlagsSet + FnMsgRedirectMap + FnMsgApplyBytes + FnMsgCorkBytes + FnMsgPullData + FnBind + FnXdpAdjustTail + FnSkbGetXfrmState + FnGetStack + FnSkbLoadBytesRelative + FnFibLookup + FnSockHashUpdate + FnMsgRedirectHash + FnSkRedirectHash + FnLwtPushEncap + FnLwtSeg6StoreBytes + FnLwtSeg6AdjustSrh + FnLwtSeg6Action + FnRcRepeat + FnRcKeydown + FnSkbCgroupId + FnGetCurrentCgroupId + FnGetLocalStorage + FnSkSelectReuseport + FnSkbAncestorCgroupId + FnSkLookupTcp + FnSkLookupUdp + FnSkRelease + FnMapPushElem + FnMapPopElem + FnMapPeekElem + FnMsgPushData + FnMsgPopData + FnRcPointerRel + FnSpinLock + FnSpinUnlock + FnSkFullsock + FnTcpSock + FnSkbEcnSetCe + FnGetListenerSock + FnSkcLookupTcp + FnTcpCheckSyncookie + FnSysctlGetName + FnSysctlGetCurrentValue + FnSysctlGetNewValue + FnSysctlSetNewValue + FnStrtol + FnStrtoul + FnSkStorageGet + FnSkStorageDelete + FnSendSignal + FnTcpGenSyncookie + FnSkbOutput + FnProbeReadUser + FnProbeReadKernel + FnProbeReadUserStr + FnProbeReadKernelStr + FnTcpSendAck + FnSendSignalThread + FnJiffies64 + FnReadBranchRecords + FnGetNsCurrentPidTgid + FnXdpOutput + FnGetNetnsCookie + FnGetCurrentAncestorCgroupId + FnSkAssign + FnKtimeGetBootNs + FnSeqPrintf + FnSeqWrite + FnSkCgroupId + FnSkAncestorCgroupId + FnRingbufOutput + FnRingbufReserve + FnRingbufSubmit + FnRingbufDiscard + FnRingbufQuery + FnCsumLevel + FnSkcToTcp6Sock + FnSkcToTcpSock + FnSkcToTcpTimewaitSock + FnSkcToTcpRequestSock + FnSkcToUdp6Sock + FnGetTaskStack + FnLoadHdrOpt + FnStoreHdrOpt + FnReserveHdrOpt + FnInodeStorageGet + FnInodeStorageDelete + FnDPath + FnCopyFromUser + FnSnprintfBtf + FnSeqPrintfBtf + FnSkbCgroupClassid + FnRedirectNeigh + FnPerCpuPtr + FnThisCpuPtr + FnRedirectPeer + FnTaskStorageGet + FnTaskStorageDelete + FnGetCurrentTaskBtf + FnBprmOptsSet + FnKtimeGetCoarseNs + FnImaInodeHash + FnSockFromFile + FnCheckMtu + FnForEachMapElem + FnSnprintf + FnSysBpf + FnBtfFindByNameKind + FnSysClose + FnTimerInit + FnTimerSetCallback + FnTimerStart + FnTimerCancel + FnGetFuncIp + FnGetAttachCookie + FnTaskPtRegs + FnGetBranchSnapshot + FnTraceVprintk + FnSkcToUnixSock + FnKallsymsLookupName + FnFindVma + FnLoop + FnStrncmp + FnGetFuncArg + FnGetFuncRet + FnGetFuncArgCnt + FnGetRetval + FnSetRetval + FnXdpGetBuffLen + FnXdpLoadBytes + FnXdpStoreBytes + FnCopyFromUserTask + FnSkbSetTstamp + FnImaFileHash + FnKptrXchg + FnMapLookupPercpuElem + FnSkcToMptcpSock + FnDynptrFromMem + FnRingbufReserveDynptr + 
FnRingbufSubmitDynptr + FnRingbufDiscardDynptr + FnDynptrRead + FnDynptrWrite + FnDynptrData + FnTcpRawGenSyncookieIpv4 + FnTcpRawGenSyncookieIpv6 + FnTcpRawCheckSyncookieIpv4 + FnTcpRawCheckSyncookieIpv6 + FnKtimeGetTaiNs + FnUserRingbufDrain + FnCgrpStorageGet + FnCgrpStorageDelete + + maxBuiltinFunc +) + +// Call emits a function call. +func (fn BuiltinFunc) Call() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Constant: int64(fn), + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/func_string.go b/vendor/github.com/cilium/ebpf/asm/func_string.go new file mode 100644 index 0000000000..47150bc4f2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/func_string.go @@ -0,0 +1,235 @@ +// Code generated by "stringer -output func_string.go -type=BuiltinFunc"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[FnUnspec-0] + _ = x[FnMapLookupElem-1] + _ = x[FnMapUpdateElem-2] + _ = x[FnMapDeleteElem-3] + _ = x[FnProbeRead-4] + _ = x[FnKtimeGetNs-5] + _ = x[FnTracePrintk-6] + _ = x[FnGetPrandomU32-7] + _ = x[FnGetSmpProcessorId-8] + _ = x[FnSkbStoreBytes-9] + _ = x[FnL3CsumReplace-10] + _ = x[FnL4CsumReplace-11] + _ = x[FnTailCall-12] + _ = x[FnCloneRedirect-13] + _ = x[FnGetCurrentPidTgid-14] + _ = x[FnGetCurrentUidGid-15] + _ = x[FnGetCurrentComm-16] + _ = x[FnGetCgroupClassid-17] + _ = x[FnSkbVlanPush-18] + _ = x[FnSkbVlanPop-19] + _ = x[FnSkbGetTunnelKey-20] + _ = x[FnSkbSetTunnelKey-21] + _ = x[FnPerfEventRead-22] + _ = x[FnRedirect-23] + _ = x[FnGetRouteRealm-24] + _ = x[FnPerfEventOutput-25] + _ = x[FnSkbLoadBytes-26] + _ = x[FnGetStackid-27] + _ = x[FnCsumDiff-28] + _ = x[FnSkbGetTunnelOpt-29] + _ = x[FnSkbSetTunnelOpt-30] + _ = x[FnSkbChangeProto-31] + _ = x[FnSkbChangeType-32] + _ = x[FnSkbUnderCgroup-33] + _ = x[FnGetHashRecalc-34] + _ = x[FnGetCurrentTask-35] + _ = x[FnProbeWriteUser-36] + _ = x[FnCurrentTaskUnderCgroup-37] + _ = x[FnSkbChangeTail-38] + _ = x[FnSkbPullData-39] + _ = x[FnCsumUpdate-40] + _ = x[FnSetHashInvalid-41] + _ = x[FnGetNumaNodeId-42] + _ = x[FnSkbChangeHead-43] + _ = x[FnXdpAdjustHead-44] + _ = x[FnProbeReadStr-45] + _ = x[FnGetSocketCookie-46] + _ = x[FnGetSocketUid-47] + _ = x[FnSetHash-48] + _ = x[FnSetsockopt-49] + _ = x[FnSkbAdjustRoom-50] + _ = x[FnRedirectMap-51] + _ = x[FnSkRedirectMap-52] + _ = x[FnSockMapUpdate-53] + _ = x[FnXdpAdjustMeta-54] + _ = x[FnPerfEventReadValue-55] + _ = x[FnPerfProgReadValue-56] + _ = x[FnGetsockopt-57] + _ = x[FnOverrideReturn-58] + _ = x[FnSockOpsCbFlagsSet-59] + _ = x[FnMsgRedirectMap-60] + _ = x[FnMsgApplyBytes-61] + _ = x[FnMsgCorkBytes-62] + _ = x[FnMsgPullData-63] + _ = x[FnBind-64] + _ = x[FnXdpAdjustTail-65] + _ = x[FnSkbGetXfrmState-66] + _ = x[FnGetStack-67] + _ = x[FnSkbLoadBytesRelative-68] + _ = x[FnFibLookup-69] + _ = x[FnSockHashUpdate-70] + _ = x[FnMsgRedirectHash-71] + _ = x[FnSkRedirectHash-72] + _ = x[FnLwtPushEncap-73] + _ = x[FnLwtSeg6StoreBytes-74] + _ = x[FnLwtSeg6AdjustSrh-75] + _ = x[FnLwtSeg6Action-76] + _ = x[FnRcRepeat-77] + _ = x[FnRcKeydown-78] + _ = x[FnSkbCgroupId-79] + _ = x[FnGetCurrentCgroupId-80] + _ = x[FnGetLocalStorage-81] + _ = x[FnSkSelectReuseport-82] + _ = x[FnSkbAncestorCgroupId-83] + _ = x[FnSkLookupTcp-84] + _ = x[FnSkLookupUdp-85] + _ = x[FnSkRelease-86] + _ = x[FnMapPushElem-87] + _ = x[FnMapPopElem-88] + _ = x[FnMapPeekElem-89] + _ = 
x[FnMsgPushData-90] + _ = x[FnMsgPopData-91] + _ = x[FnRcPointerRel-92] + _ = x[FnSpinLock-93] + _ = x[FnSpinUnlock-94] + _ = x[FnSkFullsock-95] + _ = x[FnTcpSock-96] + _ = x[FnSkbEcnSetCe-97] + _ = x[FnGetListenerSock-98] + _ = x[FnSkcLookupTcp-99] + _ = x[FnTcpCheckSyncookie-100] + _ = x[FnSysctlGetName-101] + _ = x[FnSysctlGetCurrentValue-102] + _ = x[FnSysctlGetNewValue-103] + _ = x[FnSysctlSetNewValue-104] + _ = x[FnStrtol-105] + _ = x[FnStrtoul-106] + _ = x[FnSkStorageGet-107] + _ = x[FnSkStorageDelete-108] + _ = x[FnSendSignal-109] + _ = x[FnTcpGenSyncookie-110] + _ = x[FnSkbOutput-111] + _ = x[FnProbeReadUser-112] + _ = x[FnProbeReadKernel-113] + _ = x[FnProbeReadUserStr-114] + _ = x[FnProbeReadKernelStr-115] + _ = x[FnTcpSendAck-116] + _ = x[FnSendSignalThread-117] + _ = x[FnJiffies64-118] + _ = x[FnReadBranchRecords-119] + _ = x[FnGetNsCurrentPidTgid-120] + _ = x[FnXdpOutput-121] + _ = x[FnGetNetnsCookie-122] + _ = x[FnGetCurrentAncestorCgroupId-123] + _ = x[FnSkAssign-124] + _ = x[FnKtimeGetBootNs-125] + _ = x[FnSeqPrintf-126] + _ = x[FnSeqWrite-127] + _ = x[FnSkCgroupId-128] + _ = x[FnSkAncestorCgroupId-129] + _ = x[FnRingbufOutput-130] + _ = x[FnRingbufReserve-131] + _ = x[FnRingbufSubmit-132] + _ = x[FnRingbufDiscard-133] + _ = x[FnRingbufQuery-134] + _ = x[FnCsumLevel-135] + _ = x[FnSkcToTcp6Sock-136] + _ = x[FnSkcToTcpSock-137] + _ = x[FnSkcToTcpTimewaitSock-138] + _ = x[FnSkcToTcpRequestSock-139] + _ = x[FnSkcToUdp6Sock-140] + _ = x[FnGetTaskStack-141] + _ = x[FnLoadHdrOpt-142] + _ = x[FnStoreHdrOpt-143] + _ = x[FnReserveHdrOpt-144] + _ = x[FnInodeStorageGet-145] + _ = x[FnInodeStorageDelete-146] + _ = x[FnDPath-147] + _ = x[FnCopyFromUser-148] + _ = x[FnSnprintfBtf-149] + _ = x[FnSeqPrintfBtf-150] + _ = x[FnSkbCgroupClassid-151] + _ = x[FnRedirectNeigh-152] + _ = x[FnPerCpuPtr-153] + _ = x[FnThisCpuPtr-154] + _ = x[FnRedirectPeer-155] + _ = x[FnTaskStorageGet-156] + _ = x[FnTaskStorageDelete-157] + _ = x[FnGetCurrentTaskBtf-158] + _ = x[FnBprmOptsSet-159] + _ = x[FnKtimeGetCoarseNs-160] + _ = x[FnImaInodeHash-161] + _ = x[FnSockFromFile-162] + _ = x[FnCheckMtu-163] + _ = x[FnForEachMapElem-164] + _ = x[FnSnprintf-165] + _ = x[FnSysBpf-166] + _ = x[FnBtfFindByNameKind-167] + _ = x[FnSysClose-168] + _ = x[FnTimerInit-169] + _ = x[FnTimerSetCallback-170] + _ = x[FnTimerStart-171] + _ = x[FnTimerCancel-172] + _ = x[FnGetFuncIp-173] + _ = x[FnGetAttachCookie-174] + _ = x[FnTaskPtRegs-175] + _ = x[FnGetBranchSnapshot-176] + _ = x[FnTraceVprintk-177] + _ = x[FnSkcToUnixSock-178] + _ = x[FnKallsymsLookupName-179] + _ = x[FnFindVma-180] + _ = x[FnLoop-181] + _ = x[FnStrncmp-182] + _ = x[FnGetFuncArg-183] + _ = x[FnGetFuncRet-184] + _ = x[FnGetFuncArgCnt-185] + _ = x[FnGetRetval-186] + _ = x[FnSetRetval-187] + _ = x[FnXdpGetBuffLen-188] + _ = x[FnXdpLoadBytes-189] + _ = x[FnXdpStoreBytes-190] + _ = x[FnCopyFromUserTask-191] + _ = x[FnSkbSetTstamp-192] + _ = x[FnImaFileHash-193] + _ = x[FnKptrXchg-194] + _ = x[FnMapLookupPercpuElem-195] + _ = x[FnSkcToMptcpSock-196] + _ = x[FnDynptrFromMem-197] + _ = x[FnRingbufReserveDynptr-198] + _ = x[FnRingbufSubmitDynptr-199] + _ = x[FnRingbufDiscardDynptr-200] + _ = x[FnDynptrRead-201] + _ = x[FnDynptrWrite-202] + _ = x[FnDynptrData-203] + _ = x[FnTcpRawGenSyncookieIpv4-204] + _ = x[FnTcpRawGenSyncookieIpv6-205] + _ = x[FnTcpRawCheckSyncookieIpv4-206] + _ = x[FnTcpRawCheckSyncookieIpv6-207] + _ = x[FnKtimeGetTaiNs-208] + _ = x[FnUserRingbufDrain-209] + _ = x[FnCgrpStorageGet-210] + _ = x[FnCgrpStorageDelete-211] + _ = x[maxBuiltinFunc-212] +} 
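The `_()` function above is the guard stringer emits: each `_ = x[FnFoo-N]` only compiles while the constant still equals N, so a renumbered helper breaks the build with an "invalid array index" error instead of silently printing the wrong name. Downstream use of the constants themselves is straightforward; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf/asm"
)

func main() {
	// String() comes from the generated table in this file.
	fmt.Println(asm.FnKtimeGetNs) // prints "FnKtimeGetNs"

	// Call() emits the BPF call instruction for a helper.
	ins := asm.FnMapLookupElem.Call()
	fmt.Println(ins)
}
```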
+ +const _BuiltinFunc_name = "FnUnspecFnMapLookupElemFnMapUpdateElemFnMapDeleteElemFnProbeReadFnKtimeGetNsFnTracePrintkFnGetPrandomU32FnGetSmpProcessorIdFnSkbStoreBytesFnL3CsumReplaceFnL4CsumReplaceFnTailCallFnCloneRedirectFnGetCurrentPidTgidFnGetCurrentUidGidFnGetCurrentCommFnGetCgroupClassidFnSkbVlanPushFnSkbVlanPopFnSkbGetTunnelKeyFnSkbSetTunnelKeyFnPerfEventReadFnRedirectFnGetRouteRealmFnPerfEventOutputFnSkbLoadBytesFnGetStackidFnCsumDiffFnSkbGetTunnelOptFnSkbSetTunnelOptFnSkbChangeProtoFnSkbChangeTypeFnSkbUnderCgroupFnGetHashRecalcFnGetCurrentTaskFnProbeWriteUserFnCurrentTaskUnderCgroupFnSkbChangeTailFnSkbPullDataFnCsumUpdateFnSetHashInvalidFnGetNumaNodeIdFnSkbChangeHeadFnXdpAdjustHeadFnProbeReadStrFnGetSocketCookieFnGetSocketUidFnSetHashFnSetsockoptFnSkbAdjustRoomFnRedirectMapFnSkRedirectMapFnSockMapUpdateFnXdpAdjustMetaFnPerfEventReadValueFnPerfProgReadValueFnGetsockoptFnOverrideReturnFnSockOpsCbFlagsSetFnMsgRedirectMapFnMsgApplyBytesFnMsgCorkBytesFnMsgPullDataFnBindFnXdpAdjustTailFnSkbGetXfrmStateFnGetStackFnSkbLoadBytesRelativeFnFibLookupFnSockHashUpdateFnMsgRedirectHashFnSkRedirectHashFnLwtPushEncapFnLwtSeg6StoreBytesFnLwtSeg6AdjustSrhFnLwtSeg6ActionFnRcRepeatFnRcKeydownFnSkbCgroupIdFnGetCurrentCgroupIdFnGetLocalStorageFnSkSelectReuseportFnSkbAncestorCgroupIdFnSkLookupTcpFnSkLookupUdpFnSkReleaseFnMapPushElemFnMapPopElemFnMapPeekElemFnMsgPushDataFnMsgPopDataFnRcPointerRelFnSpinLockFnSpinUnlockFnSkFullsockFnTcpSockFnSkbEcnSetCeFnGetListenerSockFnSkcLookupTcpFnTcpCheckSyncookieFnSysctlGetNameFnSysctlGetCurrentValueFnSysctlGetNewValueFnSysctlSetNewValueFnStrtolFnStrtoulFnSkStorageGetFnSkStorageDeleteFnSendSignalFnTcpGenSyncookieFnSkbOutputFnProbeReadUserFnProbeReadKernelFnProbeReadUserStrFnProbeReadKernelStrFnTcpSendAckFnSendSignalThreadFnJiffies64FnReadBranchRecordsFnGetNsCurrentPidTgidFnXdpOutputFnGetNetnsCookieFnGetCurrentAncestorCgroupIdFnSkAssignFnKtimeGetBootNsFnSeqPrintfFnSeqWriteFnSkCgroupIdFnSkAncestorCgroupIdFnRingbufOutputFnRingbufReserveFnRingbufSubmitFnRingbufDiscardFnRingbufQueryFnCsumLevelFnSkcToTcp6SockFnSkcToTcpSockFnSkcToTcpTimewaitSockFnSkcToTcpRequestSockFnSkcToUdp6SockFnGetTaskStackFnLoadHdrOptFnStoreHdrOptFnReserveHdrOptFnInodeStorageGetFnInodeStorageDeleteFnDPathFnCopyFromUserFnSnprintfBtfFnSeqPrintfBtfFnSkbCgroupClassidFnRedirectNeighFnPerCpuPtrFnThisCpuPtrFnRedirectPeerFnTaskStorageGetFnTaskStorageDeleteFnGetCurrentTaskBtfFnBprmOptsSetFnKtimeGetCoarseNsFnImaInodeHashFnSockFromFileFnCheckMtuFnForEachMapElemFnSnprintfFnSysBpfFnBtfFindByNameKindFnSysCloseFnTimerInitFnTimerSetCallbackFnTimerStartFnTimerCancelFnGetFuncIpFnGetAttachCookieFnTaskPtRegsFnGetBranchSnapshotFnTraceVprintkFnSkcToUnixSockFnKallsymsLookupNameFnFindVmaFnLoopFnStrncmpFnGetFuncArgFnGetFuncRetFnGetFuncArgCntFnGetRetvalFnSetRetvalFnXdpGetBuffLenFnXdpLoadBytesFnXdpStoreBytesFnCopyFromUserTaskFnSkbSetTstampFnImaFileHashFnKptrXchgFnMapLookupPercpuElemFnSkcToMptcpSockFnDynptrFromMemFnRingbufReserveDynptrFnRingbufSubmitDynptrFnRingbufDiscardDynptrFnDynptrReadFnDynptrWriteFnDynptrDataFnTcpRawGenSyncookieIpv4FnTcpRawGenSyncookieIpv6FnTcpRawCheckSyncookieIpv4FnTcpRawCheckSyncookieIpv6FnKtimeGetTaiNsFnUserRingbufDrainFnCgrpStorageGetFnCgrpStorageDeletemaxBuiltinFunc" + +var _BuiltinFunc_index = [...]uint16{0, 8, 23, 38, 53, 64, 76, 89, 104, 123, 138, 153, 168, 178, 193, 212, 230, 246, 264, 277, 289, 306, 323, 338, 348, 363, 380, 394, 406, 416, 433, 450, 466, 481, 497, 512, 528, 544, 568, 583, 596, 608, 624, 639, 654, 669, 683, 700, 714, 723, 735, 750, 763, 778, 793, 808, 828, 847, 859, 875, 894, 910, 
925, 939, 952, 958, 973, 990, 1000, 1022, 1033, 1049, 1066, 1082, 1096, 1115, 1133, 1148, 1158, 1169, 1182, 1202, 1219, 1238, 1259, 1272, 1285, 1296, 1309, 1321, 1334, 1347, 1359, 1373, 1383, 1395, 1407, 1416, 1429, 1446, 1460, 1479, 1494, 1517, 1536, 1555, 1563, 1572, 1586, 1603, 1615, 1632, 1643, 1658, 1675, 1693, 1713, 1725, 1743, 1754, 1773, 1794, 1805, 1821, 1849, 1859, 1875, 1886, 1896, 1908, 1928, 1943, 1959, 1974, 1990, 2004, 2015, 2030, 2044, 2066, 2087, 2102, 2116, 2128, 2141, 2156, 2173, 2193, 2200, 2214, 2227, 2241, 2259, 2274, 2285, 2297, 2311, 2327, 2346, 2365, 2378, 2396, 2410, 2424, 2434, 2450, 2460, 2468, 2487, 2497, 2508, 2526, 2538, 2551, 2562, 2579, 2591, 2610, 2624, 2639, 2659, 2668, 2674, 2683, 2695, 2707, 2722, 2733, 2744, 2759, 2773, 2788, 2806, 2820, 2833, 2843, 2864, 2880, 2895, 2917, 2938, 2960, 2972, 2985, 2997, 3021, 3045, 3071, 3097, 3112, 3130, 3146, 3165, 3179} + +func (i BuiltinFunc) String() string { + if i < 0 || i >= BuiltinFunc(len(_BuiltinFunc_index)-1) { + return "BuiltinFunc(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _BuiltinFunc_name[_BuiltinFunc_index[i]:_BuiltinFunc_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/instruction.go b/vendor/github.com/cilium/ebpf/asm/instruction.go new file mode 100644 index 0000000000..86b384c02a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/instruction.go @@ -0,0 +1,953 @@ +package asm + +import ( + "crypto/sha1" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "math" + "sort" + "strings" + + "github.com/cilium/ebpf/internal/sys" +) + +// InstructionSize is the size of a BPF instruction in bytes +const InstructionSize = 8 + +// RawInstructionOffset is an offset in units of raw BPF instructions. +type RawInstructionOffset uint64 + +var ErrUnreferencedSymbol = errors.New("unreferenced symbol") +var ErrUnsatisfiedMapReference = errors.New("unsatisfied map reference") +var ErrUnsatisfiedProgramReference = errors.New("unsatisfied program reference") + +// Bytes returns the offset of an instruction in bytes. +func (rio RawInstructionOffset) Bytes() uint64 { + return uint64(rio) * InstructionSize +} + +// Instruction is a single eBPF instruction. +type Instruction struct { + OpCode OpCode + Dst Register + Src Register + Offset int16 + Constant int64 + + // Metadata contains optional metadata about this instruction. + Metadata Metadata +} + +// Unmarshal decodes a BPF instruction. +func (ins *Instruction) Unmarshal(r io.Reader, bo binary.ByteOrder) (uint64, error) { + data := make([]byte, InstructionSize) + if _, err := io.ReadFull(r, data); err != nil { + return 0, err + } + + ins.OpCode = OpCode(data[0]) + + regs := data[1] + switch bo { + case binary.LittleEndian: + ins.Dst, ins.Src = Register(regs&0xF), Register(regs>>4) + case binary.BigEndian: + ins.Dst, ins.Src = Register(regs>>4), Register(regs&0xf) + } + + ins.Offset = int16(bo.Uint16(data[2:4])) + + if ins.OpCode.Class().IsALU() { + switch ins.OpCode.ALUOp() { + case Div: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SDiv) + ins.Offset = 0 + } + case Mod: + if ins.Offset == 1 { + ins.OpCode = ins.OpCode.SetALUOp(SMod) + ins.Offset = 0 + } + case Mov: + switch ins.Offset { + case 8: + ins.OpCode = ins.OpCode.SetALUOp(MovSX8) + ins.Offset = 0 + case 16: + ins.OpCode = ins.OpCode.SetALUOp(MovSX16) + ins.Offset = 0 + case 32: + ins.OpCode = ins.OpCode.SetALUOp(MovSX32) + ins.Offset = 0 + } + } + } + + // Convert to int32 before widening to int64 + // to ensure the signed bit is carried over. 
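	// For example, the bytes ff ff ff ff must decode to -1 rather than
	// 4294967295.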
+ ins.Constant = int64(int32(bo.Uint32(data[4:8]))) + + if !ins.OpCode.IsDWordLoad() { + return InstructionSize, nil + } + + // Pull another instruction from the stream to retrieve the second + // half of the 64-bit immediate value. + if _, err := io.ReadFull(r, data); err != nil { + // No Wrap, to avoid io.EOF clash + return 0, errors.New("64bit immediate is missing second half") + } + + // Require that all fields other than the value are zero. + if bo.Uint32(data[0:4]) != 0 { + return 0, errors.New("64bit immediate has non-zero fields") + } + + cons1 := uint32(ins.Constant) + cons2 := int32(bo.Uint32(data[4:8])) + ins.Constant = int64(cons2)<<32 | int64(cons1) + + return 2 * InstructionSize, nil +} + +// Marshal encodes a BPF instruction. +func (ins Instruction) Marshal(w io.Writer, bo binary.ByteOrder) (uint64, error) { + if ins.OpCode == InvalidOpCode { + return 0, errors.New("invalid opcode") + } + + isDWordLoad := ins.OpCode.IsDWordLoad() + + cons := int32(ins.Constant) + if isDWordLoad { + // Encode least significant 32bit first for 64bit operations. + cons = int32(uint32(ins.Constant)) + } + + regs, err := newBPFRegisters(ins.Dst, ins.Src, bo) + if err != nil { + return 0, fmt.Errorf("can't marshal registers: %s", err) + } + + if ins.OpCode.Class().IsALU() { + newOffset := int16(0) + switch ins.OpCode.ALUOp() { + case SDiv: + ins.OpCode = ins.OpCode.SetALUOp(Div) + newOffset = 1 + case SMod: + ins.OpCode = ins.OpCode.SetALUOp(Mod) + newOffset = 1 + case MovSX8: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 8 + case MovSX16: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 16 + case MovSX32: + ins.OpCode = ins.OpCode.SetALUOp(Mov) + newOffset = 32 + } + if newOffset != 0 && ins.Offset != 0 { + return 0, fmt.Errorf("extended ALU opcodes should have an .Offset of 0: %s", ins) + } + ins.Offset = newOffset + } + + op, err := ins.OpCode.bpfOpCode() + if err != nil { + return 0, err + } + + data := make([]byte, InstructionSize) + data[0] = op + data[1] = byte(regs) + bo.PutUint16(data[2:4], uint16(ins.Offset)) + bo.PutUint32(data[4:8], uint32(cons)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + if !isDWordLoad { + return InstructionSize, nil + } + + // The first half of the second part of a double-wide instruction + // must be zero. The second half carries the value. + bo.PutUint32(data[0:4], 0) + bo.PutUint32(data[4:8], uint32(ins.Constant>>32)) + if _, err := w.Write(data); err != nil { + return 0, err + } + + return 2 * InstructionSize, nil +} + +// AssociateMap associates a Map with this Instruction. +// +// Implicitly clears the Instruction's Reference field. +// +// Returns an error if the Instruction is not a map load. +func (ins *Instruction) AssociateMap(m FDer) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.Metadata.Set(referenceMeta{}, nil) + ins.Metadata.Set(mapMeta{}, m) + + return nil +} + +// RewriteMapPtr changes an instruction to use a new map fd. +// +// Returns an error if the instruction doesn't load a map. +// +// Deprecated: use AssociateMap instead. If you cannot provide a Map, +// wrap an fd in a type implementing FDer. +func (ins *Instruction) RewriteMapPtr(fd int) error { + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + return nil +} + +func (ins *Instruction) encodeMapFD(fd int) { + // Preserve the offset value for direct map loads. 
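	// The Constant field packs two values for map loads: the upper 32 bits
	// carry the direct-load value offset, the lower 32 bits carry the map
	// file descriptor (see also RewriteMapOffset and mapOffset below).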
+ offset := uint64(ins.Constant) & (math.MaxUint32 << 32) + rawFd := uint64(uint32(fd)) + ins.Constant = int64(offset | rawFd) +} + +// MapPtr returns the map fd for this instruction. +// +// The result is undefined if the instruction is not a load from a map, +// see IsLoadFromMap. +// +// Deprecated: use Map() instead. +func (ins *Instruction) MapPtr() int { + // If there is a map associated with the instruction, return its FD. + if fd := ins.Metadata.Get(mapMeta{}); fd != nil { + return fd.(FDer).FD() + } + + // Fall back to the fd stored in the Constant field + return ins.mapFd() +} + +// mapFd returns the map file descriptor stored in the 32 least significant +// bits of ins' Constant field. +func (ins *Instruction) mapFd() int { + return int(int32(ins.Constant)) +} + +// RewriteMapOffset changes the offset of a direct load from a map. +// +// Returns an error if the instruction is not a direct load. +func (ins *Instruction) RewriteMapOffset(offset uint32) error { + if !ins.OpCode.IsDWordLoad() { + return fmt.Errorf("%s is not a 64 bit load", ins.OpCode) + } + + if ins.Src != PseudoMapValue { + return errors.New("not a direct load from a map") + } + + fd := uint64(ins.Constant) & math.MaxUint32 + ins.Constant = int64(uint64(offset)<<32 | fd) + return nil +} + +func (ins *Instruction) mapOffset() uint32 { + return uint32(uint64(ins.Constant) >> 32) +} + +// IsLoadFromMap returns true if the instruction loads from a map. +// +// This covers both loading the map pointer and direct map value loads. +func (ins *Instruction) IsLoadFromMap() bool { + return ins.OpCode == LoadImmOp(DWord) && (ins.Src == PseudoMapFD || ins.Src == PseudoMapValue) +} + +// IsFunctionCall returns true if the instruction calls another BPF function. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsFunctionCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoCall +} + +// IsKfuncCall returns true if the instruction calls a kfunc. +// +// This is not the same thing as a BPF helper call. +func (ins *Instruction) IsKfuncCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == PseudoKfuncCall +} + +// IsLoadOfFunctionPointer returns true if the instruction loads a function pointer. +func (ins *Instruction) IsLoadOfFunctionPointer() bool { + return ins.OpCode.IsDWordLoad() && ins.Src == PseudoFunc +} + +// IsFunctionReference returns true if the instruction references another BPF +// function, either by invoking a Call jump operation or by loading a function +// pointer. +func (ins *Instruction) IsFunctionReference() bool { + return ins.IsFunctionCall() || ins.IsLoadOfFunctionPointer() +} + +// IsBuiltinCall returns true if the instruction is a built-in call, i.e. BPF helper call. +func (ins *Instruction) IsBuiltinCall() bool { + return ins.OpCode.JumpOp() == Call && ins.Src == R0 && ins.Dst == R0 +} + +// IsConstantLoad returns true if the instruction loads a constant of the +// given size. +func (ins *Instruction) IsConstantLoad(size Size) bool { + return ins.OpCode == LoadImmOp(size) && ins.Src == R0 && ins.Offset == 0 +} + +// Format implements fmt.Formatter. 
+func (ins Instruction) Format(f fmt.State, c rune) { + if c != 'v' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", c) + return + } + + op := ins.OpCode + + if op == InvalidOpCode { + fmt.Fprint(f, "INVALID") + return + } + + // Omit trailing space for Exit + if op.JumpOp() == Exit { + fmt.Fprint(f, op) + return + } + + if ins.IsLoadFromMap() { + fd := ins.mapFd() + m := ins.Map() + switch ins.Src { + case PseudoMapFD: + if m != nil { + fmt.Fprintf(f, "LoadMapPtr dst: %s map: %s", ins.Dst, m) + } else { + fmt.Fprintf(f, "LoadMapPtr dst: %s fd: %d", ins.Dst, fd) + } + + case PseudoMapValue: + if m != nil { + fmt.Fprintf(f, "LoadMapValue dst: %s, map: %s off: %d", ins.Dst, m, ins.mapOffset()) + } else { + fmt.Fprintf(f, "LoadMapValue dst: %s, fd: %d off: %d", ins.Dst, fd, ins.mapOffset()) + } + } + + goto ref + } + + switch cls := op.Class(); { + case cls.isLoadOrStore(): + fmt.Fprintf(f, "%v ", op) + switch op.Mode() { + case ImmMode: + fmt.Fprintf(f, "dst: %s imm: %d", ins.Dst, ins.Constant) + case AbsMode: + fmt.Fprintf(f, "imm: %d", ins.Constant) + case IndMode: + fmt.Fprintf(f, "dst: %s src: %s imm: %d", ins.Dst, ins.Src, ins.Constant) + case MemMode, MemSXMode: + fmt.Fprintf(f, "dst: %s src: %s off: %d imm: %d", ins.Dst, ins.Src, ins.Offset, ins.Constant) + case XAddMode: + fmt.Fprintf(f, "dst: %s src: %s", ins.Dst, ins.Src) + } + + case cls.IsALU(): + fmt.Fprintf(f, "%v", op) + if op == Swap.Op(ImmSource) { + fmt.Fprintf(f, "%d", ins.Constant) + } + + fmt.Fprintf(f, " dst: %s ", ins.Dst) + switch { + case op.ALUOp() == Swap: + break + case op.Source() == ImmSource: + fmt.Fprintf(f, "imm: %d", ins.Constant) + default: + fmt.Fprintf(f, "src: %s", ins.Src) + } + + case cls.IsJump(): + fmt.Fprintf(f, "%v ", op) + switch jop := op.JumpOp(); jop { + case Call: + switch ins.Src { + case PseudoCall: + // bpf-to-bpf call + fmt.Fprint(f, ins.Constant) + case PseudoKfuncCall: + // kfunc call + fmt.Fprintf(f, "Kfunc(%d)", ins.Constant) + default: + fmt.Fprint(f, BuiltinFunc(ins.Constant)) + } + + case Ja: + if ins.OpCode.Class() == Jump32Class { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "off: %d", ins.Offset) + } + + default: + fmt.Fprintf(f, "dst: %s off: %d ", ins.Dst, ins.Offset) + if op.Source() == ImmSource { + fmt.Fprintf(f, "imm: %d", ins.Constant) + } else { + fmt.Fprintf(f, "src: %s", ins.Src) + } + } + default: + fmt.Fprintf(f, "%v ", op) + } + +ref: + if ins.Reference() != "" { + fmt.Fprintf(f, " <%s>", ins.Reference()) + } +} + +func (ins Instruction) equal(other Instruction) bool { + return ins.OpCode == other.OpCode && + ins.Dst == other.Dst && + ins.Src == other.Src && + ins.Offset == other.Offset && + ins.Constant == other.Constant +} + +// Size returns the amount of bytes ins would occupy in binary form. +func (ins Instruction) Size() uint64 { + return uint64(InstructionSize * ins.OpCode.rawInstructions()) +} + +// WithMetadata sets the given Metadata on the Instruction. e.g. to copy +// Metadata from another Instruction when replacing it. +func (ins Instruction) WithMetadata(meta Metadata) Instruction { + ins.Metadata = meta + return ins +} + +type symbolMeta struct{} + +// WithSymbol marks the Instruction as a Symbol, which other Instructions +// can point to using corresponding calls to WithReference. +func (ins Instruction) WithSymbol(name string) Instruction { + ins.Metadata.Set(symbolMeta{}, name) + return ins +} + +// Sym creates a symbol. +// +// Deprecated: use WithSymbol instead. 
+func (ins Instruction) Sym(name string) Instruction { + return ins.WithSymbol(name) +} + +// Symbol returns the value ins has been marked with using WithSymbol, +// otherwise returns an empty string. A symbol is often an Instruction +// at the start of a function body. +func (ins Instruction) Symbol() string { + sym, _ := ins.Metadata.Get(symbolMeta{}).(string) + return sym +} + +type referenceMeta struct{} + +// WithReference makes ins reference another Symbol or map by name. +func (ins Instruction) WithReference(ref string) Instruction { + ins.Metadata.Set(referenceMeta{}, ref) + return ins +} + +// Reference returns the Symbol or map name referenced by ins, if any. +func (ins Instruction) Reference() string { + ref, _ := ins.Metadata.Get(referenceMeta{}).(string) + return ref +} + +type mapMeta struct{} + +// Map returns the Map referenced by ins, if any. +// An Instruction will contain a Map if e.g. it references an existing, +// pinned map that was opened during ELF loading. +func (ins Instruction) Map() FDer { + fd, _ := ins.Metadata.Get(mapMeta{}).(FDer) + return fd +} + +type sourceMeta struct{} + +// WithSource adds source information about the Instruction. +func (ins Instruction) WithSource(src fmt.Stringer) Instruction { + ins.Metadata.Set(sourceMeta{}, src) + return ins +} + +// Source returns source information about the Instruction. The field is +// present when the compiler emits BTF line info about the Instruction and +// usually contains the line of source code responsible for it. +func (ins Instruction) Source() fmt.Stringer { + str, _ := ins.Metadata.Get(sourceMeta{}).(fmt.Stringer) + return str +} + +// A Comment can be passed to Instruction.WithSource to add a comment +// to an instruction. +type Comment string + +func (s Comment) String() string { + return string(s) +} + +// FDer represents a resource tied to an underlying file descriptor. +// Used as a stand-in for e.g. ebpf.Map since that type cannot be +// imported here and FD() is the only method we rely on. +type FDer interface { + FD() int +} + +// Instructions is an eBPF program. +type Instructions []Instruction + +// Unmarshal unmarshals an Instructions from a binary instruction stream. +// All instructions in insns are replaced by instructions decoded from r. +func (insns *Instructions) Unmarshal(r io.Reader, bo binary.ByteOrder) error { + if len(*insns) > 0 { + *insns = nil + } + + var offset uint64 + for { + var ins Instruction + n, err := ins.Unmarshal(r, bo) + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("offset %d: %w", offset, err) + } + + *insns = append(*insns, ins) + offset += n + } + + return nil +} + +// Name returns the name of the function insns belongs to, if any. +func (insns Instructions) Name() string { + if len(insns) == 0 { + return "" + } + return insns[0].Symbol() +} + +func (insns Instructions) String() string { + return fmt.Sprint(insns) +} + +// Size returns the amount of bytes insns would occupy in binary form. +func (insns Instructions) Size() uint64 { + var sum uint64 + for _, ins := range insns { + sum += ins.Size() + } + return sum +} + +// AssociateMap updates all Instructions that Reference the given symbol +// to point to an existing Map m instead. +// +// Returns ErrUnreferencedSymbol error if no references to symbol are found +// in insns. If symbol is anything else than the symbol name of map (e.g. +// a bpf2bpf subprogram), an error is returned. 
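// An illustrative sketch (fdOnly is a hypothetical FDer implementation, not
// part of this package):
//
//	type fdOnly int
//
//	func (f fdOnly) FD() int { return int(f) }
//
//	// Point all map loads referencing "my_map" at file descriptor 3.
//	err := insns.AssociateMap("my_map", fdOnly(3))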
+func (insns Instructions) AssociateMap(symbol string, m FDer) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if err := ins.AssociateMap(m); err != nil { + return err + } + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// RewriteMapPtr rewrites all loads of a specific map pointer to a new fd. +// +// Returns ErrUnreferencedSymbol if the symbol isn't used. +// +// Deprecated: use AssociateMap instead. +func (insns Instructions) RewriteMapPtr(symbol string, fd int) error { + if symbol == "" { + return errors.New("empty symbol") + } + + var found bool + for i := range insns { + ins := &insns[i] + if ins.Reference() != symbol { + continue + } + + if !ins.IsLoadFromMap() { + return errors.New("not a load from a map") + } + + ins.encodeMapFD(fd) + + found = true + } + + if !found { + return fmt.Errorf("symbol %s: %w", symbol, ErrUnreferencedSymbol) + } + + return nil +} + +// SymbolOffsets returns the set of symbols and their offset in +// the instructions. +func (insns Instructions) SymbolOffsets() (map[string]int, error) { + offsets := make(map[string]int) + + for i, ins := range insns { + if ins.Symbol() == "" { + continue + } + + if _, ok := offsets[ins.Symbol()]; ok { + return nil, fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + offsets[ins.Symbol()] = i + } + + return offsets, nil +} + +// FunctionReferences returns a set of symbol names these Instructions make +// bpf-to-bpf calls to. +func (insns Instructions) FunctionReferences() []string { + calls := make(map[string]struct{}) + for _, ins := range insns { + if ins.Constant != -1 { + // BPF-to-BPF calls have -1 constants. + continue + } + + if ins.Reference() == "" { + continue + } + + if !ins.IsFunctionReference() { + continue + } + + calls[ins.Reference()] = struct{}{} + } + + result := make([]string, 0, len(calls)) + for call := range calls { + result = append(result, call) + } + + sort.Strings(result) + return result +} + +// ReferenceOffsets returns the set of references and their offset in +// the instructions. +func (insns Instructions) ReferenceOffsets() map[string][]int { + offsets := make(map[string][]int) + + for i, ins := range insns { + if ins.Reference() == "" { + continue + } + + offsets[ins.Reference()] = append(offsets[ins.Reference()], i) + } + + return offsets +} + +// Format implements fmt.Formatter. +// +// You can control indentation of symbols by +// specifying a width. Setting a precision controls the indentation of +// instructions. +// The default character is a tab, which can be overridden by specifying +// the ' ' space flag. +func (insns Instructions) Format(f fmt.State, c rune) { + if c != 's' && c != 'v' { + fmt.Fprintf(f, "{UNKNOWN FORMAT '%c'}", c) + return + } + + // Precision is better in this case, because it allows + // specifying 0 padding easily. + padding, ok := f.Precision() + if !ok { + padding = 1 + } + + indent := strings.Repeat("\t", padding) + if f.Flag(' ') { + indent = strings.Repeat(" ", padding) + } + + symPadding, ok := f.Width() + if !ok { + symPadding = padding - 1 + } + if symPadding < 0 { + symPadding = 0 + } + + symIndent := strings.Repeat("\t", symPadding) + if f.Flag(' ') { + symIndent = strings.Repeat(" ", symPadding) + } + + // Guess how many digits we need at most, by assuming that all instructions + // are double wide. 
+ highestOffset := len(insns) * 2 + offsetWidth := int(math.Ceil(math.Log10(float64(highestOffset)))) + + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Symbol() != "" { + fmt.Fprintf(f, "%s%s:\n", symIndent, iter.Ins.Symbol()) + } + if src := iter.Ins.Source(); src != nil { + line := strings.TrimSpace(src.String()) + if line != "" { + fmt.Fprintf(f, "%s%*s; %s\n", indent, offsetWidth, " ", line) + } + } + fmt.Fprintf(f, "%s%*d: %v\n", indent, offsetWidth, iter.Offset, iter.Ins) + } +} + +// Marshal encodes a BPF program into the kernel format. +// +// insns may be modified if there are unresolved jumps or bpf2bpf calls. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without a matching Symbol Instruction within insns. +func (insns Instructions) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := insns.encodeFunctionReferences(); err != nil { + return err + } + + if err := insns.encodeMapPointers(); err != nil { + return err + } + + for i, ins := range insns { + if _, err := ins.Marshal(w, bo); err != nil { + return fmt.Errorf("instruction %d: %w", i, err) + } + } + return nil +} + +// Tag calculates the kernel tag for a series of instructions. +// +// It mirrors bpf_prog_calc_tag in the kernel and so can be compared +// to ProgramInfo.Tag to figure out whether a loaded program matches +// certain instructions. +func (insns Instructions) Tag(bo binary.ByteOrder) (string, error) { + h := sha1.New() + for i, ins := range insns { + if ins.IsLoadFromMap() { + ins.Constant = 0 + } + _, err := ins.Marshal(h, bo) + if err != nil { + return "", fmt.Errorf("instruction %d: %w", i, err) + } + } + return hex.EncodeToString(h.Sum(nil)[:sys.BPF_TAG_SIZE]), nil +} + +// encodeFunctionReferences populates the Offset (or Constant, depending on +// the instruction type) field of instructions with a Reference field to point +// to the offset of the corresponding instruction with a matching Symbol field. +// +// Only Reference Instructions that are either jumps or BPF function references +// (calls or function pointer loads) are populated. +// +// Returns ErrUnsatisfiedProgramReference if there is a Reference Instruction +// without at least one corresponding Symbol Instruction within insns. +func (insns Instructions) encodeFunctionReferences() error { + // Index the offsets of instructions tagged as a symbol. + symbolOffsets := make(map[string]RawInstructionOffset) + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if ins.Symbol() == "" { + continue + } + + if _, ok := symbolOffsets[ins.Symbol()]; ok { + return fmt.Errorf("duplicate symbol %s", ins.Symbol()) + } + + symbolOffsets[ins.Symbol()] = iter.Offset + } + + // Find all instructions tagged as references to other symbols. + // Depending on the instruction type, populate their constant or offset + // fields to point to the symbol they refer to within the insn stream. 
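	// Jump offsets and bpf-to-bpf call immediates are interpreted by the
	// kernel relative to the instruction following the branch, which is why
	// the calculations below subtract one.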
+ iter = insns.Iterate() + for iter.Next() { + i := iter.Index + offset := iter.Offset + ins := iter.Ins + + if ins.Reference() == "" { + continue + } + + switch { + case ins.IsFunctionReference() && ins.Constant == -1, + ins.OpCode == Ja.opCode(Jump32Class, ImmSource) && ins.Constant == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Constant = int64(symOffset - offset - 1) + + case ins.OpCode.Class().IsJump() && ins.Offset == -1: + symOffset, ok := symbolOffsets[ins.Reference()] + if !ok { + return fmt.Errorf("%s at insn %d: symbol %q: %w", ins.OpCode, i, ins.Reference(), ErrUnsatisfiedProgramReference) + } + + ins.Offset = int16(symOffset - offset - 1) + } + } + + return nil +} + +// encodeMapPointers finds all Map Instructions and encodes their FDs +// into their Constant fields. +func (insns Instructions) encodeMapPointers() error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + if !ins.IsLoadFromMap() { + continue + } + + m := ins.Map() + if m == nil { + continue + } + + fd := m.FD() + if fd < 0 { + return fmt.Errorf("map %s: %w", m, sys.ErrClosedFd) + } + + ins.encodeMapFD(m.FD()) + } + + return nil +} + +// Iterate allows iterating a BPF program while keeping track of +// various offsets. +// +// Modifying the instruction slice will lead to undefined behaviour. +func (insns Instructions) Iterate() *InstructionIterator { + return &InstructionIterator{insns: insns} +} + +// InstructionIterator iterates over a BPF program. +type InstructionIterator struct { + insns Instructions + // The instruction in question. + Ins *Instruction + // The index of the instruction in the original instruction slice. + Index int + // The offset of the instruction in raw BPF instructions. This accounts + // for double-wide instructions. + Offset RawInstructionOffset +} + +// Next returns true as long as there are any instructions remaining. +func (iter *InstructionIterator) Next() bool { + if len(iter.insns) == 0 { + return false + } + + if iter.Ins != nil { + iter.Index++ + iter.Offset += RawInstructionOffset(iter.Ins.OpCode.rawInstructions()) + } + iter.Ins = &iter.insns[0] + iter.insns = iter.insns[1:] + return true +} + +type bpfRegisters uint8 + +func newBPFRegisters(dst, src Register, bo binary.ByteOrder) (bpfRegisters, error) { + switch bo { + case binary.LittleEndian: + return bpfRegisters((src << 4) | (dst & 0xF)), nil + case binary.BigEndian: + return bpfRegisters((dst << 4) | (src & 0xF)), nil + default: + return 0, fmt.Errorf("unrecognized ByteOrder %T", bo) + } +} + +// IsUnreferencedSymbol returns true if err was caused by +// an unreferenced symbol. +// +// Deprecated: use errors.Is(err, asm.ErrUnreferencedSymbol). +func IsUnreferencedSymbol(err error) bool { + return errors.Is(err, ErrUnreferencedSymbol) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump.go b/vendor/github.com/cilium/ebpf/asm/jump.go new file mode 100644 index 0000000000..2738d736b2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump.go @@ -0,0 +1,135 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output jump_string.go -type=JumpOp + +// JumpOp affect control flow. 
+// +// msb lsb +// +----+-+---+ +// |OP |s|cls| +// +----+-+---+ +type JumpOp uint8 + +const jumpMask OpCode = 0xf0 + +const ( + // InvalidJumpOp is returned by getters when invoked + // on non branch OpCodes + InvalidJumpOp JumpOp = 0xff + // Ja jumps by offset unconditionally + Ja JumpOp = 0x00 + // JEq jumps by offset if r == imm + JEq JumpOp = 0x10 + // JGT jumps by offset if r > imm + JGT JumpOp = 0x20 + // JGE jumps by offset if r >= imm + JGE JumpOp = 0x30 + // JSet jumps by offset if r & imm + JSet JumpOp = 0x40 + // JNE jumps by offset if r != imm + JNE JumpOp = 0x50 + // JSGT jumps by offset if signed r > signed imm + JSGT JumpOp = 0x60 + // JSGE jumps by offset if signed r >= signed imm + JSGE JumpOp = 0x70 + // Call builtin or user defined function from imm + Call JumpOp = 0x80 + // Exit ends execution, with value in r0 + Exit JumpOp = 0x90 + // JLT jumps by offset if r < imm + JLT JumpOp = 0xa0 + // JLE jumps by offset if r <= imm + JLE JumpOp = 0xb0 + // JSLT jumps by offset if signed r < signed imm + JSLT JumpOp = 0xc0 + // JSLE jumps by offset if signed r <= signed imm + JSLE JumpOp = 0xd0 +) + +// Return emits an exit instruction. +// +// Requires a return value in R0. +func Return() Instruction { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Exit), + } +} + +// Op returns the OpCode for a given jump source. +func (op JumpOp) Op(source Source) OpCode { + return OpCode(JumpClass).SetJumpOp(op).SetSource(source) +} + +// Imm compares 64 bit dst to 64 bit value (sign extended), and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Imm(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Imm32 compares 32 bit dst to 32 bit value, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Imm32(dst Register, value int32, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, ImmSource), + Dst: dst, + Offset: -1, + Constant: int64(value), + }.WithReference(label) +} + +// Reg compares 64 bit dst to 64 bit src, and adjusts PC by offset if the condition is fulfilled. +func (op JumpOp) Reg(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(JumpClass, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +// Reg32 compares 32 bit dst to 32 bit src, and adjusts PC by offset if the condition is fulfilled. +// Requires kernel 5.1. +func (op JumpOp) Reg32(dst, src Register, label string) Instruction { + return Instruction{ + OpCode: op.opCode(Jump32Class, RegSource), + Dst: dst, + Src: src, + Offset: -1, + }.WithReference(label) +} + +func (op JumpOp) opCode(class Class, source Source) OpCode { + if op == Exit || op == Call { + return InvalidOpCode + } + + return OpCode(class).SetJumpOp(op).SetSource(source) +} + +// LongJump returns a jump always instruction with a range of [-2^31, 2^31 - 1]. +func LongJump(label string) Instruction { + return Instruction{ + OpCode: Ja.opCode(Jump32Class, ImmSource), + Constant: -1, + }.WithReference(label) +} + +// Label adjusts PC to the address of the label. 
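// An illustrative sketch using helpers from this package: a conditional
// branch can target a label attached to a later instruction with WithSymbol,
// and the relative offsets are filled in during Instructions.Marshal:
//
//	insns := Instructions{
//		LoadImm(R0, 0, DWord),
//		JEq.Imm(R1, 0, "out"),
//		LoadImm(R0, 1, DWord),
//		Return().WithSymbol("out"),
//	}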
+func (op JumpOp) Label(label string) Instruction { + if op == Call { + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(Call), + Src: PseudoCall, + Constant: -1, + }.WithReference(label) + } + + return Instruction{ + OpCode: OpCode(JumpClass).SetJumpOp(op), + Offset: -1, + }.WithReference(label) +} diff --git a/vendor/github.com/cilium/ebpf/asm/jump_string.go b/vendor/github.com/cilium/ebpf/asm/jump_string.go new file mode 100644 index 0000000000..85a4aaffa5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/jump_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -output jump_string.go -type=JumpOp"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidJumpOp-255] + _ = x[Ja-0] + _ = x[JEq-16] + _ = x[JGT-32] + _ = x[JGE-48] + _ = x[JSet-64] + _ = x[JNE-80] + _ = x[JSGT-96] + _ = x[JSGE-112] + _ = x[Call-128] + _ = x[Exit-144] + _ = x[JLT-160] + _ = x[JLE-176] + _ = x[JSLT-192] + _ = x[JSLE-208] +} + +const _JumpOp_name = "JaJEqJGTJGEJSetJNEJSGTJSGECallExitJLTJLEJSLTJSLEInvalidJumpOp" + +var _JumpOp_map = map[JumpOp]string{ + 0: _JumpOp_name[0:2], + 16: _JumpOp_name[2:5], + 32: _JumpOp_name[5:8], + 48: _JumpOp_name[8:11], + 64: _JumpOp_name[11:15], + 80: _JumpOp_name[15:18], + 96: _JumpOp_name[18:22], + 112: _JumpOp_name[22:26], + 128: _JumpOp_name[26:30], + 144: _JumpOp_name[30:34], + 160: _JumpOp_name[34:37], + 176: _JumpOp_name[37:40], + 192: _JumpOp_name[40:44], + 208: _JumpOp_name[44:48], + 255: _JumpOp_name[48:61], +} + +func (i JumpOp) String() string { + if str, ok := _JumpOp_map[i]; ok { + return str + } + return "JumpOp(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store.go b/vendor/github.com/cilium/ebpf/asm/load_store.go new file mode 100644 index 0000000000..cdb5c5cfa4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store.go @@ -0,0 +1,225 @@ +package asm + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output load_store_string.go -type=Mode,Size + +// Mode for load and store operations +// +// msb lsb +// +---+--+---+ +// |MDE|sz|cls| +// +---+--+---+ +type Mode uint8 + +const modeMask OpCode = 0xe0 + +const ( + // InvalidMode is returned by getters when invoked + // on non load / store OpCodes + InvalidMode Mode = 0xff + // ImmMode - immediate value + ImmMode Mode = 0x00 + // AbsMode - immediate value + offset + AbsMode Mode = 0x20 + // IndMode - indirect (imm+src) + IndMode Mode = 0x40 + // MemMode - load from memory + MemMode Mode = 0x60 + // MemSXMode - load from memory, sign extension + MemSXMode Mode = 0x80 + // XAddMode - add atomically across processors. + XAddMode Mode = 0xc0 +) + +// Size of load and store operations +// +// msb lsb +// +---+--+---+ +// |mde|SZ|cls| +// +---+--+---+ +type Size uint8 + +const sizeMask OpCode = 0x18 + +const ( + // InvalidSize is returned by getters when invoked + // on non load / store OpCodes + InvalidSize Size = 0xff + // DWord - double word; 64 bits + DWord Size = 0x18 + // Word - word; 32 bits + Word Size = 0x00 + // Half - half-word; 16 bits + Half Size = 0x08 + // Byte - byte; 8 bits + Byte Size = 0x10 +) + +// Sizeof returns the size in bytes. 
+func (s Size) Sizeof() int { + switch s { + case DWord: + return 8 + case Word: + return 4 + case Half: + return 2 + case Byte: + return 1 + default: + return -1 + } +} + +// LoadMemOp returns the OpCode to load a value of given size from memory. +func LoadMemOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemMode).SetSize(size) +} + +// LoadMemSXOp returns the OpCode to load a value of given size from memory sign extended. +func LoadMemSXOp(size Size) OpCode { + return OpCode(LdXClass).SetMode(MemSXMode).SetSize(size) +} + +// LoadMem emits `dst = *(size *)(src + offset)`. +func LoadMem(dst, src Register, offset int16, size Size) Instruction { + return Instruction{ + OpCode: LoadMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadMemSX emits `dst = *(size *)(src + offset)` but sign extends dst. +func LoadMemSX(dst, src Register, offset int16, size Size) Instruction { + if size == DWord { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadMemSXOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// LoadImmOp returns the OpCode to load an immediate of given size. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImmOp(size Size) OpCode { + return OpCode(LdClass).SetMode(ImmMode).SetSize(size) +} + +// LoadImm emits `dst = (size)value`. +// +// As of kernel 4.20, only DWord size is accepted. +func LoadImm(dst Register, value int64, size Size) Instruction { + return Instruction{ + OpCode: LoadImmOp(size), + Dst: dst, + Constant: value, + } +} + +// LoadMapPtr stores a pointer to a map in dst. +func LoadMapPtr(dst Register, fd int) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapFD, + Constant: int64(uint32(fd)), + } +} + +// LoadMapValue stores a pointer to the value at a certain offset of a map. +func LoadMapValue(dst Register, fd int, offset uint32) Instruction { + if fd < 0 { + return Instruction{OpCode: InvalidOpCode} + } + + fdAndOffset := (uint64(offset) << 32) | uint64(uint32(fd)) + return Instruction{ + OpCode: LoadImmOp(DWord), + Dst: dst, + Src: PseudoMapValue, + Constant: int64(fdAndOffset), + } +} + +// LoadIndOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadIndOp(size Size) OpCode { + return OpCode(LdClass).SetMode(IndMode).SetSize(size) +} + +// LoadInd emits `dst = ntoh(*(size *)(((sk_buff *)R6)->data + src + offset))`. +func LoadInd(dst, src Register, offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadIndOp(size), + Dst: dst, + Src: src, + Constant: int64(offset), + } +} + +// LoadAbsOp returns the OpCode for loading a value of given size from an sk_buff. +func LoadAbsOp(size Size) OpCode { + return OpCode(LdClass).SetMode(AbsMode).SetSize(size) +} + +// LoadAbs emits `r0 = ntoh(*(size *)(((sk_buff *)R6)->data + offset))`. +func LoadAbs(offset int32, size Size) Instruction { + return Instruction{ + OpCode: LoadAbsOp(size), + Dst: R0, + Constant: int64(offset), + } +} + +// StoreMemOp returns the OpCode for storing a register of given size in memory. 
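// Illustrative pairing of the load and store helpers in this file:
//
//	LoadMem(R0, R1, 0, Word)  // r0 = *(u32 *)(r1 + 0)
//	StoreMem(R1, 8, R0, Word) // *(u32 *)(r1 + 8) = r0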
+func StoreMemOp(size Size) OpCode { + return OpCode(StXClass).SetMode(MemMode).SetSize(size) +} + +// StoreMem emits `*(size *)(dst + offset) = src` +func StoreMem(dst Register, offset int16, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreMemOp(size), + Dst: dst, + Src: src, + Offset: offset, + } +} + +// StoreImmOp returns the OpCode for storing an immediate of given size in memory. +func StoreImmOp(size Size) OpCode { + return OpCode(StClass).SetMode(MemMode).SetSize(size) +} + +// StoreImm emits `*(size *)(dst + offset) = value`. +func StoreImm(dst Register, offset int16, value int64, size Size) Instruction { + return Instruction{ + OpCode: StoreImmOp(size), + Dst: dst, + Offset: offset, + Constant: value, + } +} + +// StoreXAddOp returns the OpCode to atomically add a register to a value in memory. +func StoreXAddOp(size Size) OpCode { + return OpCode(StXClass).SetMode(XAddMode).SetSize(size) +} + +// StoreXAdd atomically adds src to *dst. +func StoreXAdd(dst, src Register, size Size) Instruction { + return Instruction{ + OpCode: StoreXAddOp(size), + Dst: dst, + Src: src, + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/load_store_string.go b/vendor/github.com/cilium/ebpf/asm/load_store_string.go new file mode 100644 index 0000000000..c48080327c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/load_store_string.go @@ -0,0 +1,84 @@ +// Code generated by "stringer -output load_store_string.go -type=Mode,Size"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidMode-255] + _ = x[ImmMode-0] + _ = x[AbsMode-32] + _ = x[IndMode-64] + _ = x[MemMode-96] + _ = x[MemSXMode-128] + _ = x[XAddMode-192] +} + +const ( + _Mode_name_0 = "ImmMode" + _Mode_name_1 = "AbsMode" + _Mode_name_2 = "IndMode" + _Mode_name_3 = "MemMode" + _Mode_name_4 = "MemSXMode" + _Mode_name_5 = "XAddMode" + _Mode_name_6 = "InvalidMode" +) + +func (i Mode) String() string { + switch { + case i == 0: + return _Mode_name_0 + case i == 32: + return _Mode_name_1 + case i == 64: + return _Mode_name_2 + case i == 96: + return _Mode_name_3 + case i == 128: + return _Mode_name_4 + case i == 192: + return _Mode_name_5 + case i == 255: + return _Mode_name_6 + default: + return "Mode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidSize-255] + _ = x[DWord-24] + _ = x[Word-0] + _ = x[Half-8] + _ = x[Byte-16] +} + +const ( + _Size_name_0 = "Word" + _Size_name_1 = "Half" + _Size_name_2 = "Byte" + _Size_name_3 = "DWord" + _Size_name_4 = "InvalidSize" +) + +func (i Size) String() string { + switch { + case i == 0: + return _Size_name_0 + case i == 8: + return _Size_name_1 + case i == 16: + return _Size_name_2 + case i == 24: + return _Size_name_3 + case i == 255: + return _Size_name_4 + default: + return "Size(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/cilium/ebpf/asm/metadata.go b/vendor/github.com/cilium/ebpf/asm/metadata.go new file mode 100644 index 0000000000..dd368a9360 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/metadata.go @@ -0,0 +1,80 @@ +package asm + +// Metadata contains metadata about an instruction. 
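// Metadata behaves like a small copy-on-write key/value list, so Instruction
// values can be copied cheaply while sharing unchanged entries. A minimal
// sketch using the unexported keys from instruction.go:
//
//	var m Metadata
//	m.Set(symbolMeta{}, "start")           // attach a value
//	sym, _ := m.Get(symbolMeta{}).(string) // "start"
//	_ = sym
//	m.Set(symbolMeta{}, nil)               // setting nil removes the key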
+type Metadata struct { + head *metaElement +} + +type metaElement struct { + next *metaElement + key, value interface{} +} + +// Find the element containing key. +// +// Returns nil if there is no such element. +func (m *Metadata) find(key interface{}) *metaElement { + for e := m.head; e != nil; e = e.next { + if e.key == key { + return e + } + } + return nil +} + +// Remove an element from the linked list. +// +// Copies as many elements of the list as necessary to remove r, but doesn't +// perform a full copy. +func (m *Metadata) remove(r *metaElement) { + current := &m.head + for e := m.head; e != nil; e = e.next { + if e == r { + // We've found the element we want to remove. + *current = e.next + + // No need to copy the tail. + return + } + + // There is another element in front of the one we want to remove. + // We have to copy it to be able to change metaElement.next. + cpy := &metaElement{key: e.key, value: e.value} + *current = cpy + current = &cpy.next + } +} + +// Set a key to a value. +// +// If value is nil, the key is removed. Avoids modifying old metadata by +// copying if necessary. +func (m *Metadata) Set(key, value interface{}) { + if e := m.find(key); e != nil { + if e.value == value { + // Key is present and the value is the same. Nothing to do. + return + } + + // Key is present with a different value. Create a copy of the list + // which doesn't have the element in it. + m.remove(e) + } + + // m.head is now a linked list that doesn't contain key. + if value == nil { + return + } + + m.head = &metaElement{key: key, value: value, next: m.head} +} + +// Get the value of a key. +// +// Returns nil if no value with the given key is present. +func (m *Metadata) Get(key interface{}) interface{} { + if e := m.find(key); e != nil { + return e.value + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode.go b/vendor/github.com/cilium/ebpf/asm/opcode.go new file mode 100644 index 0000000000..1dfd0b171a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode.go @@ -0,0 +1,303 @@ +package asm + +import ( + "fmt" + "strings" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output opcode_string.go -type=Class + +// Class of operations +// +// msb lsb +// +---+--+---+ +// | ?? |CLS| +// +---+--+---+ +type Class uint8 + +const classMask OpCode = 0x07 + +const ( + // LdClass loads immediate values into registers. + // Also used for non-standard load operations from cBPF. + LdClass Class = 0x00 + // LdXClass loads memory into registers. + LdXClass Class = 0x01 + // StClass stores immediate values to memory. + StClass Class = 0x02 + // StXClass stores registers to memory. + StXClass Class = 0x03 + // ALUClass describes arithmetic operators. + ALUClass Class = 0x04 + // JumpClass describes jump operators. + JumpClass Class = 0x05 + // Jump32Class describes jump operators with 32-bit comparisons. + // Requires kernel 5.1. + Jump32Class Class = 0x06 + // ALU64Class describes arithmetic operators in 64-bit mode. + ALU64Class Class = 0x07 +) + +// IsLoad checks if this is either LdClass or LdXClass. +func (cls Class) IsLoad() bool { + return cls == LdClass || cls == LdXClass +} + +// IsStore checks if this is either StClass or StXClass. +func (cls Class) IsStore() bool { + return cls == StClass || cls == StXClass +} + +func (cls Class) isLoadOrStore() bool { + return cls.IsLoad() || cls.IsStore() +} + +// IsALU checks if this is either ALUClass or ALU64Class. 
+func (cls Class) IsALU() bool { + return cls == ALUClass || cls == ALU64Class +} + +// IsJump checks if this is either JumpClass or Jump32Class. +func (cls Class) IsJump() bool { + return cls == JumpClass || cls == Jump32Class +} + +func (cls Class) isJumpOrALU() bool { + return cls.IsJump() || cls.IsALU() +} + +// OpCode represents a single operation. +// It is not a 1:1 mapping to real eBPF opcodes. +// +// The encoding varies based on a 3-bit Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// ??? | CLS +// +// For ALUClass and ALUCLass32: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// OPC |S| CLS +// +// For LdClass, LdXclass, StClass and StXClass: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | MDE |SIZ| CLS +// +// For JumpClass, Jump32Class: +// +// 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 +// 0 | OPC |S| CLS +type OpCode uint16 + +// InvalidOpCode is returned by setters on OpCode +const InvalidOpCode OpCode = 0xffff + +// bpfOpCode returns the actual BPF opcode. +func (op OpCode) bpfOpCode() (byte, error) { + const opCodeMask = 0xff + + if !valid(op, opCodeMask) { + return 0, fmt.Errorf("invalid opcode %x", op) + } + + return byte(op & opCodeMask), nil +} + +// rawInstructions returns the number of BPF instructions required +// to encode this opcode. +func (op OpCode) rawInstructions() int { + if op.IsDWordLoad() { + return 2 + } + return 1 +} + +func (op OpCode) IsDWordLoad() bool { + return op == LoadImmOp(DWord) +} + +// Class returns the class of operation. +func (op OpCode) Class() Class { + return Class(op & classMask) +} + +// Mode returns the mode for load and store operations. +func (op OpCode) Mode() Mode { + if !op.Class().isLoadOrStore() { + return InvalidMode + } + return Mode(op & modeMask) +} + +// Size returns the size for load and store operations. +func (op OpCode) Size() Size { + if !op.Class().isLoadOrStore() { + return InvalidSize + } + return Size(op & sizeMask) +} + +// Source returns the source for branch and ALU operations. +func (op OpCode) Source() Source { + if !op.Class().isJumpOrALU() || op.ALUOp() == Swap { + return InvalidSource + } + return Source(op & sourceMask) +} + +// ALUOp returns the ALUOp. +func (op OpCode) ALUOp() ALUOp { + if !op.Class().IsALU() { + return InvalidALUOp + } + return ALUOp(op & aluMask) +} + +// Endianness returns the Endianness for a byte swap instruction. +func (op OpCode) Endianness() Endianness { + if op.ALUOp() != Swap { + return InvalidEndian + } + return Endianness(op & endianMask) +} + +// JumpOp returns the JumpOp. +// Returns InvalidJumpOp if it doesn't encode a jump. +func (op OpCode) JumpOp() JumpOp { + if !op.Class().IsJump() { + return InvalidJumpOp + } + + jumpOp := JumpOp(op & jumpMask) + + // Some JumpOps are only supported by JumpClass, not Jump32Class. + if op.Class() == Jump32Class && (jumpOp == Exit || jumpOp == Call) { + return InvalidJumpOp + } + + return jumpOp +} + +// SetMode sets the mode on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetMode(mode Mode) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(mode), modeMask) { + return InvalidOpCode + } + return (op & ^modeMask) | OpCode(mode) +} + +// SetSize sets the size on load and store operations. +// +// Returns InvalidOpCode if op is of the wrong class. 
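// For example, LoadMemOp(Word) in load_store.go is built by chaining these
// setters: OpCode(LdXClass).SetMode(MemMode).SetSize(Word).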
+func (op OpCode) SetSize(size Size) OpCode { + if !op.Class().isLoadOrStore() || !valid(OpCode(size), sizeMask) { + return InvalidOpCode + } + return (op & ^sizeMask) | OpCode(size) +} + +// SetSource sets the source on jump and ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetSource(source Source) OpCode { + if !op.Class().isJumpOrALU() || !valid(OpCode(source), sourceMask) { + return InvalidOpCode + } + return (op & ^sourceMask) | OpCode(source) +} + +// SetALUOp sets the ALUOp on ALU operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetALUOp(alu ALUOp) OpCode { + if !op.Class().IsALU() || !valid(OpCode(alu), aluMask) { + return InvalidOpCode + } + return (op & ^aluMask) | OpCode(alu) +} + +// SetJumpOp sets the JumpOp on jump operations. +// +// Returns InvalidOpCode if op is of the wrong class. +func (op OpCode) SetJumpOp(jump JumpOp) OpCode { + if !op.Class().IsJump() || !valid(OpCode(jump), jumpMask) { + return InvalidOpCode + } + + newOp := (op & ^jumpMask) | OpCode(jump) + + // Check newOp is legal. + if newOp.JumpOp() == InvalidJumpOp { + return InvalidOpCode + } + + return newOp +} + +func (op OpCode) String() string { + var f strings.Builder + + switch class := op.Class(); { + case class.isLoadOrStore(): + f.WriteString(strings.TrimSuffix(class.String(), "Class")) + + mode := op.Mode() + f.WriteString(strings.TrimSuffix(mode.String(), "Mode")) + + switch op.Size() { + case DWord: + f.WriteString("DW") + case Word: + f.WriteString("W") + case Half: + f.WriteString("H") + case Byte: + f.WriteString("B") + } + + case class.IsALU(): + if op.ALUOp() == Swap && op.Class() == ALU64Class { + // B to make BSwap, uncontitional byte swap + f.WriteString("B") + } + + f.WriteString(op.ALUOp().String()) + + if op.ALUOp() == Swap { + if op.Class() == ALUClass { + // Width for Endian is controlled by Constant + f.WriteString(op.Endianness().String()) + } + } else { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + + if class == ALUClass { + f.WriteString("32") + } + } + + case class.IsJump(): + f.WriteString(op.JumpOp().String()) + + if class == Jump32Class { + f.WriteString("32") + } + + if jop := op.JumpOp(); jop != Exit && jop != Call && jop != Ja { + f.WriteString(strings.TrimSuffix(op.Source().String(), "Source")) + } + + default: + fmt.Fprintf(&f, "OpCode(%#x)", uint8(op)) + } + + return f.String() +} + +// valid returns true if all bits in value are covered by mask. +func valid(value, mask OpCode) bool { + return value & ^mask == 0 +} diff --git a/vendor/github.com/cilium/ebpf/asm/opcode_string.go b/vendor/github.com/cilium/ebpf/asm/opcode_string.go new file mode 100644 index 0000000000..58bc3e7e7f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/opcode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -output opcode_string.go -type=Class"; DO NOT EDIT. + +package asm + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[LdClass-0] + _ = x[LdXClass-1] + _ = x[StClass-2] + _ = x[StXClass-3] + _ = x[ALUClass-4] + _ = x[JumpClass-5] + _ = x[Jump32Class-6] + _ = x[ALU64Class-7] +} + +const _Class_name = "LdClassLdXClassStClassStXClassALUClassJumpClassJump32ClassALU64Class" + +var _Class_index = [...]uint8{0, 7, 15, 22, 30, 38, 47, 58, 68} + +func (i Class) String() string { + if i >= Class(len(_Class_index)-1) { + return "Class(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Class_name[_Class_index[i]:_Class_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/asm/register.go b/vendor/github.com/cilium/ebpf/asm/register.go new file mode 100644 index 0000000000..457a3b8a88 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/asm/register.go @@ -0,0 +1,51 @@ +package asm + +import ( + "fmt" +) + +// Register is the source or destination of most operations. +type Register uint8 + +// R0 contains return values. +const R0 Register = 0 + +// Registers for function arguments. +const ( + R1 Register = R0 + 1 + iota + R2 + R3 + R4 + R5 +) + +// Callee saved registers preserved by function calls. +const ( + R6 Register = R5 + 1 + iota + R7 + R8 + R9 +) + +// Read-only frame pointer to access stack. +const ( + R10 Register = R9 + 1 + RFP = R10 +) + +// Pseudo registers used by 64bit loads and jumps +const ( + PseudoMapFD = R1 // BPF_PSEUDO_MAP_FD + PseudoMapValue = R2 // BPF_PSEUDO_MAP_VALUE + PseudoCall = R1 // BPF_PSEUDO_CALL + PseudoFunc = R4 // BPF_PSEUDO_FUNC + PseudoKfuncCall = R2 // BPF_PSEUDO_KFUNC_CALL +) + +func (r Register) String() string { + v := uint8(r) + if v == 10 { + return "rfp" + } + return fmt.Sprintf("r%d", v) +} diff --git a/vendor/github.com/cilium/ebpf/attachtype_string.go b/vendor/github.com/cilium/ebpf/attachtype_string.go new file mode 100644 index 0000000000..bece896bb6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/attachtype_string.go @@ -0,0 +1,79 @@ +// Code generated by "stringer -type AttachType -trimprefix Attach"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[AttachNone-0] + _ = x[AttachCGroupInetIngress-0] + _ = x[AttachCGroupInetEgress-1] + _ = x[AttachCGroupInetSockCreate-2] + _ = x[AttachCGroupSockOps-3] + _ = x[AttachSkSKBStreamParser-4] + _ = x[AttachSkSKBStreamVerdict-5] + _ = x[AttachCGroupDevice-6] + _ = x[AttachSkMsgVerdict-7] + _ = x[AttachCGroupInet4Bind-8] + _ = x[AttachCGroupInet6Bind-9] + _ = x[AttachCGroupInet4Connect-10] + _ = x[AttachCGroupInet6Connect-11] + _ = x[AttachCGroupInet4PostBind-12] + _ = x[AttachCGroupInet6PostBind-13] + _ = x[AttachCGroupUDP4Sendmsg-14] + _ = x[AttachCGroupUDP6Sendmsg-15] + _ = x[AttachLircMode2-16] + _ = x[AttachFlowDissector-17] + _ = x[AttachCGroupSysctl-18] + _ = x[AttachCGroupUDP4Recvmsg-19] + _ = x[AttachCGroupUDP6Recvmsg-20] + _ = x[AttachCGroupGetsockopt-21] + _ = x[AttachCGroupSetsockopt-22] + _ = x[AttachTraceRawTp-23] + _ = x[AttachTraceFEntry-24] + _ = x[AttachTraceFExit-25] + _ = x[AttachModifyReturn-26] + _ = x[AttachLSMMac-27] + _ = x[AttachTraceIter-28] + _ = x[AttachCgroupInet4GetPeername-29] + _ = x[AttachCgroupInet6GetPeername-30] + _ = x[AttachCgroupInet4GetSockname-31] + _ = x[AttachCgroupInet6GetSockname-32] + _ = x[AttachXDPDevMap-33] + _ = x[AttachCgroupInetSockRelease-34] + _ = x[AttachXDPCPUMap-35] + _ = x[AttachSkLookup-36] + _ = x[AttachXDP-37] + _ = x[AttachSkSKBVerdict-38] + _ = x[AttachSkReuseportSelect-39] + _ = x[AttachSkReuseportSelectOrMigrate-40] + _ = x[AttachPerfEvent-41] + _ = x[AttachTraceKprobeMulti-42] + _ = x[AttachLSMCgroup-43] + _ = x[AttachStructOps-44] + _ = x[AttachNetfilter-45] + _ = x[AttachTCXIngress-46] + _ = x[AttachTCXEgress-47] + _ = x[AttachTraceUprobeMulti-48] + _ = x[AttachCgroupUnixConnect-49] + _ = x[AttachCgroupUnixSendmsg-50] + _ = x[AttachCgroupUnixRecvmsg-51] + _ = x[AttachCgroupUnixGetpeername-52] + _ = x[AttachCgroupUnixGetsockname-53] + _ = x[AttachNetkitPrimary-54] + _ = x[AttachNetkitPeer-55] +} + +const _AttachType_name = "NoneCGroupInetEgressCGroupInetSockCreateCGroupSockOpsSkSKBStreamParserSkSKBStreamVerdictCGroupDeviceSkMsgVerdictCGroupInet4BindCGroupInet6BindCGroupInet4ConnectCGroupInet6ConnectCGroupInet4PostBindCGroupInet6PostBindCGroupUDP4SendmsgCGroupUDP6SendmsgLircMode2FlowDissectorCGroupSysctlCGroupUDP4RecvmsgCGroupUDP6RecvmsgCGroupGetsockoptCGroupSetsockoptTraceRawTpTraceFEntryTraceFExitModifyReturnLSMMacTraceIterCgroupInet4GetPeernameCgroupInet6GetPeernameCgroupInet4GetSocknameCgroupInet6GetSocknameXDPDevMapCgroupInetSockReleaseXDPCPUMapSkLookupXDPSkSKBVerdictSkReuseportSelectSkReuseportSelectOrMigratePerfEventTraceKprobeMultiLSMCgroupStructOpsNetfilterTCXIngressTCXEgressTraceUprobeMultiCgroupUnixConnectCgroupUnixSendmsgCgroupUnixRecvmsgCgroupUnixGetpeernameCgroupUnixGetsocknameNetkitPrimaryNetkitPeer" + +var _AttachType_index = [...]uint16{0, 4, 20, 40, 53, 70, 88, 100, 112, 127, 142, 160, 178, 197, 216, 233, 250, 259, 272, 284, 301, 318, 334, 350, 360, 371, 381, 393, 399, 408, 430, 452, 474, 496, 505, 526, 535, 543, 546, 558, 575, 601, 610, 626, 635, 644, 653, 663, 672, 688, 705, 722, 739, 760, 781, 794, 804} + +func (i AttachType) String() string { + if i >= AttachType(len(_AttachType_index)-1) { + return "AttachType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _AttachType_name[_AttachType_index[i]:_AttachType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf.go b/vendor/github.com/cilium/ebpf/btf/btf.go new file mode 100644 index 0000000000..880c5ade0c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf.go @@ -0,0 +1,713 @@ +package btf + +import ( + 
"bufio" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +const btfMagic = 0xeB9F + +// Errors returned by BTF functions. +var ( + ErrNotSupported = internal.ErrNotSupported + ErrNotFound = errors.New("not found") + ErrNoExtendedInfo = errors.New("no extended info") + ErrMultipleMatches = errors.New("multiple matching types") +) + +// ID represents the unique ID of a BTF object. +type ID = sys.BTFID + +// immutableTypes is a set of types which musn't be changed. +type immutableTypes struct { + // All types contained by the spec, not including types from the base in + // case the spec was parsed from split BTF. + types []Type + + // Type IDs indexed by type. + typeIDs map[Type]TypeID + + // The ID of the first type in types. + firstTypeID TypeID + + // Types indexed by essential name. + // Includes all struct flavors and types with the same name. + namedTypes map[essentialName][]TypeID + + // Byte order of the types. This affects things like struct member order + // when using bitfields. + byteOrder binary.ByteOrder +} + +func (s *immutableTypes) typeByID(id TypeID) (Type, bool) { + if id < s.firstTypeID { + return nil, false + } + + index := int(id - s.firstTypeID) + if index >= len(s.types) { + return nil, false + } + + return s.types[index], true +} + +// mutableTypes is a set of types which may be changed. +type mutableTypes struct { + imm immutableTypes + mu sync.RWMutex // protects copies below + copies map[Type]Type // map[orig]copy + copiedTypeIDs map[Type]TypeID // map[copy]origID +} + +// add a type to the set of mutable types. +// +// Copies type and all of its children once. Repeated calls with the same type +// do not copy again. +func (mt *mutableTypes) add(typ Type, typeIDs map[Type]TypeID) Type { + mt.mu.RLock() + cpy, ok := mt.copies[typ] + mt.mu.RUnlock() + + if ok { + // Fast path: the type has been copied before. + return cpy + } + + // modifyGraphPreorder copies the type graph node by node, so we can't drop + // the lock in between. + mt.mu.Lock() + defer mt.mu.Unlock() + + return copyType(typ, typeIDs, mt.copies, mt.copiedTypeIDs) +} + +// copy a set of mutable types. +func (mt *mutableTypes) copy() *mutableTypes { + if mt == nil { + return nil + } + + mtCopy := &mutableTypes{ + mt.imm, + sync.RWMutex{}, + make(map[Type]Type, len(mt.copies)), + make(map[Type]TypeID, len(mt.copiedTypeIDs)), + } + + // Prevent concurrent modification of mt.copiedTypeIDs. + mt.mu.RLock() + defer mt.mu.RUnlock() + + copiesOfCopies := make(map[Type]Type, len(mt.copies)) + for orig, copy := range mt.copies { + // NB: We make a copy of copy, not orig, so that changes to mutable types + // are preserved. + copyOfCopy := copyType(copy, mt.copiedTypeIDs, copiesOfCopies, mtCopy.copiedTypeIDs) + mtCopy.copies[orig] = copyOfCopy + } + + return mtCopy +} + +func (mt *mutableTypes) typeID(typ Type) (TypeID, error) { + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. 
+ return 0, nil + } + + mt.mu.RLock() + defer mt.mu.RUnlock() + + id, ok := mt.copiedTypeIDs[typ] + if !ok { + return 0, fmt.Errorf("no ID for type %s: %w", typ, ErrNotFound) + } + + return id, nil +} + +func (mt *mutableTypes) typeByID(id TypeID) (Type, bool) { + immT, ok := mt.imm.typeByID(id) + if !ok { + return nil, false + } + + return mt.add(immT, mt.imm.typeIDs), true +} + +func (mt *mutableTypes) anyTypesByName(name string) ([]Type, error) { + immTypes := mt.imm.namedTypes[newEssentialName(name)] + if len(immTypes) == 0 { + return nil, fmt.Errorf("type name %s: %w", name, ErrNotFound) + } + + // Return a copy to prevent changes to namedTypes. + result := make([]Type, 0, len(immTypes)) + for _, id := range immTypes { + immT, ok := mt.imm.typeByID(id) + if !ok { + return nil, fmt.Errorf("no type with ID %d", id) + } + + // Match against the full name, not just the essential one + // in case the type being looked up is a struct flavor. + if immT.TypeName() == name { + result = append(result, mt.add(immT, mt.imm.typeIDs)) + } + } + return result, nil +} + +// Spec allows querying a set of Types and loading the set into the +// kernel. +type Spec struct { + *mutableTypes + + // String table from ELF. + strings *stringTable +} + +// LoadSpec opens file and calls LoadSpecFromReader on it. +func LoadSpec(file string) (*Spec, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + return LoadSpecFromReader(fh) +} + +// LoadSpecFromReader reads from an ELF or a raw BTF blob. +// +// Returns ErrNotFound if reading from an ELF which contains no BTF. ExtInfos +// may be nil. +func LoadSpecFromReader(rd io.ReaderAt) (*Spec, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + if bo := guessRawBTFByteOrder(rd); bo != nil { + return loadRawSpec(io.NewSectionReader(rd, 0, math.MaxInt64), bo, nil) + } + + return nil, err + } + + return loadSpecFromELF(file) +} + +// LoadSpecAndExtInfosFromReader reads from an ELF. +// +// ExtInfos may be nil if the ELF doesn't contain section metadata. +// Returns ErrNotFound if the ELF contains no BTF. +func LoadSpecAndExtInfosFromReader(rd io.ReaderAt) (*Spec, *ExtInfos, error) { + file, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, nil, err + } + + spec, err := loadSpecFromELF(file) + if err != nil { + return nil, nil, err + } + + extInfos, err := loadExtInfosFromELF(file, spec) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, nil, err + } + + return spec, extInfos, nil +} + +// symbolOffsets extracts all symbols offsets from an ELF and indexes them by +// section and variable name. +// +// References to variables in BTF data sections carry unsigned 32-bit offsets. +// Some ELF symbols (e.g. in vmlinux) may point to virtual memory that is well +// beyond this range. Since these symbols cannot be described by BTF info, +// ignore them here. +func symbolOffsets(file *internal.SafeELFFile) (map[symbol]uint32, error) { + symbols, err := file.Symbols() + if err != nil { + return nil, fmt.Errorf("can't read symbols: %v", err) + } + + offsets := make(map[symbol]uint32) + for _, sym := range symbols { + if idx := sym.Section; idx >= elf.SHN_LORESERVE && idx <= elf.SHN_HIRESERVE { + // Ignore things like SHN_ABS + continue + } + + if sym.Value > math.MaxUint32 { + // VarSecinfo offset is u32, cannot reference symbols in higher regions. 
+ continue + } + + if int(sym.Section) >= len(file.Sections) { + return nil, fmt.Errorf("symbol %s: invalid section %d", sym.Name, sym.Section) + } + + secName := file.Sections[sym.Section].Name + offsets[symbol{secName, sym.Name}] = uint32(sym.Value) + } + + return offsets, nil +} + +func loadSpecFromELF(file *internal.SafeELFFile) (*Spec, error) { + var ( + btfSection *elf.Section + sectionSizes = make(map[string]uint32) + ) + + for _, sec := range file.Sections { + switch sec.Name { + case ".BTF": + btfSection = sec + default: + if sec.Type != elf.SHT_PROGBITS && sec.Type != elf.SHT_NOBITS { + break + } + + if sec.Size > math.MaxUint32 { + return nil, fmt.Errorf("section %s exceeds maximum size", sec.Name) + } + + sectionSizes[sec.Name] = uint32(sec.Size) + } + } + + if btfSection == nil { + return nil, fmt.Errorf("btf: %w", ErrNotFound) + } + + offsets, err := symbolOffsets(file) + if err != nil { + return nil, err + } + + if btfSection.ReaderAt == nil { + return nil, fmt.Errorf("compressed BTF is not supported") + } + + spec, err := loadRawSpec(btfSection.ReaderAt, file.ByteOrder, nil) + if err != nil { + return nil, err + } + + err = fixupDatasec(spec.imm.types, sectionSizes, offsets) + if err != nil { + return nil, err + } + + return spec, nil +} + +func loadRawSpec(btf io.ReaderAt, bo binary.ByteOrder, base *Spec) (*Spec, error) { + var ( + baseStrings *stringTable + firstTypeID TypeID + err error + ) + + if base != nil { + if base.imm.firstTypeID != 0 { + return nil, fmt.Errorf("can't use split BTF as base") + } + + baseStrings = base.strings + + firstTypeID, err = base.nextTypeID() + if err != nil { + return nil, err + } + } + + types, rawStrings, err := parseBTF(btf, bo, baseStrings, base) + if err != nil { + return nil, err + } + + typeIDs, typesByName := indexTypes(types, firstTypeID) + + return &Spec{ + &mutableTypes{ + immutableTypes{ + types, + typeIDs, + firstTypeID, + typesByName, + bo, + }, + sync.RWMutex{}, + make(map[Type]Type), + make(map[Type]TypeID), + }, + rawStrings, + }, nil +} + +func indexTypes(types []Type, firstTypeID TypeID) (map[Type]TypeID, map[essentialName][]TypeID) { + namedTypes := 0 + for _, typ := range types { + if typ.TypeName() != "" { + // Do a pre-pass to figure out how big types by name has to be. + // Most types have unique names, so it's OK to ignore essentialName + // here. + namedTypes++ + } + } + + typeIDs := make(map[Type]TypeID, len(types)) + typesByName := make(map[essentialName][]TypeID, namedTypes) + + for i, typ := range types { + id := firstTypeID + TypeID(i) + typeIDs[typ] = id + + if name := newEssentialName(typ.TypeName()); name != "" { + typesByName[name] = append(typesByName[name], id) + } + } + + return typeIDs, typesByName +} + +func guessRawBTFByteOrder(r io.ReaderAt) binary.ByteOrder { + buf := new(bufio.Reader) + for _, bo := range []binary.ByteOrder{ + binary.LittleEndian, + binary.BigEndian, + } { + buf.Reset(io.NewSectionReader(r, 0, math.MaxInt64)) + if _, err := parseBTFHeader(buf, bo); err == nil { + return bo + } + } + + return nil +} + +// parseBTF reads a .BTF section into memory and parses it into a list of +// raw types and a string table. 
+func parseBTF(btf io.ReaderAt, bo binary.ByteOrder, baseStrings *stringTable, base *Spec) ([]Type, *stringTable, error) { + buf := internal.NewBufferedSectionReader(btf, 0, math.MaxInt64) + header, err := parseBTFHeader(buf, bo) + if err != nil { + return nil, nil, fmt.Errorf("parsing .BTF header: %v", err) + } + + rawStrings, err := readStringTable(io.NewSectionReader(btf, header.stringStart(), int64(header.StringLen)), + baseStrings) + if err != nil { + return nil, nil, fmt.Errorf("can't read type names: %w", err) + } + + buf.Reset(io.NewSectionReader(btf, header.typeStart(), int64(header.TypeLen))) + types, err := readAndInflateTypes(buf, bo, header.TypeLen, rawStrings, base) + if err != nil { + return nil, nil, err + } + + return types, rawStrings, nil +} + +type symbol struct { + section string + name string +} + +// fixupDatasec attempts to patch up missing info in Datasecs and its members by +// supplementing them with information from the ELF headers and symbol table. +func fixupDatasec(types []Type, sectionSizes map[string]uint32, offsets map[symbol]uint32) error { + for _, typ := range types { + ds, ok := typ.(*Datasec) + if !ok { + continue + } + + name := ds.Name + + // Some Datasecs are virtual and don't have corresponding ELF sections. + switch name { + case ".ksyms": + // .ksyms describes forward declarations of kfunc signatures, as well as + // references to kernel symbols. + // Nothing to fix up, all sizes and offsets are 0. + for _, vsi := range ds.Vars { + switch t := vsi.Type.(type) { + case *Func: + continue + case *Var: + if _, ok := t.Type.(*Void); !ok { + return fmt.Errorf("data section %s: expected %s to be *Void, not %T: %w", name, vsi.Type.TypeName(), vsi.Type, ErrNotSupported) + } + default: + return fmt.Errorf("data section %s: expected to be either *btf.Func or *btf.Var, not %T: %w", name, vsi.Type, ErrNotSupported) + } + } + + continue + case ".kconfig": + // .kconfig has a size of 0 and has all members' offsets set to 0. + // Fix up all offsets and set the Datasec's size. + if err := fixupDatasecLayout(ds); err != nil { + return err + } + + // Fix up extern to global linkage to avoid a BTF verifier error. + for _, vsi := range ds.Vars { + vsi.Type.(*Var).Linkage = GlobalVar + } + + continue + } + + if ds.Size != 0 { + continue + } + + ds.Size, ok = sectionSizes[name] + if !ok { + return fmt.Errorf("data section %s: missing size", name) + } + + for i := range ds.Vars { + symName := ds.Vars[i].Type.TypeName() + ds.Vars[i].Offset, ok = offsets[symbol{name, symName}] + if !ok { + return fmt.Errorf("data section %s: missing offset for symbol %s", name, symName) + } + } + } + + return nil +} + +// fixupDatasecLayout populates ds.Vars[].Offset according to var sizes and +// alignment. Calculate and set ds.Size. +func fixupDatasecLayout(ds *Datasec) error { + var off uint32 + + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("member %d: unsupported type %T", i, vsi.Type) + } + + size, err := Sizeof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting size: %w", v.Name, err) + } + align, err := alignof(v.Type) + if err != nil { + return fmt.Errorf("variable %s: getting alignment: %w", v.Name, err) + } + + // Align the current member based on the offset of the end of the previous + // member and the alignment of the current member. + off = internal.Align(off, uint32(align)) + + ds.Vars[i].Offset = off + + off += uint32(size) + } + + ds.Size = off + + return nil +} + +// Copy creates a copy of Spec. 
+func (s *Spec) Copy() *Spec { + if s == nil { + return nil + } + + return &Spec{ + s.mutableTypes.copy(), + s.strings, + } +} + +type sliceWriter []byte + +func (sw sliceWriter) Write(p []byte) (int, error) { + if len(p) != len(sw) { + return 0, errors.New("size doesn't match") + } + + return copy(sw, p), nil +} + +// nextTypeID returns the next unallocated type ID or an error if there are no +// more type IDs. +func (s *Spec) nextTypeID() (TypeID, error) { + id := s.imm.firstTypeID + TypeID(len(s.imm.types)) + if id < s.imm.firstTypeID { + return 0, fmt.Errorf("no more type IDs") + } + return id, nil +} + +// TypeByID returns the BTF Type with the given type ID. +// +// Returns an error wrapping ErrNotFound if a Type with the given ID +// does not exist in the Spec. +func (s *Spec) TypeByID(id TypeID) (Type, error) { + typ, ok := s.typeByID(id) + if !ok { + return nil, fmt.Errorf("look up type with ID %d (first ID is %d): %w", id, s.imm.firstTypeID, ErrNotFound) + } + + return typ, nil +} + +// TypeID returns the ID for a given Type. +// +// Returns an error wrapping [ErrNotFound] if the type isn't part of the Spec. +func (s *Spec) TypeID(typ Type) (TypeID, error) { + return s.mutableTypes.typeID(typ) +} + +// AnyTypesByName returns a list of BTF Types with the given name. +// +// If the BTF blob describes multiple compilation units like vmlinux, multiple +// Types with the same name and kind can exist, but might not describe the same +// data structure. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +func (s *Spec) AnyTypesByName(name string) ([]Type, error) { + return s.mutableTypes.anyTypesByName(name) +} + +// AnyTypeByName returns a Type with the given name. +// +// Returns an error if multiple types of that name exist. +func (s *Spec) AnyTypeByName(name string) (Type, error) { + types, err := s.AnyTypesByName(name) + if err != nil { + return nil, err + } + + if len(types) > 1 { + return nil, fmt.Errorf("found multiple types: %v", types) + } + + return types[0], nil +} + +// TypeByName searches for a Type with a specific name. Since multiple Types +// with the same name can exist, the parameter typ is taken to narrow down the +// search in case of a clash. +// +// typ must be a non-nil pointer to an implementation of a Type. On success, the +// address of the found Type will be copied to typ. +// +// Returns an error wrapping ErrNotFound if no matching Type exists in the Spec. +// Returns an error wrapping ErrMultipleTypes if multiple candidates are found. +func (s *Spec) TypeByName(name string, typ interface{}) error { + typeInterface := reflect.TypeOf((*Type)(nil)).Elem() + + // typ may be **T or *Type + typValue := reflect.ValueOf(typ) + if typValue.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", typ) + } + + typPtr := typValue.Elem() + if !typPtr.CanSet() { + return fmt.Errorf("%T cannot be set", typ) + } + + wanted := typPtr.Type() + if wanted == typeInterface { + // This is *Type. Unwrap the value's type. 
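The mutableTypes machinery and Spec.Copy above mean each Spec lazily hands out its own copies of the type graph. A small sketch of that behaviour, assuming btf.LoadKernelSpec (defined elsewhere in the package, not in this excerpt); illustrative only, not part of the vendored code.

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// LoadKernelSpec is assumed from the wider btf package; it is not
	// part of this excerpt.
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		log.Fatal(err)
	}

	var a, b *btf.Struct
	if err := spec.TypeByName("task_struct", &a); err != nil {
		log.Fatal(err)
	}

	cpy := spec.Copy()
	if err := cpy.TypeByName("task_struct", &b); err != nil {
		log.Fatal(err)
	}

	// Each Spec resolves names against its own copies of the immutable
	// type graph, so the two lookups return distinct objects.
	fmt.Println(a == b) // false
}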
+ wanted = typPtr.Elem().Type() + } + + if !wanted.AssignableTo(typeInterface) { + return fmt.Errorf("%T does not satisfy Type interface", typ) + } + + types, err := s.AnyTypesByName(name) + if err != nil { + return err + } + + var candidate Type + for _, typ := range types { + if reflect.TypeOf(typ) != wanted { + continue + } + + if candidate != nil { + return fmt.Errorf("type %s(%T): %w", name, typ, ErrMultipleMatches) + } + + candidate = typ + } + + if candidate == nil { + return fmt.Errorf("%s %s: %w", wanted, name, ErrNotFound) + } + + typPtr.Set(reflect.ValueOf(candidate)) + + return nil +} + +// LoadSplitSpecFromReader loads split BTF from a reader. +// +// Types from base are used to resolve references in the split BTF. +// The returned Spec only contains types from the split BTF, not from the base. +func LoadSplitSpecFromReader(r io.ReaderAt, base *Spec) (*Spec, error) { + return loadRawSpec(r, internal.NativeEndian, base) +} + +// TypesIterator iterates over types of a given spec. +type TypesIterator struct { + spec *Spec + id TypeID + done bool + // The last visited type in the spec. + Type Type +} + +// Iterate returns the types iterator. +func (s *Spec) Iterate() *TypesIterator { + return &TypesIterator{spec: s, id: s.imm.firstTypeID} +} + +// Next returns true as long as there are any remaining types. +func (iter *TypesIterator) Next() bool { + if iter.done { + return false + } + + var ok bool + iter.Type, ok = iter.spec.typeByID(iter.id) + iter.id++ + iter.done = !ok + if !iter.done { + // Skip declTags, during unmarshaling declTags become `Tags` fields of other types. + // We keep them in the spec to avoid holes in the ID space, but for the purposes of + // iteration, they are not useful to the user. + if _, ok := iter.Type.(*declTag); ok { + return iter.Next() + } + } + return !iter.done +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types.go b/vendor/github.com/cilium/ebpf/btf/btf_types.go new file mode 100644 index 0000000000..320311b332 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types.go @@ -0,0 +1,520 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind + +// btfKind describes a Type. +type btfKind uint8 + +// Equivalents of the BTF_KIND_* constants. +const ( + kindUnknown btfKind = iota // Unknown + kindInt // Int + kindPointer // Pointer + kindArray // Array + kindStruct // Struct + kindUnion // Union + kindEnum // Enum + kindForward // Forward + kindTypedef // Typedef + kindVolatile // Volatile + kindConst // Const + kindRestrict // Restrict + // Added ~4.20 + kindFunc // Func + kindFuncProto // FuncProto + // Added ~5.1 + kindVar // Var + kindDatasec // Datasec + // Added ~5.13 + kindFloat // Float + // Added 5.16 + kindDeclTag // DeclTag + // Added 5.17 + kindTypeTag // TypeTag + // Added 6.0 + kindEnum64 // Enum64 +) + +// FuncLinkage describes BTF function linkage metadata. +type FuncLinkage int + +// Equivalent of enum btf_func_linkage. +const ( + StaticFunc FuncLinkage = iota // static + GlobalFunc // global + ExternFunc // extern +) + +// VarLinkage describes BTF variable linkage metadata. 
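The lookup and iteration API added above (TypeByName, AnyTypeByName, Iterate) can be exercised roughly as follows. The object path ./bpf_prog.o and the struct name my_struct are placeholders; the snippet is a sketch rather than part of the patch.

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf/btf"
)

func main() {
	// Hypothetical object file path; any ELF with a .BTF section works.
	spec, err := btf.LoadSpec("./bpf_prog.o")
	if err != nil {
		log.Fatal(err)
	}

	// TypeByName narrows the search by the pointer type passed in.
	var m *btf.Struct
	if err := spec.TypeByName("my_struct", &m); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s has %d members\n", m.TypeName(), len(m.Members))

	// Iterate walks every type in the spec (decl tags are skipped).
	total := 0
	for it := spec.Iterate(); it.Next(); {
		total++
	}
	fmt.Println("types:", total)
}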
+type VarLinkage int + +const ( + StaticVar VarLinkage = iota // static + GlobalVar // global + ExternVar // extern +) + +const ( + btfTypeKindShift = 24 + btfTypeKindLen = 5 + btfTypeVlenShift = 0 + btfTypeVlenMask = 16 + btfTypeKindFlagShift = 31 + btfTypeKindFlagMask = 1 +) + +var btfHeaderLen = binary.Size(&btfHeader{}) + +type btfHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + HdrLen uint32 + + TypeOff uint32 + TypeLen uint32 + StringOff uint32 + StringLen uint32 +} + +// typeStart returns the offset from the beginning of the .BTF section +// to the start of its type entries. +func (h *btfHeader) typeStart() int64 { + return int64(h.HdrLen + h.TypeOff) +} + +// stringStart returns the offset from the beginning of the .BTF section +// to the start of its string table. +func (h *btfHeader) stringStart() int64 { + return int64(h.HdrLen + h.StringOff) +} + +// parseBTFHeader parses the header of the .BTF section. +func parseBTFHeader(r io.Reader, bo binary.ByteOrder) (*btfHeader, error) { + var header btfHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + remainder := int64(header.HdrLen) - int64(binary.Size(&header)) + if remainder < 0 { + return nil, errors.New("header length shorter than btfHeader size") + } + + if _, err := io.CopyN(internal.DiscardZeroes{}, r, remainder); err != nil { + return nil, fmt.Errorf("header padding: %v", err) + } + + return &header, nil +} + +var btfTypeLen = binary.Size(btfType{}) + +// btfType is equivalent to struct btf_type in Documentation/bpf/btf.rst. +type btfType struct { + NameOff uint32 + /* "info" bits arrangement + * bits 0-15: vlen (e.g. # of struct's members), linkage + * bits 16-23: unused + * bits 24-28: kind (e.g. int, ptr, array...etc) + * bits 29-30: unused + * bit 31: kind_flag, currently used by + * struct, union and fwd + */ + Info uint32 + /* "size" is used by INT, ENUM, STRUCT and UNION. + * "size" tells the size of the type it is describing. + * + * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, + * FUNC and FUNC_PROTO. + * "type" is a type_id referring to another type. 
+ */ + SizeType uint32 +} + +var btfTypeSize = int(unsafe.Sizeof(btfType{})) + +func unmarshalBtfType(bt *btfType, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfTypeSize { + return 0, fmt.Errorf("not enough bytes to unmarshal btfType") + } + + bt.NameOff = bo.Uint32(b[0:]) + bt.Info = bo.Uint32(b[4:]) + bt.SizeType = bo.Uint32(b[8:]) + return btfTypeSize, nil +} + +func mask(len uint32) uint32 { + return (1 << len) - 1 +} + +func readBits(value, len, shift uint32) uint32 { + return (value >> shift) & mask(len) +} + +func writeBits(value, len, shift, new uint32) uint32 { + value &^= mask(len) << shift + value |= (new & mask(len)) << shift + return value +} + +func (bt *btfType) info(len, shift uint32) uint32 { + return readBits(bt.Info, len, shift) +} + +func (bt *btfType) setInfo(value, len, shift uint32) { + bt.Info = writeBits(bt.Info, len, shift, value) +} + +func (bt *btfType) Kind() btfKind { + return btfKind(bt.info(btfTypeKindLen, btfTypeKindShift)) +} + +func (bt *btfType) SetKind(kind btfKind) { + bt.setInfo(uint32(kind), btfTypeKindLen, btfTypeKindShift) +} + +func (bt *btfType) Vlen() int { + return int(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetVlen(vlen int) { + bt.setInfo(uint32(vlen), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) kindFlagBool() bool { + return bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift) == 1 +} + +func (bt *btfType) setKindFlagBool(set bool) { + var value uint32 + if set { + value = 1 + } + bt.setInfo(value, btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +// Bitfield returns true if the struct or union contain a bitfield. +func (bt *btfType) Bitfield() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetBitfield(isBitfield bool) { + bt.setKindFlagBool(isBitfield) +} + +func (bt *btfType) FwdKind() FwdKind { + return FwdKind(bt.info(btfTypeKindFlagMask, btfTypeKindFlagShift)) +} + +func (bt *btfType) SetFwdKind(kind FwdKind) { + bt.setInfo(uint32(kind), btfTypeKindFlagMask, btfTypeKindFlagShift) +} + +func (bt *btfType) Signed() bool { + return bt.kindFlagBool() +} + +func (bt *btfType) SetSigned(signed bool) { + bt.setKindFlagBool(signed) +} + +func (bt *btfType) Linkage() FuncLinkage { + return FuncLinkage(bt.info(btfTypeVlenMask, btfTypeVlenShift)) +} + +func (bt *btfType) SetLinkage(linkage FuncLinkage) { + bt.setInfo(uint32(linkage), btfTypeVlenMask, btfTypeVlenShift) +} + +func (bt *btfType) Type() TypeID { + // TODO: Panic here if wrong kind? + return TypeID(bt.SizeType) +} + +func (bt *btfType) SetType(id TypeID) { + bt.SizeType = uint32(id) +} + +func (bt *btfType) Size() uint32 { + // TODO: Panic here if wrong kind? + return bt.SizeType +} + +func (bt *btfType) SetSize(size uint32) { + bt.SizeType = size +} + +func (bt *btfType) Marshal(w io.Writer, bo binary.ByteOrder) error { + buf := make([]byte, unsafe.Sizeof(*bt)) + bo.PutUint32(buf[0:], bt.NameOff) + bo.PutUint32(buf[4:], bt.Info) + bo.PutUint32(buf[8:], bt.SizeType) + _, err := w.Write(buf) + return err +} + +type rawType struct { + btfType + data interface{} +} + +func (rt *rawType) Marshal(w io.Writer, bo binary.ByteOrder) error { + if err := rt.btfType.Marshal(w, bo); err != nil { + return err + } + + if rt.data == nil { + return nil + } + + return binary.Write(w, bo, rt.data) +} + +// btfInt encodes additional data for integers. +// +// ? ? ? ? e e e e o o o o o o o o ? ? ? ? ? ? ? ? b b b b b b b b +// ? = undefined +// e = encoding +// o = offset (bitfields?) 
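The Info word packing described in the btfType comment above can be reproduced with plain shifts and masks. A self-contained sketch using literal constants that mirror the vendored btfTypeKindShift/btfTypeVlenMask values; illustrative only.

package main

import "fmt"

func main() {
	// Layout documented above: bits 0-15 vlen, bits 24-28 kind, bit 31 kind_flag.
	const (
		kindStruct = 4 // kindStruct from the kind table above
		vlen       = 3 // e.g. a struct with three members
	)

	info := uint32(vlen) | uint32(kindStruct)<<24 | uint32(1)<<31

	fmt.Printf("info=%#010x kind=%d vlen=%d kind_flag=%d\n",
		info,
		(info>>24)&0x1f, // 5 kind bits
		info&0xffff,     // 16 vlen bits
		(info>>31)&1)
	// info=0x84000003 kind=4 vlen=3 kind_flag=1
}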
+// b = bits (bitfields) +type btfInt struct { + Raw uint32 +} + +const ( + btfIntEncodingLen = 4 + btfIntEncodingShift = 24 + btfIntOffsetLen = 8 + btfIntOffsetShift = 16 + btfIntBitsLen = 8 + btfIntBitsShift = 0 +) + +var btfIntLen = int(unsafe.Sizeof(btfInt{})) + +func unmarshalBtfInt(bi *btfInt, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfIntLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfInt") + } + + bi.Raw = bo.Uint32(b[0:]) + return btfIntLen, nil +} + +func (bi btfInt) Encoding() IntEncoding { + return IntEncoding(readBits(bi.Raw, btfIntEncodingLen, btfIntEncodingShift)) +} + +func (bi *btfInt) SetEncoding(e IntEncoding) { + bi.Raw = writeBits(uint32(bi.Raw), btfIntEncodingLen, btfIntEncodingShift, uint32(e)) +} + +func (bi btfInt) Offset() Bits { + return Bits(readBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift)) +} + +func (bi *btfInt) SetOffset(offset uint32) { + bi.Raw = writeBits(bi.Raw, btfIntOffsetLen, btfIntOffsetShift, offset) +} + +func (bi btfInt) Bits() Bits { + return Bits(readBits(bi.Raw, btfIntBitsLen, btfIntBitsShift)) +} + +func (bi *btfInt) SetBits(bits byte) { + bi.Raw = writeBits(bi.Raw, btfIntBitsLen, btfIntBitsShift, uint32(bits)) +} + +type btfArray struct { + Type TypeID + IndexType TypeID + Nelems uint32 +} + +var btfArrayLen = int(unsafe.Sizeof(btfArray{})) + +func unmarshalBtfArray(ba *btfArray, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfArrayLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfArray") + } + + ba.Type = TypeID(bo.Uint32(b[0:])) + ba.IndexType = TypeID(bo.Uint32(b[4:])) + ba.Nelems = bo.Uint32(b[8:]) + return btfArrayLen, nil +} + +type btfMember struct { + NameOff uint32 + Type TypeID + Offset uint32 +} + +var btfMemberLen = int(unsafe.Sizeof(btfMember{})) + +func unmarshalBtfMembers(members []btfMember, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range members { + if off+btfMemberLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfMember %d", i) + } + + members[i].NameOff = bo.Uint32(b[off+0:]) + members[i].Type = TypeID(bo.Uint32(b[off+4:])) + members[i].Offset = bo.Uint32(b[off+8:]) + + off += btfMemberLen + } + + return off, nil +} + +type btfVarSecinfo struct { + Type TypeID + Offset uint32 + Size uint32 +} + +var btfVarSecinfoLen = int(unsafe.Sizeof(btfVarSecinfo{})) + +func unmarshalBtfVarSecInfos(secinfos []btfVarSecinfo, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range secinfos { + if off+btfVarSecinfoLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVarSecinfo %d", i) + } + + secinfos[i].Type = TypeID(bo.Uint32(b[off+0:])) + secinfos[i].Offset = bo.Uint32(b[off+4:]) + secinfos[i].Size = bo.Uint32(b[off+8:]) + + off += btfVarSecinfoLen + } + + return off, nil +} + +type btfVariable struct { + Linkage uint32 +} + +var btfVariableLen = int(unsafe.Sizeof(btfVariable{})) + +func unmarshalBtfVariable(bv *btfVariable, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfVariableLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfVariable") + } + + bv.Linkage = bo.Uint32(b[0:]) + return btfVariableLen, nil +} + +type btfEnum struct { + NameOff uint32 + Val uint32 +} + +var btfEnumLen = int(unsafe.Sizeof(btfEnum{})) + +func unmarshalBtfEnums(enums []btfEnum, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnumLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum %d", i) + } + + enums[i].NameOff = 
bo.Uint32(b[off+0:]) + enums[i].Val = bo.Uint32(b[off+4:]) + + off += btfEnumLen + } + + return off, nil +} + +type btfEnum64 struct { + NameOff uint32 + ValLo32 uint32 + ValHi32 uint32 +} + +var btfEnum64Len = int(unsafe.Sizeof(btfEnum64{})) + +func unmarshalBtfEnums64(enums []btfEnum64, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range enums { + if off+btfEnum64Len > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfEnum64 %d", i) + } + + enums[i].NameOff = bo.Uint32(b[off+0:]) + enums[i].ValLo32 = bo.Uint32(b[off+4:]) + enums[i].ValHi32 = bo.Uint32(b[off+8:]) + + off += btfEnum64Len + } + + return off, nil +} + +type btfParam struct { + NameOff uint32 + Type TypeID +} + +var btfParamLen = int(unsafe.Sizeof(btfParam{})) + +func unmarshalBtfParams(params []btfParam, b []byte, bo binary.ByteOrder) (int, error) { + off := 0 + for i := range params { + if off+btfParamLen > len(b) { + return 0, fmt.Errorf("not enough bytes to unmarshal btfParam %d", i) + } + + params[i].NameOff = bo.Uint32(b[off+0:]) + params[i].Type = TypeID(bo.Uint32(b[off+4:])) + + off += btfParamLen + } + + return off, nil +} + +type btfDeclTag struct { + ComponentIdx uint32 +} + +var btfDeclTagLen = int(unsafe.Sizeof(btfDeclTag{})) + +func unmarshalBtfDeclTag(bdt *btfDeclTag, b []byte, bo binary.ByteOrder) (int, error) { + if len(b) < btfDeclTagLen { + return 0, fmt.Errorf("not enough bytes to unmarshal btfDeclTag") + } + + bdt.ComponentIdx = bo.Uint32(b[0:]) + return btfDeclTagLen, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/btf_types_string.go b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go new file mode 100644 index 0000000000..b7a1b80d15 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/btf_types_string.go @@ -0,0 +1,80 @@ +// Code generated by "stringer -linecomment -output=btf_types_string.go -type=FuncLinkage,VarLinkage,btfKind"; DO NOT EDIT. + +package btf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticFunc-0] + _ = x[GlobalFunc-1] + _ = x[ExternFunc-2] +} + +const _FuncLinkage_name = "staticglobalextern" + +var _FuncLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i FuncLinkage) String() string { + if i < 0 || i >= FuncLinkage(len(_FuncLinkage_index)-1) { + return "FuncLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _FuncLinkage_name[_FuncLinkage_index[i]:_FuncLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[StaticVar-0] + _ = x[GlobalVar-1] + _ = x[ExternVar-2] +} + +const _VarLinkage_name = "staticglobalextern" + +var _VarLinkage_index = [...]uint8{0, 6, 12, 18} + +func (i VarLinkage) String() string { + if i < 0 || i >= VarLinkage(len(_VarLinkage_index)-1) { + return "VarLinkage(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _VarLinkage_name[_VarLinkage_index[i]:_VarLinkage_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[kindUnknown-0] + _ = x[kindInt-1] + _ = x[kindPointer-2] + _ = x[kindArray-3] + _ = x[kindStruct-4] + _ = x[kindUnion-5] + _ = x[kindEnum-6] + _ = x[kindForward-7] + _ = x[kindTypedef-8] + _ = x[kindVolatile-9] + _ = x[kindConst-10] + _ = x[kindRestrict-11] + _ = x[kindFunc-12] + _ = x[kindFuncProto-13] + _ = x[kindVar-14] + _ = x[kindDatasec-15] + _ = x[kindFloat-16] + _ = x[kindDeclTag-17] + _ = x[kindTypeTag-18] + _ = x[kindEnum64-19] +} + +const _btfKind_name = "UnknownIntPointerArrayStructUnionEnumForwardTypedefVolatileConstRestrictFuncFuncProtoVarDatasecFloatDeclTagTypeTagEnum64" + +var _btfKind_index = [...]uint8{0, 7, 10, 17, 22, 28, 33, 37, 44, 51, 59, 64, 72, 76, 85, 88, 95, 100, 107, 114, 120} + +func (i btfKind) String() string { + if i >= btfKind(len(_btfKind_index)-1) { + return "btfKind(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _btfKind_name[_btfKind_index[i]:_btfKind_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/btf/core.go b/vendor/github.com/cilium/ebpf/btf/core.go new file mode 100644 index 0000000000..ee89f98331 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/core.go @@ -0,0 +1,1261 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "reflect" + "slices" + "strconv" + "strings" + + "github.com/cilium/ebpf/asm" +) + +// Code in this file is derived from libbpf, which is available under a BSD +// 2-Clause license. + +// A constant used when CO-RE relocation has to remove instructions. +// +// Taken from libbpf. +const COREBadRelocationSentinel = 0xbad2310 + +// COREFixup is the result of computing a CO-RE relocation for a target. +type COREFixup struct { + kind coreKind + local uint64 + target uint64 + // True if there is no valid fixup. The instruction is replaced with an + // invalid dummy. + poison bool + // True if the validation of the local value should be skipped. Used by + // some kinds of bitfield relocations. + skipLocalValidation bool +} + +func (f *COREFixup) equal(other COREFixup) bool { + return f.local == other.local && f.target == other.target +} + +func (f *COREFixup) String() string { + if f.poison { + return fmt.Sprintf("%s=poison", f.kind) + } + return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target) +} + +func (f *COREFixup) Apply(ins *asm.Instruction) error { + if f.poison { + // Relocation is poisoned, replace the instruction with an invalid one. + if ins.OpCode.IsDWordLoad() { + // Replace a dword load with a invalid dword load to preserve instruction size. + *ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord) + } else { + // Replace all single size instruction with a invalid call instruction. + *ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call() + } + + // Add context to the kernel verifier output. 
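The generated _btfKind_name/_btfKind_index pair above follows the usual stringer layout: one concatenated string plus an offset table. A tiny standalone sketch of the same technique using the first few entries; not part of the vendored code.

package main

import "fmt"

func main() {
	// One concatenated name string plus offsets into it, exactly the
	// lookup the generated String methods perform.
	const names = "UnknownIntPointerArray"
	index := [...]uint8{0, 7, 10, 17, 22}

	for kind := 0; kind < len(index)-1; kind++ {
		fmt.Println(kind, names[index[kind]:index[kind+1]])
	}
	// 0 Unknown
	// 1 Int
	// 2 Pointer
	// 3 Array
}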
+ if source := ins.Source(); source != nil { + *ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source))) + } else { + *ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE")) + } + + return nil + } + + switch class := ins.OpCode.Class(); class { + case asm.LdXClass, asm.StClass, asm.StXClass: + if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset { + return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local) + } + + if f.target > math.MaxInt16 { + return fmt.Errorf("offset %d exceeds MaxInt16", f.target) + } + + ins.Offset = int16(f.target) + + case asm.LdClass: + if !ins.IsConstantLoad(asm.DWord) { + return fmt.Errorf("not a dword-sized immediate load") + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f) + } + + ins.Constant = int64(f.target) + + case asm.ALUClass: + if ins.OpCode.ALUOp() == asm.Swap { + return fmt.Errorf("relocation against swap") + } + + fallthrough + + case asm.ALU64Class: + if src := ins.OpCode.Source(); src != asm.ImmSource { + return fmt.Errorf("invalid source %s", src) + } + + if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant { + return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins) + } + + if f.target > math.MaxInt32 { + return fmt.Errorf("immediate %d exceeds MaxInt32", f.target) + } + + ins.Constant = int64(f.target) + + default: + return fmt.Errorf("invalid class %s", class) + } + + return nil +} + +func (f COREFixup) isNonExistant() bool { + return f.kind.checksForExistence() && f.target == 0 +} + +// coreKind is the type of CO-RE relocation as specified in BPF source code. 
+type coreKind uint32 + +const ( + reloFieldByteOffset coreKind = iota /* field byte offset */ + reloFieldByteSize /* field size in bytes */ + reloFieldExists /* field existence in target kernel */ + reloFieldSigned /* field signedness (0 - unsigned, 1 - signed) */ + reloFieldLShiftU64 /* bitfield-specific left bitshift */ + reloFieldRShiftU64 /* bitfield-specific right bitshift */ + reloTypeIDLocal /* type ID in local BPF object */ + reloTypeIDTarget /* type ID in target kernel */ + reloTypeExists /* type existence in target kernel */ + reloTypeSize /* type size in bytes */ + reloEnumvalExists /* enum value existence in target kernel */ + reloEnumvalValue /* enum value integer value */ + reloTypeMatches /* type matches kernel type */ +) + +func (k coreKind) checksForExistence() bool { + return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches +} + +func (k coreKind) String() string { + switch k { + case reloFieldByteOffset: + return "byte_off" + case reloFieldByteSize: + return "byte_sz" + case reloFieldExists: + return "field_exists" + case reloFieldSigned: + return "signed" + case reloFieldLShiftU64: + return "lshift_u64" + case reloFieldRShiftU64: + return "rshift_u64" + case reloTypeIDLocal: + return "local_type_id" + case reloTypeIDTarget: + return "target_type_id" + case reloTypeExists: + return "type_exists" + case reloTypeSize: + return "type_size" + case reloEnumvalExists: + return "enumval_exists" + case reloEnumvalValue: + return "enumval_value" + case reloTypeMatches: + return "type_matches" + default: + return fmt.Sprintf("unknown (%d)", k) + } +} + +// CORERelocate calculates changes needed to adjust eBPF instructions for differences +// in types. +// +// targets forms the set of types to relocate against. The first element has to be +// BTF for vmlinux, the following must be types for kernel modules. +// +// resolveLocalTypeID is called for each local type which requires a stable TypeID. +// Calling the function with the same type multiple times must produce the same +// result. It is the callers responsibility to ensure that the relocated instructions +// are loaded with matching BTF. +// +// Returns a list of fixups which can be applied to instructions to make them +// match the target type(s). +// +// Fixups are returned in the order of relos, e.g. fixup[i] is the solution +// for relos[i]. +func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + if len(targets) == 0 { + // Explicitly check for nil here since the argument used to be optional. + return nil, fmt.Errorf("targets must be provided") + } + + // We can't encode type IDs that aren't for vmlinux into instructions at the + // moment. + resolveTargetTypeID := targets[0].TypeID + + for _, target := range targets { + if bo != target.imm.byteOrder { + return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder) + } + } + + type reloGroup struct { + relos []*CORERelocation + // Position of each relocation in relos. + indices []int + } + + // Split relocations into per Type lists. + relosByType := make(map[Type]*reloGroup) + result := make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind == reloTypeIDLocal { + // Filtering out reloTypeIDLocal here makes our lives a lot easier + // down the line, since it doesn't have a target at all. 
+ if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor) + } + + id, err := resolveLocalTypeID(relo.typ) + if err != nil { + return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err) + } + + result[i] = COREFixup{ + kind: relo.kind, + local: uint64(relo.id), + target: uint64(id), + } + continue + } + + group, ok := relosByType[relo.typ] + if !ok { + group = &reloGroup{} + relosByType[relo.typ] = group + } + group.relos = append(group.relos, relo) + group.indices = append(group.indices, i) + } + + for localType, group := range relosByType { + localTypeName := localType.TypeName() + if localTypeName == "" { + return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported) + } + + essentialName := newEssentialName(localTypeName) + + var targetTypes []Type + for _, target := range targets { + namedTypeIDs := target.imm.namedTypes[essentialName] + targetTypes = slices.Grow(targetTypes, len(namedTypeIDs)) + for _, id := range namedTypeIDs { + typ, err := target.TypeByID(id) + if err != nil { + return nil, err + } + + targetTypes = append(targetTypes, typ) + } + } + + fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("relocate %s: %w", localType, err) + } + + for j, index := range group.indices { + result[index] = fixups[j] + } + } + + return result, nil +} + +var errAmbiguousRelocation = errors.New("ambiguous relocation") +var errImpossibleRelocation = errors.New("impossible relocation") +var errIncompatibleTypes = errors.New("incompatible types") + +// coreCalculateFixups finds the target type that best matches all relocations. +// +// All relos must target the same type. +// +// The best target is determined by scoring: the less poisoning we have to do +// the better the target is. +func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) { + bestScore := len(relos) + var bestFixups []COREFixup + for _, target := range targets { + score := 0 // lower is better + fixups := make([]COREFixup, 0, len(relos)) + for _, relo := range relos { + fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID) + if err != nil { + return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err) + } + if fixup.poison || fixup.isNonExistant() { + score++ + } + fixups = append(fixups, fixup) + } + + if score > bestScore { + // We have a better target already, ignore this one. + continue + } + + if score < bestScore { + // This is the best target yet, use it. + bestScore = score + bestFixups = fixups + continue + } + + // Some other target has the same score as the current one. Make sure + // the fixups agree with each other. + for i, fixup := range bestFixups { + if !fixup.equal(fixups[i]) { + return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation) + } + } + } + + if bestFixups == nil { + // Nothing at all matched, probably because there are no suitable + // targets at all. + // + // Poison everything except checksForExistence. 
+ bestFixups = make([]COREFixup, len(relos)) + for i, relo := range relos { + if relo.kind.checksForExistence() { + bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0} + } else { + bestFixups[i] = COREFixup{kind: relo.kind, poison: true} + } + } + } + + return bestFixups, nil +} + +var errNoSignedness = errors.New("no signedness") + +// coreCalculateFixup calculates the fixup given a relocation and a target type. +func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) { + fixup := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target}, nil + } + fixupWithoutValidation := func(local, target uint64) (COREFixup, error) { + return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil + } + poison := func() (COREFixup, error) { + if relo.kind.checksForExistence() { + return fixup(1, 0) + } + return COREFixup{kind: relo.kind, poison: true}, nil + } + zero := COREFixup{} + + local := relo.typ + + switch relo.kind { + case reloTypeMatches: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := coreTypesMatch(local, target, nil) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + return fixup(1, 1) + + case reloTypeIDTarget, reloTypeSize, reloTypeExists: + if len(relo.accessor) > 1 || relo.accessor[0] != 0 { + return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) + } + + err := CheckTypeCompatibility(local, target) + if errors.Is(err, errIncompatibleTypes) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloTypeExists: + return fixup(1, 1) + + case reloTypeIDTarget: + targetID, err := resolveTargetTypeID(target) + if errors.Is(err, ErrNotFound) { + // Probably a relocation trying to get the ID + // of a type from a kmod. + return poison() + } + if err != nil { + return zero, err + } + return fixup(uint64(relo.id), uint64(targetID)) + + case reloTypeSize: + localSize, err := Sizeof(local) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(target) + if err != nil { + return zero, err + } + + return fixup(uint64(localSize), uint64(targetSize)) + } + + case reloEnumvalValue, reloEnumvalExists: + localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + switch relo.kind { + case reloEnumvalExists: + return fixup(1, 1) + + case reloEnumvalValue: + return fixup(localValue.Value, targetValue.Value) + } + + case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned: + if _, ok := As[*Fwd](target); ok { + // We can't relocate fields using a forward declaration, so + // skip it. If a non-forward declaration is present in the BTF + // we'll find it in one of the other iterations. 
+ return poison() + } + + localField, targetField, err := coreFindField(local, relo.accessor, target) + if errors.Is(err, errImpossibleRelocation) { + return poison() + } + if err != nil { + return zero, err + } + + maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { + f.skipLocalValidation = localField.bitfieldSize > 0 + return f, err + } + + switch relo.kind { + case reloFieldExists: + return fixup(1, 1) + + case reloFieldByteOffset: + return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset))) + + case reloFieldByteSize: + localSize, err := Sizeof(localField.Type) + if err != nil { + return zero, err + } + + targetSize, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize))) + + case reloFieldLShiftU64: + var target uint64 + if bo == binary.LittleEndian { + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + target = uint64(64 - targetField.bitfieldOffset - targetSize) + } else { + loadWidth, err := Sizeof(targetField.Type) + if err != nil { + return zero, err + } + + target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) + } + return fixupWithoutValidation(0, target) + + case reloFieldRShiftU64: + targetSize, err := targetField.sizeBits() + if err != nil { + return zero, err + } + + return fixupWithoutValidation(0, uint64(64-targetSize)) + + case reloFieldSigned: + switch local := UnderlyingType(localField.Type).(type) { + case *Enum: + target, ok := As[*Enum](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type) + } + + return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed)) + case *Int: + target, ok := As[*Int](targetField.Type) + if !ok { + return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type) + } + + return fixup( + uint64(local.Encoding&Signed), + uint64(target.Encoding&Signed), + ) + default: + return zero, fmt.Errorf("type %T: %w", local, errNoSignedness) + } + } + } + + return zero, ErrNotSupported +} + +func boolToUint64(val bool) uint64 { + if val { + return 1 + } + return 0 +} + +/* coreAccessor contains a path through a struct. It contains at least one index. + * + * The interpretation depends on the kind of the relocation. The following is + * taken from struct bpf_core_relo in libbpf_internal.h: + * + * - for field-based relocations, string encodes an accessed field using + * a sequence of field and array indices, separated by colon (:). It's + * conceptually very close to LLVM's getelementptr ([0]) instruction's + * arguments for identifying offset to a field. + * - for type-based relocations, strings is expected to be just "0"; + * - for enum value-based relocations, string contains an index of enum + * value within its enum type; + * + * Example to provide a better feel. 
+ * + * struct sample { + * int a; + * struct { + * int b[10]; + * }; + * }; + * + * struct sample s = ...; + * int x = &s->a; // encoded as "0:0" (a is field #0) + * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, + * // b is field #0 inside anon struct, accessing elem #5) + * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) + */ +type coreAccessor []int + +func parseCOREAccessor(accessor string) (coreAccessor, error) { + if accessor == "" { + return nil, fmt.Errorf("empty accessor") + } + + parts := strings.Split(accessor, ":") + result := make(coreAccessor, 0, len(parts)) + for _, part := range parts { + // 31 bits to avoid overflowing int on 32 bit platforms. + index, err := strconv.ParseUint(part, 10, 31) + if err != nil { + return nil, fmt.Errorf("accessor index %q: %s", part, err) + } + + result = append(result, int(index)) + } + + return result, nil +} + +func (ca coreAccessor) String() string { + strs := make([]string, 0, len(ca)) + for _, i := range ca { + strs = append(strs, strconv.Itoa(i)) + } + return strings.Join(strs, ":") +} + +func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { + e, ok := As[*Enum](t) + if !ok { + return nil, fmt.Errorf("not an enum: %s", t) + } + + if len(ca) > 1 { + return nil, fmt.Errorf("invalid accessor %s for enum", ca) + } + + i := ca[0] + if i >= len(e.Values) { + return nil, fmt.Errorf("invalid index %d for %s", i, e) + } + + return &e.Values[i], nil +} + +// coreField represents the position of a "child" of a composite type from the +// start of that type. +// +// /- start of composite +// | offset * 8 | bitfieldOffset | bitfieldSize | ... | +// \- start of field end of field -/ +type coreField struct { + Type Type + + // The position of the field from the start of the composite type in bytes. + offset uint32 + + // The offset of the bitfield in bits from the start of the field. + bitfieldOffset Bits + + // The size of the bitfield in bits. + // + // Zero if the field is not a bitfield. + bitfieldSize Bits +} + +func (cf *coreField) adjustOffsetToNthElement(n int) error { + if n == 0 { + return nil + } + + size, err := Sizeof(cf.Type) + if err != nil { + return err + } + + cf.offset += uint32(n) * uint32(size) + return nil +} + +func (cf *coreField) adjustOffsetBits(offset Bits) error { + align, err := alignof(cf.Type) + if err != nil { + return err + } + + // We can compute the load offset by: + // 1) converting the bit offset to bytes with a flooring division. + // 2) dividing and multiplying that offset by the alignment, yielding the + // load size aligned offset. + offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) + + // The number of bits remaining is the bit offset less the number of bits + // we can "skip" with the aligned offset. + cf.bitfieldOffset = offset - Bits(offsetBytes*8) + + // We know that cf.offset is aligned at to at least align since we get it + // from the compiler via BTF. Adding an aligned offsetBytes preserves the + // alignment. + cf.offset += offsetBytes + return nil +} + +func (cf *coreField) sizeBits() (Bits, error) { + if cf.bitfieldSize > 0 { + return cf.bitfieldSize, nil + } + + // Someone is trying to access a non-bitfield via a bit shift relocation. + // This happens when a field changes from a bitfield to a regular field + // between kernel versions. Synthesise the size to make the shifts work. 
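The accessor encoding documented above ("0:0", "0:1:0:5" and so on) is a colon-separated list of indices. A standalone sketch of that parsing, with parseAccessor as a hypothetical stand-in for the unexported parseCOREAccessor; illustrative only.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseAccessor splits an accessor string on ":" and parses each index,
// mirroring the logic of parseCOREAccessor above.
func parseAccessor(s string) ([]int, error) {
	parts := strings.Split(s, ":")
	out := make([]int, 0, len(parts))
	for _, p := range parts {
		// 31 bits, as in the vendored code, to stay within int on 32-bit platforms.
		v, err := strconv.ParseUint(p, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %w", p, err)
		}
		out = append(out, int(v))
	}
	return out, nil
}

func main() {
	// "0:1:0:5" is &s->b[5] in the struct sample from the comment above:
	// base element 0, field 1 (the anonymous struct), its field 0 (b), index 5.
	idx, err := parseAccessor("0:1:0:5")
	fmt.Println(idx, err) // [0 1 0 5] <nil>
}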
+ size, err := Sizeof(cf.Type) + if err != nil { + return 0, err + } + return Bits(size * 8), nil +} + +// coreFindField descends into the local type using the accessor and tries to +// find an equivalent field in target at each step. +// +// Returns the field and the offset of the field from the start of +// target in bits. +func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { + local := coreField{Type: localT} + target := coreField{Type: targetT} + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) + } + + // The first index is used to offset a pointer of the base type like + // when accessing an array. + if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { + return coreField{}, coreField{}, err + } + + var localMaybeFlex, targetMaybeFlex bool + for i, acc := range localAcc[1:] { + switch localType := UnderlyingType(local.Type).(type) { + case composite: + // For composite types acc is used to find the field in the local type, + // and then we try to find a field in target with the same name. + localMembers := localType.members() + if acc >= len(localMembers) { + return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) + } + + localMember := localMembers[acc] + if localMember.Name == "" { + localMemberType, ok := As[composite](localMember.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) + } + + // This is an anonymous struct or union, ignore it. + local = coreField{ + Type: localMemberType, + offset: local.offset + localMember.Offset.Bytes(), + } + localMaybeFlex = false + continue + } + + targetType, ok := As[composite](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) + } + + targetMember, last, err := coreFindMember(targetType, localMember.Name) + if err != nil { + return coreField{}, coreField{}, err + } + + local = coreField{ + Type: localMember.Type, + offset: local.offset, + bitfieldSize: localMember.BitfieldSize, + } + localMaybeFlex = acc == len(localMembers)-1 + + target = coreField{ + Type: targetMember.Type, + offset: target.offset, + bitfieldSize: targetMember.BitfieldSize, + } + targetMaybeFlex = last + + if local.bitfieldSize == 0 && target.bitfieldSize == 0 { + local.offset += localMember.Offset.Bytes() + target.offset += targetMember.Offset.Bytes() + break + } + + // Either of the members is a bitfield. Make sure we're at the + // end of the accessor. + if next := i + 1; next < len(localAcc[1:]) { + return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") + } + + if err := local.adjustOffsetBits(localMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + if err := target.adjustOffsetBits(targetMember.Offset); err != nil { + return coreField{}, coreField{}, err + } + + case *Array: + // For arrays, acc is the index in the target. 
+ targetType, ok := As[*Array](target.Type) + if !ok { + return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) + } + + if localType.Nelems == 0 && !localMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") + } + if targetType.Nelems == 0 && !targetMaybeFlex { + return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") + } + + if localType.Nelems > 0 && acc >= int(localType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) + } + if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { + return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) + } + + local = coreField{ + Type: localType.Type, + offset: local.offset, + } + localMaybeFlex = false + + if err := local.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + target = coreField{ + Type: targetType.Type, + offset: target.offset, + } + targetMaybeFlex = false + + if err := target.adjustOffsetToNthElement(acc); err != nil { + return coreField{}, coreField{}, err + } + + default: + return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) + } + + if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { + return coreField{}, coreField{}, err + } + } + + return local, target, nil +} + +// coreFindMember finds a member in a composite type while handling anonymous +// structs and unions. +func coreFindMember(typ composite, name string) (Member, bool, error) { + if name == "" { + return Member{}, false, errors.New("can't search for anonymous member") + } + + type offsetTarget struct { + composite + offset Bits + } + + targets := []offsetTarget{{typ, 0}} + visited := make(map[composite]bool) + + for i := 0; i < len(targets); i++ { + target := targets[i] + + // Only visit targets once to prevent infinite recursion. + if visited[target] { + continue + } + if len(visited) >= maxResolveDepth { + // This check is different than libbpf, which restricts the entire + // path to BPF_CORE_SPEC_MAX_LEN items. + return Member{}, false, fmt.Errorf("type is nested too deep") + } + visited[target] = true + + members := target.members() + for j, member := range members { + if member.Name == name { + // NB: This is safe because member is a copy. + member.Offset += target.offset + return member, j == len(members)-1, nil + } + + // The names don't match, but this member could be an anonymous struct + // or union. + if member.Name != "" { + continue + } + + comp, ok := As[composite](member.Type) + if !ok { + return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type) + } + + targets = append(targets, offsetTarget{comp, target.offset + member.Offset}) + } + } + + return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation) +} + +// coreFindEnumValue follows localAcc to find the equivalent enum value in target. 
+func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) { + localValue, err := localAcc.enumValue(local) + if err != nil { + return nil, nil, err + } + + targetEnum, ok := As[*Enum](target) + if !ok { + return nil, nil, errImpossibleRelocation + } + + localName := newEssentialName(localValue.Name) + for i, targetValue := range targetEnum.Values { + if newEssentialName(targetValue.Name) != localName { + continue + } + + return localValue, &targetEnum.Values[i], nil + } + + return nil, nil, errImpossibleRelocation +} + +// CheckTypeCompatibility checks local and target types for Compatibility according to CO-RE rules. +// +// Only layout compatibility is checked, ignoring names of the root type. +func CheckTypeCompatibility(localType Type, targetType Type) error { + return coreAreTypesCompatible(localType, targetType, nil) +} + +type pair struct { + A, B Type +} + +/* The comment below is from bpf_core_types_are_compat in libbpf.c: + * + * Check local and target types for compatibility. This check is used for + * type-based CO-RE relocations and follow slightly different rules than + * field-based relocations. This function assumes that root types were already + * checked for name match. Beyond that initial root-level name check, names + * are completely ignored. Compatibility rules are as follows: + * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but + * kind should match for local and target types (i.e., STRUCT is not + * compatible with UNION); + * - for ENUMs, the size is ignored; + * - for INT, size and signedness are ignored; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * - CONST/VOLATILE/RESTRICT modifiers are ignored; + * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible; + * - FUNC_PROTOs are compatible if they have compatible signature: same + * number of input args and compatible return and argument types. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errIncompatibleTypes if types are not compatible. 
+ */ +func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := localType.(type) { + case *Void, *Struct, *Union, *Enum, *Fwd, *Int: + return nil + + case *Pointer: + tv := targetType.(*Pointer) + return coreAreTypesCompatible(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil { + return err + } + + return coreAreTypesCompatible(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil { + return err + } + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, localParam := range lv.Params { + targetParam := tv.Params[i] + if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil { + return err + } + } + + return nil + + default: + return fmt.Errorf("unsupported type %T", localType) + } +} + +/* coreAreMembersCompatible checks two types for field-based relocation compatibility. + * + * The comment below is from bpf_core_fields_are_compat in libbpf.c: + * + * Check two types for compatibility for the purpose of field access + * relocation. const/volatile/restrict and typedefs are skipped to ensure we + * are relocating semantically compatible entities: + * - any two STRUCTs/UNIONs are compatible and can be mixed; + * - any two FWDs are compatible, if their names match (modulo flavor suffix); + * - any two PTRs are always compatible; + * - for ENUMs, names should be the same (ignoring flavor suffix) or at + * least one of enums should be anonymous; + * - for ENUMs, check sizes, names are ignored; + * - for INT, size and signedness are ignored; + * - any two FLOATs are always compatible; + * - for ARRAY, dimensionality is ignored, element types are checked for + * compatibility recursively; + * [ NB: coreAreMembersCompatible doesn't recurse, this check is done + * by coreFindField. ] + * - everything else shouldn't be ever a target of relocation. + * These rules are not set in stone and probably will be adjusted as we get + * more experience with using BPF CO-RE relocations. + * + * Returns errImpossibleRelocation if the members are not compatible. 
+ */ +func coreAreMembersCompatible(localType Type, targetType Type) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + _, lok := localType.(composite) + _, tok := targetType.(composite) + if lok && tok { + return nil + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch: %w", errImpossibleRelocation) + } + + switch lv := localType.(type) { + case *Array, *Pointer, *Float, *Int: + return nil + + case *Enum: + tv := targetType.(*Enum) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + case *Fwd: + tv := targetType.(*Fwd) + if !coreEssentialNamesMatch(lv.Name, tv.Name) { + return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation) + } + + return nil + + default: + return fmt.Errorf("type %s: %w", localType, ErrNotSupported) + } +} + +// coreEssentialNamesMatch compares two names while ignoring their flavour suffix. +// +// This should only be used on names which are in the global scope, like struct +// names, typedefs or enum values. +func coreEssentialNamesMatch(a, b string) bool { + if a == "" || b == "" { + // allow anonymous and named type to match + return true + } + + return newEssentialName(a) == newEssentialName(b) +} + +/* The comment below is from __bpf_core_types_match in relo_core.c: + * + * Check that two types "match". This function assumes that root types were + * already checked for name match. + * + * The matching relation is defined as follows: + * - modifiers and typedefs are stripped (and, hence, effectively ignored) + * - generally speaking types need to be of same kind (struct vs. struct, union + * vs. union, etc.) + * - exceptions are struct/union behind a pointer which could also match a + * forward declaration of a struct or union, respectively, and enum vs. 
+ * enum64 (see below) + * Then, depending on type: + * - integers: + * - match if size and signedness match + * - arrays & pointers: + * - target types are recursively matched + * - structs & unions: + * - local members need to exist in target with the same name + * - for each member we recursively check match unless it is already behind a + * pointer, in which case we only check matching names and compatible kind + * - enums: + * - local variants have to have a match in target by symbolic name (but not + * numeric value) + * - size has to match (but enum may match enum64 and vice versa) + * - function pointers: + * - number and position of arguments in local type has to match target + * - for each argument and the return value we recursively check match + */ +func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error { + localType = UnderlyingType(localType) + targetType = UnderlyingType(targetType) + + if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) { + return fmt.Errorf("type name %q don't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes) + } + + if reflect.TypeOf(localType) != reflect.TypeOf(targetType) { + return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + if _, ok := visited[pair{localType, targetType}]; ok { + return nil + } + if visited == nil { + visited = make(map[pair]struct{}) + } + visited[pair{localType, targetType}] = struct{}{} + + switch lv := (localType).(type) { + case *Void: + + case *Fwd: + if targetType.(*Fwd).Kind != lv.Kind { + return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Enum: + return coreEnumsMatch(lv, targetType.(*Enum)) + + case composite: + tv := targetType.(composite) + + if len(lv.members()) > len(tv.members()) { + return errIncompatibleTypes + } + + localMembers := lv.members() + targetMembers := map[string]Member{} + for _, member := range tv.members() { + targetMembers[member.Name] = member + } + + for _, localMember := range localMembers { + targetMember, found := targetMembers[localMember.Name] + if !found { + return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes) + } + + err := coreTypesMatch(localMember.Type, targetMember.Type, visited) + if err != nil { + return err + } + } + + case *Int: + if !coreEncodingMatches(lv, targetType.(*Int)) { + return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + case *Pointer: + tv := targetType.(*Pointer) + + // Allow a pointer to a forward declaration to match a struct + // or union. 
+ if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) { + return nil + } + + if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) { + return nil + } + + return coreTypesMatch(lv.Target, tv.Target, visited) + + case *Array: + tv := targetType.(*Array) + + if lv.Nelems != tv.Nelems { + return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes) + } + + return coreTypesMatch(lv.Type, tv.Type, visited) + + case *FuncProto: + tv := targetType.(*FuncProto) + + if len(lv.Params) != len(tv.Params) { + return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes) + } + + for i, lparam := range lv.Params { + if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil { + return err + } + } + + return coreTypesMatch(lv.Return, tv.Return, visited) + + default: + return fmt.Errorf("unsupported type %T", localType) + } + + return nil +} + +// coreEncodingMatches returns true if both ints have the same size and signedness. +// All encodings other than `Signed` are considered unsigned. +func coreEncodingMatches(local, target *Int) bool { + return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed) +} + +// coreEnumsMatch checks two enums match, which is considered to be the case if the following is true: +// - size has to match (but enum may match enum64 and vice versa) +// - local variants have to have a match in target by symbolic name (but not numeric value) +func coreEnumsMatch(local *Enum, target *Enum) error { + if local.Size != target.Size { + return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes) + } + + // If there are more values in the local than the target, there must be at least one value in the local + // that isn't in the target, and therefor the types are incompatible. + if len(local.Values) > len(target.Values) { + return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes) + } + +outer: + for _, lv := range local.Values { + for _, rv := range target.Values { + if coreEssentialNamesMatch(lv.Name, rv.Name) { + continue outer + } + } + + return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/doc.go b/vendor/github.com/cilium/ebpf/btf/doc.go new file mode 100644 index 0000000000..b1f4b1fc3e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/doc.go @@ -0,0 +1,5 @@ +// Package btf handles data encoded according to the BPF Type Format. +// +// The canonical documentation lives in the Linux kernel repository and is +// available at https://www.kernel.org/doc/html/latest/bpf/btf.html +package btf diff --git a/vendor/github.com/cilium/ebpf/btf/ext_info.go b/vendor/github.com/cilium/ebpf/btf/ext_info.go new file mode 100644 index 0000000000..2c684fe2a7 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/ext_info.go @@ -0,0 +1,835 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" +) + +// ExtInfos contains ELF section metadata. +type ExtInfos struct { + // The slices are sorted by offset in ascending order. + funcInfos map[string]FuncOffsets + lineInfos map[string]LineOffsets + relocationInfos map[string]CORERelocationInfos +} + +// loadExtInfosFromELF parses ext infos from the .BTF.ext section in an ELF. +// +// Returns an error wrapping ErrNotFound if no ext infos are present. 
+func loadExtInfosFromELF(file *internal.SafeELFFile, spec *Spec) (*ExtInfos, error) { + section := file.Section(".BTF.ext") + if section == nil { + return nil, fmt.Errorf("btf ext infos: %w", ErrNotFound) + } + + if section.ReaderAt == nil { + return nil, fmt.Errorf("compressed ext_info is not supported") + } + + return loadExtInfos(section.ReaderAt, file.ByteOrder, spec) +} + +// loadExtInfos parses bare ext infos. +func loadExtInfos(r io.ReaderAt, bo binary.ByteOrder, spec *Spec) (*ExtInfos, error) { + // Open unbuffered section reader. binary.Read() calls io.ReadFull on + // the header structs, resulting in one syscall per header. + headerRd := io.NewSectionReader(r, 0, math.MaxInt64) + extHeader, err := parseBTFExtHeader(headerRd, bo) + if err != nil { + return nil, fmt.Errorf("parsing BTF extension header: %w", err) + } + + coreHeader, err := parseBTFExtCOREHeader(headerRd, bo, extHeader) + if err != nil { + return nil, fmt.Errorf("parsing BTF CO-RE header: %w", err) + } + + buf := internal.NewBufferedSectionReader(r, extHeader.funcInfoStart(), int64(extHeader.FuncInfoLen)) + btfFuncInfos, err := parseFuncInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF function info: %w", err) + } + + funcInfos := make(map[string]FuncOffsets, len(btfFuncInfos)) + for section, bfis := range btfFuncInfos { + funcInfos[section], err = newFuncOffsets(bfis, spec) + if err != nil { + return nil, fmt.Errorf("section %s: func infos: %w", section, err) + } + } + + buf = internal.NewBufferedSectionReader(r, extHeader.lineInfoStart(), int64(extHeader.LineInfoLen)) + btfLineInfos, err := parseLineInfos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing BTF line info: %w", err) + } + + lineInfos := make(map[string]LineOffsets, len(btfLineInfos)) + for section, blis := range btfLineInfos { + lineInfos[section], err = newLineInfos(blis, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: line infos: %w", section, err) + } + } + + if coreHeader == nil || coreHeader.COREReloLen == 0 { + return &ExtInfos{funcInfos, lineInfos, nil}, nil + } + + var btfCORERelos map[string][]bpfCORERelo + buf = internal.NewBufferedSectionReader(r, extHeader.coreReloStart(coreHeader), int64(coreHeader.COREReloLen)) + btfCORERelos, err = parseCORERelos(buf, bo, spec.strings) + if err != nil { + return nil, fmt.Errorf("parsing CO-RE relocation info: %w", err) + } + + coreRelos := make(map[string]CORERelocationInfos, len(btfCORERelos)) + for section, brs := range btfCORERelos { + coreRelos[section], err = newRelocationInfos(brs, spec, spec.strings) + if err != nil { + return nil, fmt.Errorf("section %s: CO-RE relocations: %w", section, err) + } + } + + return &ExtInfos{funcInfos, lineInfos, coreRelos}, nil +} + +type ( + funcInfoMeta struct{} + coreRelocationMeta struct{} +) + +// Assign per-section metadata from BTF to a section's instructions. +func (ei *ExtInfos) Assign(insns asm.Instructions, section string) { + funcInfos := ei.funcInfos[section] + lineInfos := ei.lineInfos[section] + reloInfos := ei.relocationInfos[section] + + AssignMetadataToInstructions(insns, funcInfos, lineInfos, reloInfos) +} + +// Assign per-instruction metadata to the instructions in insns. 
+func AssignMetadataToInstructions( + insns asm.Instructions, + funcInfos FuncOffsets, + lineInfos LineOffsets, + reloInfos CORERelocationInfos, +) { + iter := insns.Iterate() + for iter.Next() { + if len(funcInfos) > 0 && funcInfos[0].Offset == iter.Offset { + *iter.Ins = WithFuncMetadata(*iter.Ins, funcInfos[0].Func) + funcInfos = funcInfos[1:] + } + + if len(lineInfos) > 0 && lineInfos[0].Offset == iter.Offset { + *iter.Ins = iter.Ins.WithSource(lineInfos[0].Line) + lineInfos = lineInfos[1:] + } + + if len(reloInfos.infos) > 0 && reloInfos.infos[0].offset == iter.Offset { + iter.Ins.Metadata.Set(coreRelocationMeta{}, reloInfos.infos[0].relo) + reloInfos.infos = reloInfos.infos[1:] + } + } +} + +// MarshalExtInfos encodes function and line info embedded in insns into kernel +// wire format. +// +// If an instruction has an [asm.Comment], it will be synthesized into a mostly +// empty line info. +func MarshalExtInfos(insns asm.Instructions, b *Builder) (funcInfos, lineInfos []byte, _ error) { + iter := insns.Iterate() + for iter.Next() { + if iter.Ins.Source() != nil || FuncMetadata(iter.Ins) != nil { + goto marshal + } + } + + return nil, nil, nil + +marshal: + var fiBuf, liBuf bytes.Buffer + for { + if fn := FuncMetadata(iter.Ins); fn != nil { + fi := &FuncOffset{ + Func: fn, + Offset: iter.Offset, + } + if err := fi.marshal(&fiBuf, b); err != nil { + return nil, nil, fmt.Errorf("write func info: %w", err) + } + } + + if source := iter.Ins.Source(); source != nil { + var line *Line + if l, ok := source.(*Line); ok { + line = l + } else { + line = &Line{ + line: source.String(), + } + } + + li := &LineOffset{ + Offset: iter.Offset, + Line: line, + } + if err := li.marshal(&liBuf, b); err != nil { + return nil, nil, fmt.Errorf("write line info: %w", err) + } + } + + if !iter.Next() { + break + } + } + + return fiBuf.Bytes(), liBuf.Bytes(), nil +} + +// btfExtHeader is found at the start of the .BTF.ext section. +type btfExtHeader struct { + Magic uint16 + Version uint8 + Flags uint8 + + // HdrLen is larger than the size of struct btfExtHeader when it is + // immediately followed by a btfExtCOREHeader. + HdrLen uint32 + + FuncInfoOff uint32 + FuncInfoLen uint32 + LineInfoOff uint32 + LineInfoLen uint32 +} + +// parseBTFExtHeader parses the header of the .BTF.ext section. +func parseBTFExtHeader(r io.Reader, bo binary.ByteOrder) (*btfExtHeader, error) { + var header btfExtHeader + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + if header.Magic != btfMagic { + return nil, fmt.Errorf("incorrect magic value %v", header.Magic) + } + + if header.Version != 1 { + return nil, fmt.Errorf("unexpected version %v", header.Version) + } + + if header.Flags != 0 { + return nil, fmt.Errorf("unsupported flags %v", header.Flags) + } + + if int64(header.HdrLen) < int64(binary.Size(&header)) { + return nil, fmt.Errorf("header length shorter than btfExtHeader size") + } + + return &header, nil +} + +// funcInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its func_info entries. +func (h *btfExtHeader) funcInfoStart() int64 { + return int64(h.HdrLen + h.FuncInfoOff) +} + +// lineInfoStart returns the offset from the beginning of the .BTF.ext section +// to the start of its line_info entries. 
+func (h *btfExtHeader) lineInfoStart() int64 { + return int64(h.HdrLen + h.LineInfoOff) +} + +// coreReloStart returns the offset from the beginning of the .BTF.ext section +// to the start of its CO-RE relocation entries. +func (h *btfExtHeader) coreReloStart(ch *btfExtCOREHeader) int64 { + return int64(h.HdrLen + ch.COREReloOff) +} + +// btfExtCOREHeader is found right after the btfExtHeader when its HdrLen +// field is larger than its size. +type btfExtCOREHeader struct { + COREReloOff uint32 + COREReloLen uint32 +} + +// parseBTFExtCOREHeader parses the tail of the .BTF.ext header. If additional +// header bytes are present, extHeader.HdrLen will be larger than the struct, +// indicating the presence of a CO-RE extension header. +func parseBTFExtCOREHeader(r io.Reader, bo binary.ByteOrder, extHeader *btfExtHeader) (*btfExtCOREHeader, error) { + extHdrSize := int64(binary.Size(&extHeader)) + remainder := int64(extHeader.HdrLen) - extHdrSize + + if remainder == 0 { + return nil, nil + } + + var coreHeader btfExtCOREHeader + if err := binary.Read(r, bo, &coreHeader); err != nil { + return nil, fmt.Errorf("can't read header: %v", err) + } + + return &coreHeader, nil +} + +type btfExtInfoSec struct { + SecNameOff uint32 + NumInfo uint32 +} + +// parseExtInfoSec parses a btf_ext_info_sec header within .BTF.ext, +// appearing within func_info and line_info sub-sections. +// These headers appear once for each program section in the ELF and are +// followed by one or more func/line_info records for the section. +func parseExtInfoSec(r io.Reader, bo binary.ByteOrder, strings *stringTable) (string, *btfExtInfoSec, error) { + var infoHeader btfExtInfoSec + if err := binary.Read(r, bo, &infoHeader); err != nil { + return "", nil, fmt.Errorf("read ext info header: %w", err) + } + + secName, err := strings.Lookup(infoHeader.SecNameOff) + if err != nil { + return "", nil, fmt.Errorf("get section name: %w", err) + } + if secName == "" { + return "", nil, fmt.Errorf("extinfo header refers to empty section name") + } + + if infoHeader.NumInfo == 0 { + return "", nil, fmt.Errorf("section %s has zero records", secName) + } + + return secName, &infoHeader, nil +} + +// parseExtInfoRecordSize parses the uint32 at the beginning of a func_infos +// or line_infos segment that describes the length of all extInfoRecords in +// that segment. +func parseExtInfoRecordSize(r io.Reader, bo binary.ByteOrder) (uint32, error) { + const maxRecordSize = 256 + + var recordSize uint32 + if err := binary.Read(r, bo, &recordSize); err != nil { + return 0, fmt.Errorf("can't read record size: %v", err) + } + + if recordSize < 4 { + // Need at least InsnOff worth of bytes per record. + return 0, errors.New("record size too short") + } + if recordSize > maxRecordSize { + return 0, fmt.Errorf("record size %v exceeds %v", recordSize, maxRecordSize) + } + + return recordSize, nil +} + +// FuncOffsets is a sorted slice of FuncOffset. +type FuncOffsets []FuncOffset + +// The size of a FuncInfo in BTF wire format. +var FuncInfoSize = uint32(binary.Size(bpfFuncInfo{})) + +// FuncOffset represents a [btf.Func] and its raw instruction offset within a +// BPF program. +type FuncOffset struct { + Offset asm.RawInstructionOffset + Func *Func +} + +type bpfFuncInfo struct { + // Instruction offset of the function within an ELF section. 
+ InsnOff uint32 + TypeID TypeID +} + +func newFuncOffset(fi bpfFuncInfo, spec *Spec) (*FuncOffset, error) { + typ, err := spec.TypeByID(fi.TypeID) + if err != nil { + return nil, err + } + + fn, ok := typ.(*Func) + if !ok { + return nil, fmt.Errorf("type ID %d is a %T, but expected a Func", fi.TypeID, typ) + } + + // C doesn't have anonymous functions, but check just in case. + if fn.Name == "" { + return nil, fmt.Errorf("func with type ID %d doesn't have a name", fi.TypeID) + } + + return &FuncOffset{ + asm.RawInstructionOffset(fi.InsnOff), + fn, + }, nil +} + +func newFuncOffsets(bfis []bpfFuncInfo, spec *Spec) (FuncOffsets, error) { + fos := make(FuncOffsets, 0, len(bfis)) + + for _, bfi := range bfis { + fi, err := newFuncOffset(bfi, spec) + if err != nil { + return FuncOffsets{}, fmt.Errorf("offset %d: %w", bfi.InsnOff, err) + } + fos = append(fos, *fi) + } + sort.Slice(fos, func(i, j int) bool { + return fos[i].Offset <= fos[j].Offset + }) + return fos, nil +} + +// LoadFuncInfos parses BTF func info from kernel wire format into a +// [FuncOffsets], a sorted slice of [btf.Func]s of (sub)programs within a BPF +// program with their corresponding raw instruction offsets. +func LoadFuncInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (FuncOffsets, error) { + fis, err := parseFuncInfoRecords( + reader, + bo, + FuncInfoSize, + recordNum, + false, + ) + if err != nil { + return FuncOffsets{}, fmt.Errorf("parsing BTF func info: %w", err) + } + + return newFuncOffsets(fis, spec) +} + +// marshal into the BTF wire format. +func (fi *FuncOffset) marshal(w *bytes.Buffer, b *Builder) error { + id, err := b.Add(fi.Func) + if err != nil { + return err + } + bfi := bpfFuncInfo{ + InsnOff: uint32(fi.Offset), + TypeID: id, + } + buf := make([]byte, FuncInfoSize) + internal.NativeEndian.PutUint32(buf, bfi.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], uint32(bfi.TypeID)) + _, err = w.Write(buf) + return err +} + +// parseFuncInfos parses a func_info sub-section within .BTF.ext ito a map of +// func infos indexed by section name. +func parseFuncInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfFuncInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfFuncInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseFuncInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseFuncInfoRecords parses a stream of func_infos into a funcInfos. +// These records appear after a btf_ext_info_sec header in the func_info +// sub-section of .BTF.ext. +func parseFuncInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfFuncInfo, error) { + var out []bpfFuncInfo + var fi bpfFuncInfo + + if exp, got := FuncInfoSize, recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. 
+ return nil, fmt.Errorf("expected FuncInfo record size %d, but BTF blob contains %d", exp, got) + } + + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &fi); err != nil { + return nil, fmt.Errorf("can't read function info: %v", err) + } + + if offsetInBytes { + if fi.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", fi.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + fi.InsnOff /= asm.InstructionSize + } + + out = append(out, fi) + } + + return out, nil +} + +var LineInfoSize = uint32(binary.Size(bpfLineInfo{})) + +// Line represents the location and contents of a single line of source +// code a BPF ELF was compiled from. +type Line struct { + fileName string + line string + lineNumber uint32 + lineColumn uint32 +} + +func (li *Line) FileName() string { + return li.fileName +} + +func (li *Line) Line() string { + return li.line +} + +func (li *Line) LineNumber() uint32 { + return li.lineNumber +} + +func (li *Line) LineColumn() uint32 { + return li.lineColumn +} + +func (li *Line) String() string { + return li.line +} + +// LineOffsets contains a sorted list of line infos. +type LineOffsets []LineOffset + +// LineOffset represents a line info and its raw instruction offset. +type LineOffset struct { + Offset asm.RawInstructionOffset + Line *Line +} + +// Constants for the format of bpfLineInfo.LineCol. +const ( + bpfLineShift = 10 + bpfLineMax = (1 << (32 - bpfLineShift)) - 1 + bpfColumnMax = (1 << bpfLineShift) - 1 +) + +type bpfLineInfo struct { + // Instruction offset of the line within the whole instruction stream, in instructions. + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +// LoadLineInfos parses BTF line info in kernel wire format. +func LoadLineInfos(reader io.Reader, bo binary.ByteOrder, recordNum uint32, spec *Spec) (LineOffsets, error) { + lis, err := parseLineInfoRecords( + reader, + bo, + LineInfoSize, + recordNum, + false, + ) + if err != nil { + return LineOffsets{}, fmt.Errorf("parsing BTF line info: %w", err) + } + + return newLineInfos(lis, spec.strings) +} + +func newLineInfo(li bpfLineInfo, strings *stringTable) (LineOffset, error) { + line, err := strings.Lookup(li.LineOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of line: %w", err) + } + + fileName, err := strings.Lookup(li.FileNameOff) + if err != nil { + return LineOffset{}, fmt.Errorf("lookup of filename: %w", err) + } + + lineNumber := li.LineCol >> bpfLineShift + lineColumn := li.LineCol & bpfColumnMax + + return LineOffset{ + asm.RawInstructionOffset(li.InsnOff), + &Line{ + fileName, + line, + lineNumber, + lineColumn, + }, + }, nil +} + +func newLineInfos(blis []bpfLineInfo, strings *stringTable) (LineOffsets, error) { + lis := make([]LineOffset, 0, len(blis)) + for _, bli := range blis { + li, err := newLineInfo(bli, strings) + if err != nil { + return LineOffsets{}, fmt.Errorf("offset %d: %w", bli.InsnOff, err) + } + lis = append(lis, li) + } + sort.Slice(lis, func(i, j int) bool { + return lis[i].Offset <= lis[j].Offset + }) + return lis, nil +} + +// marshal writes the binary representation of the LineInfo to w. 
+func (li *LineOffset) marshal(w *bytes.Buffer, b *Builder) error { + line := li.Line + if line.lineNumber > bpfLineMax { + return fmt.Errorf("line %d exceeds %d", line.lineNumber, bpfLineMax) + } + + if line.lineColumn > bpfColumnMax { + return fmt.Errorf("column %d exceeds %d", line.lineColumn, bpfColumnMax) + } + + fileNameOff, err := b.addString(line.fileName) + if err != nil { + return fmt.Errorf("file name %q: %w", line.fileName, err) + } + + lineOff, err := b.addString(line.line) + if err != nil { + return fmt.Errorf("line %q: %w", line.line, err) + } + + bli := bpfLineInfo{ + uint32(li.Offset), + fileNameOff, + lineOff, + (line.lineNumber << bpfLineShift) | line.lineColumn, + } + + buf := make([]byte, LineInfoSize) + internal.NativeEndian.PutUint32(buf, bli.InsnOff) + internal.NativeEndian.PutUint32(buf[4:], bli.FileNameOff) + internal.NativeEndian.PutUint32(buf[8:], bli.LineOff) + internal.NativeEndian.PutUint32(buf[12:], bli.LineCol) + _, err = w.Write(buf) + return err +} + +// parseLineInfos parses a line_info sub-section within .BTF.ext ito a map of +// line infos indexed by section name. +func parseLineInfos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfLineInfo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + result := make(map[string][]bpfLineInfo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseLineInfoRecords(r, bo, recordSize, infoHeader.NumInfo, true) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseLineInfoRecords parses a stream of line_infos into a lineInfos. +// These records appear after a btf_ext_info_sec header in the line_info +// sub-section of .BTF.ext. +func parseLineInfoRecords(r io.Reader, bo binary.ByteOrder, recordSize uint32, recordNum uint32, offsetInBytes bool) ([]bpfLineInfo, error) { + var li bpfLineInfo + + if exp, got := uint32(binary.Size(li)), recordSize; exp != got { + // BTF blob's record size is longer than we know how to parse. + return nil, fmt.Errorf("expected LineInfo record size %d, but BTF blob contains %d", exp, got) + } + + out := make([]bpfLineInfo, 0, recordNum) + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &li); err != nil { + return nil, fmt.Errorf("can't read line info: %v", err) + } + + if offsetInBytes { + if li.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", li.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. + li.InsnOff /= asm.InstructionSize + } + + out = append(out, li) + } + + return out, nil +} + +// bpfCORERelo matches the kernel's struct bpf_core_relo. +type bpfCORERelo struct { + InsnOff uint32 + TypeID TypeID + AccessStrOff uint32 + Kind coreKind +} + +type CORERelocation struct { + // The local type of the relocation, stripped of typedefs and qualifiers. + typ Type + accessor coreAccessor + kind coreKind + // The ID of the local type in the source BTF. 
+ id TypeID +} + +func (cr *CORERelocation) String() string { + return fmt.Sprintf("CORERelocation(%s, %s[%s], local_id=%d)", cr.kind, cr.typ, cr.accessor, cr.id) +} + +func CORERelocationMetadata(ins *asm.Instruction) *CORERelocation { + relo, _ := ins.Metadata.Get(coreRelocationMeta{}).(*CORERelocation) + return relo +} + +// CORERelocationInfos contains a sorted list of co:re relocation infos. +type CORERelocationInfos struct { + infos []coreRelocationInfo +} + +type coreRelocationInfo struct { + relo *CORERelocation + offset asm.RawInstructionOffset +} + +func newRelocationInfo(relo bpfCORERelo, spec *Spec, strings *stringTable) (*coreRelocationInfo, error) { + typ, err := spec.TypeByID(relo.TypeID) + if err != nil { + return nil, err + } + + accessorStr, err := strings.Lookup(relo.AccessStrOff) + if err != nil { + return nil, err + } + + accessor, err := parseCOREAccessor(accessorStr) + if err != nil { + return nil, fmt.Errorf("accessor %q: %s", accessorStr, err) + } + + return &coreRelocationInfo{ + &CORERelocation{ + typ, + accessor, + relo.Kind, + relo.TypeID, + }, + asm.RawInstructionOffset(relo.InsnOff), + }, nil +} + +func newRelocationInfos(brs []bpfCORERelo, spec *Spec, strings *stringTable) (CORERelocationInfos, error) { + rs := CORERelocationInfos{ + infos: make([]coreRelocationInfo, 0, len(brs)), + } + for _, br := range brs { + relo, err := newRelocationInfo(br, spec, strings) + if err != nil { + return CORERelocationInfos{}, fmt.Errorf("offset %d: %w", br.InsnOff, err) + } + rs.infos = append(rs.infos, *relo) + } + sort.Slice(rs.infos, func(i, j int) bool { + return rs.infos[i].offset < rs.infos[j].offset + }) + return rs, nil +} + +var extInfoReloSize = binary.Size(bpfCORERelo{}) + +// parseCORERelos parses a core_relos sub-section within .BTF.ext ito a map of +// CO-RE relocations indexed by section name. +func parseCORERelos(r io.Reader, bo binary.ByteOrder, strings *stringTable) (map[string][]bpfCORERelo, error) { + recordSize, err := parseExtInfoRecordSize(r, bo) + if err != nil { + return nil, err + } + + if recordSize != uint32(extInfoReloSize) { + return nil, fmt.Errorf("expected record size %d, got %d", extInfoReloSize, recordSize) + } + + result := make(map[string][]bpfCORERelo) + for { + secName, infoHeader, err := parseExtInfoSec(r, bo, strings) + if errors.Is(err, io.EOF) { + return result, nil + } + if err != nil { + return nil, err + } + + records, err := parseCOREReloRecords(r, bo, infoHeader.NumInfo) + if err != nil { + return nil, fmt.Errorf("section %v: %w", secName, err) + } + + result[secName] = records + } +} + +// parseCOREReloRecords parses a stream of CO-RE relocation entries into a +// coreRelos. These records appear after a btf_ext_info_sec header in the +// core_relos sub-section of .BTF.ext. +func parseCOREReloRecords(r io.Reader, bo binary.ByteOrder, recordNum uint32) ([]bpfCORERelo, error) { + var out []bpfCORERelo + + var relo bpfCORERelo + for i := uint32(0); i < recordNum; i++ { + if err := binary.Read(r, bo, &relo); err != nil { + return nil, fmt.Errorf("can't read CO-RE relocation: %v", err) + } + + if relo.InsnOff%asm.InstructionSize != 0 { + return nil, fmt.Errorf("offset %v is not aligned with instruction size", relo.InsnOff) + } + + // ELF tracks offset in bytes, the kernel expects raw BPF instructions. + // Convert as early as possible. 
+ relo.InsnOff /= asm.InstructionSize + + out = append(out, relo) + } + + return out, nil +} diff --git a/vendor/github.com/cilium/ebpf/btf/feature.go b/vendor/github.com/cilium/ebpf/btf/feature.go new file mode 100644 index 0000000000..e71c707fe4 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/feature.go @@ -0,0 +1,158 @@ +package btf + +import ( + "errors" + "math" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// haveBTF attempts to load a BTF blob containing an Int. It should pass on any +// kernel that supports BPF_BTF_LOAD. +var haveBTF = internal.NewFeatureTest("BTF", func() error { + // 0-length anonymous integer + err := probeBTF(&Int{}) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "4.18") + +// haveMapBTF attempts to load a minimal BTF blob containing a Var. It is +// used as a proxy for .bss, .data and .rodata map support, which generally +// come with a Var and Datasec. These were introduced in Linux 5.2. +var haveMapBTF = internal.NewFeatureTest("Map BTF (Var/Datasec)", func() error { + if err := haveBTF(); err != nil { + return err + } + + v := &Var{ + Name: "a", + Type: &Pointer{(*Void)(nil)}, + } + + err := probeBTF(v) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + // Treat both EINVAL and EPERM as not supported: creating the map may still + // succeed without Btf* attrs. + return internal.ErrNotSupported + } + return err +}, "5.2") + +// haveProgBTF attempts to load a BTF blob containing a Func and FuncProto. It +// is used as a proxy for ext_info (func_info) support, which depends on +// Func(Proto) by definition. +var haveProgBTF = internal.NewFeatureTest("Program BTF (func/line_info)", func() error { + if err := haveBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM) { + return internal.ErrNotSupported + } + return err +}, "5.0") + +var haveFuncLinkage = internal.NewFeatureTest("BTF func linkage", func() error { + if err := haveProgBTF(); err != nil { + return err + } + + fn := &Func{ + Name: "a", + Type: &FuncProto{Return: (*Void)(nil)}, + Linkage: GlobalFunc, + } + + err := probeBTF(fn) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.6") + +var haveDeclTags = internal.NewFeatureTest("BTF decl tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &Typedef{ + Name: "a", + Type: &Int{}, + Tags: []string{"a"}, + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.16") + +var haveTypeTags = internal.NewFeatureTest("BTF type tags", func() error { + if err := haveBTF(); err != nil { + return err + } + + t := &TypeTag{ + Type: &Int{}, + Value: "a", + } + + err := probeBTF(t) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "5.17") + +var haveEnum64 = internal.NewFeatureTest("ENUM64", func() error { + if err := haveBTF(); err != nil { + return err + } + + enum := &Enum{ + Size: 8, + Values: []EnumValue{ + {"TEST", math.MaxUint32 + 1}, + }, + } + + err := probeBTF(enum) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + return err +}, "6.0") + +func probeBTF(typ Type) error { + b, err := NewBuilder([]Type{typ}) + if err != nil { + return err + } + + buf, 
err := b.Marshal(nil, nil) + if err != nil { + return err + } + + fd, err := sys.BtfLoad(&sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(buf), + BtfSize: uint32(len(buf)), + }) + + if err == nil { + fd.Close() + } + + return err +} diff --git a/vendor/github.com/cilium/ebpf/btf/format.go b/vendor/github.com/cilium/ebpf/btf/format.go new file mode 100644 index 0000000000..3e0dedaa2b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/format.go @@ -0,0 +1,353 @@ +package btf + +import ( + "errors" + "fmt" + "strings" +) + +var errNestedTooDeep = errors.New("nested too deep") + +// GoFormatter converts a Type to Go syntax. +// +// A zero GoFormatter is valid to use. +type GoFormatter struct { + w strings.Builder + + // Types present in this map are referred to using the given name if they + // are encountered when outputting another type. + Names map[Type]string + + // Identifier is called for each field of struct-like types. By default the + // field name is used as is. + Identifier func(string) string + + // EnumIdentifier is called for each element of an enum. By default the + // name of the enum type is concatenated with Identifier(element). + EnumIdentifier func(name, element string) string +} + +// TypeDeclaration generates a Go type declaration for a BTF type. +func (gf *GoFormatter) TypeDeclaration(name string, typ Type) (string, error) { + gf.w.Reset() + if err := gf.writeTypeDecl(name, typ); err != nil { + return "", err + } + return gf.w.String(), nil +} + +func (gf *GoFormatter) identifier(s string) string { + if gf.Identifier != nil { + return gf.Identifier(s) + } + + return s +} + +func (gf *GoFormatter) enumIdentifier(name, element string) string { + if gf.EnumIdentifier != nil { + return gf.EnumIdentifier(name, element) + } + + return name + gf.identifier(element) +} + +// writeTypeDecl outputs a declaration of the given type. +// +// It encodes https://golang.org/ref/spec#Type_declarations: +// +// type foo struct { bar uint32; } +// type bar int32 +func (gf *GoFormatter) writeTypeDecl(name string, typ Type) error { + if name == "" { + return fmt.Errorf("need a name for type %s", typ) + } + + typ = skipQualifiers(typ) + fmt.Fprintf(&gf.w, "type %s ", name) + if err := gf.writeTypeLit(typ, 0); err != nil { + return err + } + + e, ok := typ.(*Enum) + if !ok || len(e.Values) == 0 { + return nil + } + + gf.w.WriteString("; const ( ") + for _, ev := range e.Values { + id := gf.enumIdentifier(name, ev.Name) + var value any + if e.Signed { + value = int64(ev.Value) + } else { + value = ev.Value + } + fmt.Fprintf(&gf.w, "%s %s = %d; ", id, name, value) + } + gf.w.WriteString(")") + + return nil +} + +// writeType outputs the name of a named type or a literal describing the type. +// +// It encodes https://golang.org/ref/spec#Types. +// +// foo (if foo is a named type) +// uint32 +func (gf *GoFormatter) writeType(typ Type, depth int) error { + typ = skipQualifiers(typ) + + name := gf.Names[typ] + if name != "" { + gf.w.WriteString(name) + return nil + } + + return gf.writeTypeLit(typ, depth) +} + +// writeTypeLit outputs a literal describing the type. +// +// The function ignores named types. +// +// It encodes https://golang.org/ref/spec#TypeLit. 
+// +// struct { bar uint32; } +// uint32 +func (gf *GoFormatter) writeTypeLit(typ Type, depth int) error { + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + var err error + switch v := skipQualifiers(typ).(type) { + case *Int: + err = gf.writeIntLit(v) + + case *Enum: + if !v.Signed { + gf.w.WriteRune('u') + } + switch v.Size { + case 1: + gf.w.WriteString("int8") + case 2: + gf.w.WriteString("int16") + case 4: + gf.w.WriteString("int32") + case 8: + gf.w.WriteString("int64") + default: + err = fmt.Errorf("invalid enum size %d", v.Size) + } + + case *Typedef: + err = gf.writeType(v.Type, depth) + + case *Array: + fmt.Fprintf(&gf.w, "[%d]", v.Nelems) + err = gf.writeType(v.Type, depth) + + case *Struct: + err = gf.writeStructLit(v.Size, v.Members, depth) + + case *Union: + // Always choose the first member to represent the union in Go. + err = gf.writeStructLit(v.Size, v.Members[:1], depth) + + case *Datasec: + err = gf.writeDatasecLit(v, depth) + + case *Var: + err = gf.writeTypeLit(v.Type, depth) + + default: + return fmt.Errorf("type %T: %w", v, ErrNotSupported) + } + + if err != nil { + return fmt.Errorf("%s: %w", typ, err) + } + + return nil +} + +func (gf *GoFormatter) writeIntLit(i *Int) error { + bits := i.Size * 8 + switch i.Encoding { + case Bool: + if i.Size != 1 { + return fmt.Errorf("bool with size %d", i.Size) + } + gf.w.WriteString("bool") + case Char: + if i.Size != 1 { + return fmt.Errorf("char with size %d", i.Size) + } + // BTF doesn't have a way to specify the signedness of a char. Assume + // we are dealing with unsigned, since this works nicely with []byte + // in Go code. + fallthrough + case Unsigned, Signed: + stem := "uint" + if i.Encoding == Signed { + stem = "int" + } + if i.Size > 8 { + fmt.Fprintf(&gf.w, "[%d]byte /* %s%d */", i.Size, stem, i.Size*8) + } else { + fmt.Fprintf(&gf.w, "%s%d", stem, bits) + } + default: + return fmt.Errorf("can't encode %s", i.Encoding) + } + return nil +} + +func (gf *GoFormatter) writeStructLit(size uint32, members []Member, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + skippedBitfield := false + for i, m := range members { + if m.BitfieldSize > 0 { + skippedBitfield = true + continue + } + + offset := m.Offset.Bytes() + if n := offset - prevOffset; skippedBitfield && n > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte /* unsupported bitfield */; ", n) + } else { + gf.writePadding(n) + } + + fieldSize, err := Sizeof(m.Type) + if err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + + prevOffset = offset + uint32(fieldSize) + if prevOffset > size { + return fmt.Errorf("field %d of size %d exceeds type size %d", i, fieldSize, size) + } + + if err := gf.writeStructField(m, depth); err != nil { + return fmt.Errorf("field %d: %w", i, err) + } + } + + gf.writePadding(size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writeStructField(m Member, depth int) error { + if m.BitfieldSize > 0 { + return fmt.Errorf("bitfields are not supported") + } + if m.Offset%8 != 0 { + return fmt.Errorf("unsupported offset %d", m.Offset) + } + + if m.Name == "" { + // Special case a nested anonymous union like + // struct foo { union { int bar; int baz }; } + // by replacing the whole union with its first member. 
+ union, ok := m.Type.(*Union) + if !ok { + return fmt.Errorf("anonymous fields are not supported") + + } + + if len(union.Members) == 0 { + return errors.New("empty anonymous union") + } + + depth++ + if depth > maxResolveDepth { + return errNestedTooDeep + } + + m := union.Members[0] + size, err := Sizeof(m.Type) + if err != nil { + return err + } + + if err := gf.writeStructField(m, depth); err != nil { + return err + } + + gf.writePadding(union.Size - uint32(size)) + return nil + + } + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(m.Name)) + + if err := gf.writeType(m.Type, depth); err != nil { + return err + } + + gf.w.WriteString("; ") + return nil +} + +func (gf *GoFormatter) writeDatasecLit(ds *Datasec, depth int) error { + gf.w.WriteString("struct { ") + + prevOffset := uint32(0) + for i, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + return fmt.Errorf("can't format %s as part of data section", vsi.Type) + } + + if v.Linkage != GlobalVar { + // Ignore static, extern, etc. for now. + continue + } + + if v.Name == "" { + return fmt.Errorf("variable %d: empty name", i) + } + + gf.writePadding(vsi.Offset - prevOffset) + prevOffset = vsi.Offset + vsi.Size + + fmt.Fprintf(&gf.w, "%s ", gf.identifier(v.Name)) + + if err := gf.writeType(v.Type, depth); err != nil { + return fmt.Errorf("variable %d: %w", i, err) + } + + gf.w.WriteString("; ") + } + + gf.writePadding(ds.Size - prevOffset) + gf.w.WriteString("}") + return nil +} + +func (gf *GoFormatter) writePadding(bytes uint32) { + if bytes > 0 { + fmt.Fprintf(&gf.w, "_ [%d]byte; ", bytes) + } +} + +func skipQualifiers(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} diff --git a/vendor/github.com/cilium/ebpf/btf/handle.go b/vendor/github.com/cilium/ebpf/btf/handle.go new file mode 100644 index 0000000000..adfa6fed4b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/handle.go @@ -0,0 +1,317 @@ +package btf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Handle is a reference to BTF loaded into the kernel. +type Handle struct { + fd *sys.FD + + // Size of the raw BTF in bytes. + size uint32 + + needsKernelBase bool +} + +// NewHandle loads the contents of a [Builder] into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. +func NewHandle(b *Builder) (*Handle, error) { + small := getByteSlice() + defer putByteSlice(small) + + buf, err := b.Marshal(*small, KernelMarshalOptions()) + if err != nil { + return nil, fmt.Errorf("marshal BTF: %w", err) + } + + return NewHandleFromRawBTF(buf) +} + +// NewHandleFromRawBTF loads raw BTF into the kernel. +// +// Returns an error wrapping ErrNotSupported if the kernel doesn't support BTF. 
+func NewHandleFromRawBTF(btf []byte) (*Handle, error) { + const minLogSize = 64 * 1024 + + if uint64(len(btf)) > math.MaxUint32 { + return nil, errors.New("BTF exceeds the maximum size") + } + + attr := &sys.BtfLoadAttr{ + Btf: sys.NewSlicePointer(btf), + BtfSize: uint32(len(btf)), + } + + var ( + logBuf []byte + err error + ) + for { + var fd *sys.FD + fd, err = sys.BtfLoad(attr) + if err == nil { + return &Handle{fd, attr.BtfSize, false}, nil + } + + if attr.BtfLogTrueSize != 0 && attr.BtfLogSize >= attr.BtfLogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.BtfLogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Up until at least kernel 6.0, the BTF verifier does not return ENOSPC + // if there are other verification errors. ENOSPC is only returned when + // the BTF blob is correct, a log was requested, and the provided buffer + // is too small. We're therefore not sure whether we got the full + // log or not. + break + } + + // Make an educated guess how large the buffer should be. Start + // at a reasonable minimum and then double the size. + logSize := uint32(max(len(logBuf)*2, minLogSize)) + if int(logSize) < len(logBuf) { + return nil, errors.New("overflow while probing log buffer size") + } + + if attr.BtfLogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.BtfLogTrueSize + } + + logBuf = make([]byte, logSize) + attr.BtfLogSize = logSize + attr.BtfLogBuf = sys.NewSlicePointer(logBuf) + attr.BtfLogLevel = 1 + } + + if err := haveBTF(); err != nil { + return nil, err + } + + return nil, internal.ErrorWithLog("load btf", err, logBuf) +} + +// NewHandleFromID returns the BTF handle for a given id. +// +// Prefer calling [ebpf.Program.Handle] or [ebpf.Map.Handle] if possible. +// +// Returns ErrNotExist, if there is no BTF with the given id. +// +// Requires CAP_SYS_ADMIN. +func NewHandleFromID(id ID) (*Handle, error) { + fd, err := sys.BtfGetFdById(&sys.BtfGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get FD for ID %d: %w", id, err) + } + + info, err := newHandleInfoFromFD(fd) + if err != nil { + _ = fd.Close() + return nil, err + } + + return &Handle{fd, info.size, info.IsModule()}, nil +} + +// Spec parses the kernel BTF into Go types. +// +// base must contain type information for vmlinux if the handle is for +// a kernel module. It may be nil otherwise. +func (h *Handle) Spec(base *Spec) (*Spec, error) { + var btfInfo sys.BtfInfo + btfBuffer := make([]byte, h.size) + btfInfo.Btf, btfInfo.BtfSize = sys.NewSlicePointerLen(btfBuffer) + + if err := sys.ObjInfo(h.fd, &btfInfo); err != nil { + return nil, err + } + + if h.needsKernelBase && base == nil { + return nil, fmt.Errorf("missing base types") + } + + return loadRawSpec(bytes.NewReader(btfBuffer), internal.NativeEndian, base) +} + +// Close destroys the handle. +// +// Subsequent calls to FD will return an invalid value. +func (h *Handle) Close() error { + if h == nil { + return nil + } + + return h.fd.Close() +} + +// FD returns the file descriptor for the handle. +func (h *Handle) FD() int { + return h.fd.Int() +} + +// Info returns metadata about the handle. +func (h *Handle) Info() (*HandleInfo, error) { + return newHandleInfoFromFD(h.fd) +} + +// HandleInfo describes a Handle. +type HandleInfo struct { + // ID of this handle in the kernel. The ID is only valid as long as the + // associated handle is kept alive. 
+ ID ID + + // Name is an identifying name for the BTF, currently only used by the + // kernel. + Name string + + // IsKernel is true if the BTF originated with the kernel and not + // userspace. + IsKernel bool + + // Size of the raw BTF in bytes. + size uint32 +} + +func newHandleInfoFromFD(fd *sys.FD) (*HandleInfo, error) { + // We invoke the syscall once with a empty BTF and name buffers to get size + // information to allocate buffers. Then we invoke it a second time with + // buffers to receive the data. + var btfInfo sys.BtfInfo + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, fmt.Errorf("get BTF info for fd %s: %w", fd, err) + } + + if btfInfo.NameLen > 0 { + // NameLen doesn't account for the terminating NUL. + btfInfo.NameLen++ + } + + // Don't pull raw BTF by default, since it may be quite large. + btfSize := btfInfo.BtfSize + btfInfo.BtfSize = 0 + + nameBuffer := make([]byte, btfInfo.NameLen) + btfInfo.Name, btfInfo.NameLen = sys.NewSlicePointerLen(nameBuffer) + if err := sys.ObjInfo(fd, &btfInfo); err != nil { + return nil, err + } + + return &HandleInfo{ + ID: ID(btfInfo.Id), + Name: unix.ByteSliceToString(nameBuffer), + IsKernel: btfInfo.KernelBtf != 0, + size: btfSize, + }, nil +} + +// IsVmlinux returns true if the BTF is for the kernel itself. +func (i *HandleInfo) IsVmlinux() bool { + return i.IsKernel && i.Name == "vmlinux" +} + +// IsModule returns true if the BTF is for a kernel module. +func (i *HandleInfo) IsModule() bool { + return i.IsKernel && i.Name != "vmlinux" +} + +// HandleIterator allows enumerating BTF blobs loaded into the kernel. +type HandleIterator struct { + // The ID of the current handle. Only valid after a call to Next. + ID ID + // The current Handle. Only valid until a call to Next. + // See Take if you want to retain the handle. + Handle *Handle + err error +} + +// Next retrieves a handle for the next BTF object. +// +// Returns true if another BTF object was found. Call [HandleIterator.Err] after +// the function returns false. +func (it *HandleIterator) Next() bool { + id := it.ID + for { + attr := &sys.BtfGetNextIdAttr{Id: id} + err := sys.BtfGetNextId(attr) + if errors.Is(err, os.ErrNotExist) { + // There are no more BTF objects. + break + } else if err != nil { + it.err = fmt.Errorf("get next BTF ID: %w", err) + break + } + + id = attr.NextId + handle, err := NewHandleFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Try again with the next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("retrieve handle for ID %d: %w", id, err) + break + } + + it.Handle.Close() + it.ID, it.Handle = id, handle + return true + } + + // No more handles or we encountered an error. + it.Handle.Close() + it.Handle = nil + return false +} + +// Take the ownership of the current handle. +// +// It's the callers responsibility to close the handle. +func (it *HandleIterator) Take() *Handle { + handle := it.Handle + it.Handle = nil + return handle +} + +// Err returns an error if iteration failed for some reason. +func (it *HandleIterator) Err() error { + return it.err +} + +// FindHandle returns the first handle for which predicate returns true. +// +// Requires CAP_SYS_ADMIN. +// +// Returns an error wrapping ErrNotFound if predicate never returns true or if +// there is no BTF loaded into the kernel. 
+func FindHandle(predicate func(info *HandleInfo) bool) (*Handle, error) { + it := new(HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, fmt.Errorf("info for ID %d: %w", it.ID, err) + } + + if predicate(info) { + return it.Take(), nil + } + } + if err := it.Err(); err != nil { + return nil, fmt.Errorf("iterate handles: %w", err) + } + + return nil, fmt.Errorf("find handle: %w", ErrNotFound) +} diff --git a/vendor/github.com/cilium/ebpf/btf/kernel.go b/vendor/github.com/cilium/ebpf/btf/kernel.go new file mode 100644 index 0000000000..1a9321f054 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/kernel.go @@ -0,0 +1,157 @@ +package btf + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" +) + +var kernelBTF = struct { + sync.RWMutex + kernel *Spec + modules map[string]*Spec +}{ + modules: make(map[string]*Spec), +} + +// FlushKernelSpec removes any cached kernel type information. +func FlushKernelSpec() { + kernelBTF.Lock() + defer kernelBTF.Unlock() + + kernelBTF.kernel = nil + kernelBTF.modules = make(map[string]*Spec) +} + +// LoadKernelSpec returns the current kernel's BTF information. +// +// Defaults to /sys/kernel/btf/vmlinux and falls back to scanning the file system +// for vmlinux ELFs. Returns an error wrapping ErrNotSupported if BTF is not enabled. +func LoadKernelSpec() (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.kernel + kernelBTF.RUnlock() + + if spec == nil { + kernelBTF.Lock() + defer kernelBTF.Unlock() + + spec = kernelBTF.kernel + } + + if spec != nil { + return spec.Copy(), nil + } + + spec, _, err := loadKernelSpec() + if err != nil { + return nil, err + } + + kernelBTF.kernel = spec + return spec.Copy(), nil +} + +// LoadKernelModuleSpec returns the BTF information for the named kernel module. +// +// Defaults to /sys/kernel/btf/. +// Returns an error wrapping ErrNotSupported if BTF is not enabled. +// Returns an error wrapping fs.ErrNotExist if BTF for the specific module doesn't exist. 
+func LoadKernelModuleSpec(module string) (*Spec, error) { + kernelBTF.RLock() + spec := kernelBTF.modules[module] + kernelBTF.RUnlock() + + if spec != nil { + return spec.Copy(), nil + } + + base, err := LoadKernelSpec() + if err != nil { + return nil, fmt.Errorf("load kernel spec: %w", err) + } + + kernelBTF.Lock() + defer kernelBTF.Unlock() + + if spec = kernelBTF.modules[module]; spec != nil { + return spec.Copy(), nil + } + + spec, err = loadKernelModuleSpec(module, base) + if err != nil { + return nil, err + } + + kernelBTF.modules[module] = spec + return spec.Copy(), nil +} + +func loadKernelSpec() (_ *Spec, fallback bool, _ error) { + fh, err := os.Open("/sys/kernel/btf/vmlinux") + if err == nil { + defer fh.Close() + + spec, err := loadRawSpec(fh, internal.NativeEndian, nil) + return spec, false, err + } + + file, err := findVMLinux() + if err != nil { + return nil, false, err + } + defer file.Close() + + spec, err := LoadSpecFromReader(file) + return spec, true, err +} + +func loadKernelModuleSpec(module string, base *Spec) (*Spec, error) { + dir, file := filepath.Split(module) + if dir != "" || filepath.Ext(file) != "" { + return nil, fmt.Errorf("invalid module name %q", module) + } + + fh, err := os.Open(filepath.Join("/sys/kernel/btf", module)) + if err != nil { + return nil, err + } + defer fh.Close() + + return loadRawSpec(fh, internal.NativeEndian, base) +} + +// findVMLinux scans multiple well-known paths for vmlinux kernel images. +func findVMLinux() (*os.File, error) { + release, err := linux.KernelRelease() + if err != nil { + return nil, err + } + + // use same list of locations as libbpf + // https://github.com/libbpf/libbpf/blob/9a3a42608dbe3731256a5682a125ac1e23bced8f/src/btf.c#L3114-L3122 + locations := []string{ + "/boot/vmlinux-%s", + "/lib/modules/%s/vmlinux-%[1]s", + "/lib/modules/%s/build/vmlinux", + "/usr/lib/modules/%s/kernel/vmlinux", + "/usr/lib/debug/boot/vmlinux-%s", + "/usr/lib/debug/boot/vmlinux-%s.debug", + "/usr/lib/debug/lib/modules/%s/vmlinux", + } + + for _, loc := range locations { + file, err := os.Open(fmt.Sprintf(loc, release)) + if errors.Is(err, os.ErrNotExist) { + continue + } + return file, err + } + + return nil, fmt.Errorf("no BTF found for kernel version %s: %w", release, internal.ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/btf/marshal.go b/vendor/github.com/cilium/ebpf/btf/marshal.go new file mode 100644 index 0000000000..d7204e6247 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/marshal.go @@ -0,0 +1,654 @@ +package btf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "maps" + "math" + "slices" + "sync" + + "github.com/cilium/ebpf/internal" +) + +type MarshalOptions struct { + // Target byte order. Defaults to the system's native endianness. + Order binary.ByteOrder + // Remove function linkage information for compatibility with <5.6 kernels. + StripFuncLinkage bool + // Replace decl tags with a placeholder for compatibility with <5.16 kernels. + ReplaceDeclTags bool + // Replace TypeTags with a placeholder for compatibility with <5.17 kernels. + ReplaceTypeTags bool + // Replace Enum64 with a placeholder for compatibility with <6.0 kernels. + ReplaceEnum64 bool + // Prevent the "No type found" error when loading BTF without any types. + PreventNoTypeFound bool +} + +// KernelMarshalOptions will generate BTF suitable for the current kernel. 
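+//
+// The result is typically passed to [Builder.Marshal]. Illustrative sketch,
+// where b is an assumed *Builder:
+//
+//	raw, err := b.Marshal(nil, KernelMarshalOptions())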
+func KernelMarshalOptions() *MarshalOptions { + return &MarshalOptions{ + Order: internal.NativeEndian, + StripFuncLinkage: haveFuncLinkage() != nil, + ReplaceDeclTags: haveDeclTags() != nil, + ReplaceTypeTags: haveTypeTags() != nil, + ReplaceEnum64: haveEnum64() != nil, + PreventNoTypeFound: true, // All current kernels require this. + } +} + +// encoder turns Types into raw BTF. +type encoder struct { + MarshalOptions + + pending internal.Deque[Type] + buf *bytes.Buffer + strings *stringTableBuilder + ids map[Type]TypeID + visited map[Type]struct{} + lastID TypeID +} + +var bufferPool = sync.Pool{ + New: func() any { + buf := make([]byte, btfHeaderLen+128) + return &buf + }, +} + +func getByteSlice() *[]byte { + return bufferPool.Get().(*[]byte) +} + +func putByteSlice(buf *[]byte) { + *buf = (*buf)[:0] + bufferPool.Put(buf) +} + +// Builder turns Types into raw BTF. +// +// The default value may be used and represents an empty BTF blob. Void is +// added implicitly if necessary. +type Builder struct { + // Explicitly added types. + types []Type + // IDs for all added types which the user knows about. + stableIDs map[Type]TypeID + // Explicitly added strings. + strings *stringTableBuilder +} + +// NewBuilder creates a Builder from a list of types. +// +// It is more efficient than calling [Add] individually. +// +// Returns an error if adding any of the types fails. +func NewBuilder(types []Type) (*Builder, error) { + b := &Builder{ + make([]Type, 0, len(types)), + make(map[Type]TypeID, len(types)), + nil, + } + + for _, typ := range types { + _, err := b.Add(typ) + if err != nil { + return nil, fmt.Errorf("add %s: %w", typ, err) + } + } + + return b, nil +} + +// Empty returns true if neither types nor strings have been added. +func (b *Builder) Empty() bool { + return len(b.types) == 0 && (b.strings == nil || b.strings.Length() == 0) +} + +// Add a Type and allocate a stable ID for it. +// +// Adding the identical Type multiple times is valid and will return the same ID. +// +// See [Type] for details on identity. +func (b *Builder) Add(typ Type) (TypeID, error) { + if b.stableIDs == nil { + b.stableIDs = make(map[Type]TypeID) + } + + if _, ok := typ.(*Void); ok { + // Equality is weird for void, since it is a zero sized type. + return 0, nil + } + + if ds, ok := typ.(*Datasec); ok { + if err := datasecResolveWorkaround(b, ds); err != nil { + return 0, err + } + } + + id, ok := b.stableIDs[typ] + if ok { + return id, nil + } + + b.types = append(b.types, typ) + + id = TypeID(len(b.types)) + if int(id) != len(b.types) { + return 0, fmt.Errorf("no more type IDs") + } + + b.stableIDs[typ] = id + return id, nil +} + +// Marshal encodes all types in the Marshaler into BTF wire format. +// +// opts may be nil. +func (b *Builder) Marshal(buf []byte, opts *MarshalOptions) ([]byte, error) { + stb := b.strings + if stb == nil { + // Assume that most types are named. This makes encoding large BTF like + // vmlinux a lot cheaper. + stb = newStringTableBuilder(len(b.types)) + } else { + // Avoid modifying the Builder's string table. + stb = b.strings.Copy() + } + + if opts == nil { + opts = &MarshalOptions{Order: internal.NativeEndian} + } + + // Reserve space for the BTF header. 
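+	// Its contents are only known once the type and string sections have been
+	// encoded, so the reserved bytes are filled in at the end of this function.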
+ buf = slices.Grow(buf, btfHeaderLen)[:btfHeaderLen] + + w := internal.NewBuffer(buf) + defer internal.PutBuffer(w) + + e := encoder{ + MarshalOptions: *opts, + buf: w, + strings: stb, + lastID: TypeID(len(b.types)), + visited: make(map[Type]struct{}, len(b.types)), + ids: maps.Clone(b.stableIDs), + } + + if e.ids == nil { + e.ids = make(map[Type]TypeID) + } + + types := b.types + if len(types) == 0 && stb.Length() > 0 && opts.PreventNoTypeFound { + // We have strings that need to be written out, + // but no types (besides the implicit Void). + // Kernels as recent as v6.7 refuse to load such BTF + // with a "No type found" error in the log. + // Fix this by adding a dummy type. + types = []Type{&Int{Size: 0}} + } + + // Ensure that types are marshaled in the exact order they were Add()ed. + // Otherwise the ID returned from Add() won't match. + e.pending.Grow(len(types)) + for _, typ := range types { + e.pending.Push(typ) + } + + if err := e.deflatePending(); err != nil { + return nil, err + } + + length := e.buf.Len() + typeLen := uint32(length - btfHeaderLen) + + stringLen := e.strings.Length() + buf = e.strings.AppendEncoded(e.buf.Bytes()) + + // Fill out the header, and write it out. + header := &btfHeader{ + Magic: btfMagic, + Version: 1, + Flags: 0, + HdrLen: uint32(btfHeaderLen), + TypeOff: 0, + TypeLen: typeLen, + StringOff: typeLen, + StringLen: uint32(stringLen), + } + + err := binary.Write(sliceWriter(buf[:btfHeaderLen]), e.Order, header) + if err != nil { + return nil, fmt.Errorf("write header: %v", err) + } + + return buf, nil +} + +// addString adds a string to the resulting BTF. +// +// Adding the same string multiple times will return the same result. +// +// Returns an identifier into the string table or an error if the string +// contains invalid characters. +func (b *Builder) addString(str string) (uint32, error) { + if b.strings == nil { + b.strings = newStringTableBuilder(0) + } + + return b.strings.Add(str) +} + +func (e *encoder) allocateIDs(root Type) (err error) { + visitInPostorder(root, e.visited, func(typ Type) bool { + if _, ok := typ.(*Void); ok { + return true + } + + if _, ok := e.ids[typ]; ok { + return true + } + + id := e.lastID + 1 + if id < e.lastID { + err = errors.New("type ID overflow") + return false + } + + e.pending.Push(typ) + e.ids[typ] = id + e.lastID = id + return true + }) + return +} + +// id returns the ID for the given type or panics with an error. +func (e *encoder) id(typ Type) TypeID { + if _, ok := typ.(*Void); ok { + return 0 + } + + id, ok := e.ids[typ] + if !ok { + panic(fmt.Errorf("no ID for type %v", typ)) + } + + return id +} + +func (e *encoder) deflatePending() error { + // Declare root outside of the loop to avoid repeated heap allocations. + var root Type + + for !e.pending.Empty() { + root = e.pending.Shift() + + // Allocate IDs for all children of typ, including transitive dependencies. 
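+		// allocateIDs also pushes newly discovered types onto e.pending, so they
+		// are deflated by later iterations of this loop.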
+ if err := e.allocateIDs(root); err != nil { + return err + } + + if err := e.deflateType(root); err != nil { + id := e.ids[root] + return fmt.Errorf("deflate %v with ID %d: %w", root, id, err) + } + } + + return nil +} + +func (e *encoder) deflateType(typ Type) (err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + err, ok = r.(error) + if !ok { + panic(r) + } + } + }() + + var raw rawType + raw.NameOff, err = e.strings.Add(typ.TypeName()) + if err != nil { + return err + } + + switch v := typ.(type) { + case *Void: + return errors.New("Void is implicit in BTF wire format") + + case *Int: + e.deflateInt(&raw, v) + + case *Pointer: + raw.SetKind(kindPointer) + raw.SetType(e.id(v.Target)) + + case *Array: + raw.SetKind(kindArray) + raw.data = &btfArray{ + e.id(v.Type), + e.id(v.Index), + v.Nelems, + } + + case *Struct: + raw.SetKind(kindStruct) + raw.SetSize(v.Size) + raw.data, err = e.convertMembers(&raw.btfType, v.Members) + + case *Union: + err = e.deflateUnion(&raw, v) + + case *Enum: + if v.Size == 8 { + err = e.deflateEnum64(&raw, v) + } else { + err = e.deflateEnum(&raw, v) + } + + case *Fwd: + raw.SetKind(kindForward) + raw.SetFwdKind(v.Kind) + + case *Typedef: + raw.SetKind(kindTypedef) + raw.SetType(e.id(v.Type)) + + case *Volatile: + raw.SetKind(kindVolatile) + raw.SetType(e.id(v.Type)) + + case *Const: + e.deflateConst(&raw, v) + + case *Restrict: + raw.SetKind(kindRestrict) + raw.SetType(e.id(v.Type)) + + case *Func: + raw.SetKind(kindFunc) + raw.SetType(e.id(v.Type)) + if !e.StripFuncLinkage { + raw.SetLinkage(v.Linkage) + } + + case *FuncProto: + raw.SetKind(kindFuncProto) + raw.SetType(e.id(v.Return)) + raw.SetVlen(len(v.Params)) + raw.data, err = e.deflateFuncParams(v.Params) + + case *Var: + raw.SetKind(kindVar) + raw.SetType(e.id(v.Type)) + raw.data = btfVariable{uint32(v.Linkage)} + + case *Datasec: + raw.SetKind(kindDatasec) + raw.SetSize(v.Size) + raw.SetVlen(len(v.Vars)) + raw.data = e.deflateVarSecinfos(v.Vars) + + case *Float: + raw.SetKind(kindFloat) + raw.SetSize(v.Size) + + case *declTag: + err = e.deflateDeclTag(&raw, v) + + case *TypeTag: + err = e.deflateTypeTag(&raw, v) + + default: + return fmt.Errorf("don't know how to deflate %T", v) + } + + if err != nil { + return err + } + + return raw.Marshal(e.buf, e.Order) +} + +func (e *encoder) deflateInt(raw *rawType, i *Int) { + raw.SetKind(kindInt) + raw.SetSize(i.Size) + + var bi btfInt + bi.SetEncoding(i.Encoding) + // We need to set bits in addition to size, since btf_type_int_is_regular + // otherwise flags this as a bitfield. + bi.SetBits(byte(i.Size) * 8) + raw.data = bi +} + +func (e *encoder) deflateDeclTag(raw *rawType, tag *declTag) (err error) { + // Replace a decl tag with an integer for compatibility with <5.16 kernels, + // following libbpf behaviour. + if e.ReplaceDeclTags { + typ := &Int{"decl_tag_placeholder", 1, Unsigned} + e.deflateInt(raw, typ) + + // Add the placeholder type name to the string table. The encoder added the + // original type name before this call. 
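+		// Add deduplicates, so the placeholder name is stored only once even if
+		// many decl tags are replaced.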
+ raw.NameOff, err = e.strings.Add(typ.TypeName()) + return + } + + raw.SetKind(kindDeclTag) + raw.SetType(e.id(tag.Type)) + raw.data = &btfDeclTag{uint32(tag.Index)} + raw.NameOff, err = e.strings.Add(tag.Value) + return +} + +func (e *encoder) deflateConst(raw *rawType, c *Const) { + raw.SetKind(kindConst) + raw.SetType(e.id(c.Type)) +} + +func (e *encoder) deflateTypeTag(raw *rawType, tag *TypeTag) (err error) { + // Replace a type tag with a const qualifier for compatibility with <5.17 + // kernels, following libbpf behaviour. + if e.ReplaceTypeTags { + e.deflateConst(raw, &Const{tag.Type}) + return + } + + raw.SetKind(kindTypeTag) + raw.SetType(e.id(tag.Type)) + raw.NameOff, err = e.strings.Add(tag.Value) + return +} + +func (e *encoder) deflateUnion(raw *rawType, union *Union) (err error) { + raw.SetKind(kindUnion) + raw.SetSize(union.Size) + raw.data, err = e.convertMembers(&raw.btfType, union.Members) + return +} + +func (e *encoder) convertMembers(header *btfType, members []Member) ([]btfMember, error) { + bms := make([]btfMember, 0, len(members)) + isBitfield := false + for _, member := range members { + isBitfield = isBitfield || member.BitfieldSize > 0 + + offset := member.Offset + if isBitfield { + offset = member.BitfieldSize<<24 | (member.Offset & 0xffffff) + } + + nameOff, err := e.strings.Add(member.Name) + if err != nil { + return nil, err + } + + bms = append(bms, btfMember{ + nameOff, + e.id(member.Type), + uint32(offset), + }) + } + + header.SetVlen(len(members)) + header.SetBitfield(isBitfield) + return bms, nil +} + +func (e *encoder) deflateEnum(raw *rawType, enum *Enum) (err error) { + raw.SetKind(kindEnum) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + // Signedness appeared together with ENUM64 support. + raw.SetSigned(enum.Signed && !e.ReplaceEnum64) + raw.data, err = e.deflateEnumValues(enum) + return +} + +func (e *encoder) deflateEnumValues(enum *Enum) ([]btfEnum, error) { + bes := make([]btfEnum, 0, len(enum.Values)) + for _, value := range enum.Values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + if enum.Signed { + if signedValue := int64(value.Value); signedValue < math.MinInt32 || signedValue > math.MaxInt32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", signedValue, value.Name) + } + } else { + if value.Value > math.MaxUint32 { + return nil, fmt.Errorf("value %d of enum %q exceeds 32 bits", value.Value, value.Name) + } + } + + bes = append(bes, btfEnum{ + nameOff, + uint32(value.Value), + }) + } + + return bes, nil +} + +func (e *encoder) deflateEnum64(raw *rawType, enum *Enum) (err error) { + if e.ReplaceEnum64 { + // Replace the ENUM64 with a union of fields with the correct size. + // This matches libbpf behaviour on purpose. 
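+		// Each enum value becomes a member of the same placeholder Int, so the
+		// resulting union keeps the original enum's name and size.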
+ placeholder := &Int{ + "enum64_placeholder", + enum.Size, + Unsigned, + } + if enum.Signed { + placeholder.Encoding = Signed + } + if err := e.allocateIDs(placeholder); err != nil { + return fmt.Errorf("add enum64 placeholder: %w", err) + } + + members := make([]Member, 0, len(enum.Values)) + for _, v := range enum.Values { + members = append(members, Member{ + Name: v.Name, + Type: placeholder, + }) + } + + return e.deflateUnion(raw, &Union{enum.Name, enum.Size, members, nil}) + } + + raw.SetKind(kindEnum64) + raw.SetSize(enum.Size) + raw.SetVlen(len(enum.Values)) + raw.SetSigned(enum.Signed) + raw.data, err = e.deflateEnum64Values(enum.Values) + return +} + +func (e *encoder) deflateEnum64Values(values []EnumValue) ([]btfEnum64, error) { + bes := make([]btfEnum64, 0, len(values)) + for _, value := range values { + nameOff, err := e.strings.Add(value.Name) + if err != nil { + return nil, err + } + + bes = append(bes, btfEnum64{ + nameOff, + uint32(value.Value), + uint32(value.Value >> 32), + }) + } + + return bes, nil +} + +func (e *encoder) deflateFuncParams(params []FuncParam) ([]btfParam, error) { + bps := make([]btfParam, 0, len(params)) + for _, param := range params { + nameOff, err := e.strings.Add(param.Name) + if err != nil { + return nil, err + } + + bps = append(bps, btfParam{ + nameOff, + e.id(param.Type), + }) + } + return bps, nil +} + +func (e *encoder) deflateVarSecinfos(vars []VarSecinfo) []btfVarSecinfo { + vsis := make([]btfVarSecinfo, 0, len(vars)) + for _, v := range vars { + vsis = append(vsis, btfVarSecinfo{ + e.id(v.Type), + v.Offset, + v.Size, + }) + } + return vsis +} + +// MarshalMapKV creates a BTF object containing a map key and value. +// +// The function is intended for the use of the ebpf package and may be removed +// at any point in time. +func MarshalMapKV(key, value Type) (_ *Handle, keyID, valueID TypeID, err error) { + var b Builder + + if key != nil { + keyID, err = b.Add(key) + if err != nil { + return nil, 0, 0, fmt.Errorf("add key type: %w", err) + } + } + + if value != nil { + valueID, err = b.Add(value) + if err != nil { + return nil, 0, 0, fmt.Errorf("add value type: %w", err) + } + } + + handle, err := NewHandle(&b) + if err != nil { + // Check for 'full' map BTF support, since kernels between 4.18 and 5.2 + // already support BTF blobs for maps without Var or Datasec just fine. + if err := haveMapBTF(); err != nil { + return nil, 0, 0, err + } + } + return handle, keyID, valueID, err +} diff --git a/vendor/github.com/cilium/ebpf/btf/strings.go b/vendor/github.com/cilium/ebpf/btf/strings.go new file mode 100644 index 0000000000..7c31461c30 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/strings.go @@ -0,0 +1,198 @@ +package btf + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "maps" + "slices" + "strings" +) + +type stringTable struct { + base *stringTable + offsets []uint32 + prevIdx int + strings []string +} + +// sizedReader is implemented by bytes.Reader, io.SectionReader, strings.Reader, etc. +type sizedReader interface { + io.Reader + Size() int64 +} + +func readStringTable(r sizedReader, base *stringTable) (*stringTable, error) { + // When parsing split BTF's string table, the first entry offset is derived + // from the last entry offset of the base BTF. + firstStringOffset := uint32(0) + if base != nil { + idx := len(base.offsets) - 1 + firstStringOffset = base.offsets[idx] + uint32(len(base.strings[idx])) + 1 + } + + // Derived from vmlinux BTF. 
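+	// The estimate only sizes the initial allocations; the slices below grow as
+	// needed if it turns out to be too small.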
+ const averageStringLength = 16 + + n := int(r.Size() / averageStringLength) + offsets := make([]uint32, 0, n) + strings := make([]string, 0, n) + + offset := firstStringOffset + scanner := bufio.NewScanner(r) + scanner.Split(splitNull) + for scanner.Scan() { + str := scanner.Text() + offsets = append(offsets, offset) + strings = append(strings, str) + offset += uint32(len(str)) + 1 + } + if err := scanner.Err(); err != nil { + return nil, err + } + + if len(strings) == 0 { + return nil, errors.New("string table is empty") + } + + if firstStringOffset == 0 && strings[0] != "" { + return nil, errors.New("first item in string table is non-empty") + } + + return &stringTable{base, offsets, 0, strings}, nil +} + +func splitNull(data []byte, atEOF bool) (advance int, token []byte, err error) { + i := bytes.IndexByte(data, 0) + if i == -1 { + if atEOF && len(data) > 0 { + return 0, nil, errors.New("string table isn't null terminated") + } + return 0, nil, nil + } + + return i + 1, data[:i], nil +} + +func (st *stringTable) Lookup(offset uint32) (string, error) { + if st.base != nil && offset <= st.base.offsets[len(st.base.offsets)-1] { + return st.base.lookup(offset) + } + return st.lookup(offset) +} + +func (st *stringTable) lookup(offset uint32) (string, error) { + // Fast path: zero offset is the empty string, looked up frequently. + if offset == 0 && st.base == nil { + return "", nil + } + + // Accesses tend to be globally increasing, so check if the next string is + // the one we want. This skips the binary search in about 50% of cases. + if st.prevIdx+1 < len(st.offsets) && st.offsets[st.prevIdx+1] == offset { + st.prevIdx++ + return st.strings[st.prevIdx], nil + } + + i, found := slices.BinarySearch(st.offsets, offset) + if !found { + return "", fmt.Errorf("offset %d isn't start of a string", offset) + } + + // Set the new increment index, but only if its greater than the current. + if i > st.prevIdx+1 { + st.prevIdx = i + } + + return st.strings[i], nil +} + +// Num returns the number of strings in the table. +func (st *stringTable) Num() int { + return len(st.strings) +} + +// stringTableBuilder builds BTF string tables. +type stringTableBuilder struct { + length uint32 + strings map[string]uint32 +} + +// newStringTableBuilder creates a builder with the given capacity. +// +// capacity may be zero. +func newStringTableBuilder(capacity int) *stringTableBuilder { + var stb stringTableBuilder + + if capacity == 0 { + // Use the runtime's small default size. + stb.strings = make(map[string]uint32) + } else { + stb.strings = make(map[string]uint32, capacity) + } + + // Ensure that the empty string is at index 0. + stb.append("") + return &stb +} + +// Add a string to the table. +// +// Adding the same string multiple times will only store it once. +func (stb *stringTableBuilder) Add(str string) (uint32, error) { + if strings.IndexByte(str, 0) != -1 { + return 0, fmt.Errorf("string contains null: %q", str) + } + + offset, ok := stb.strings[str] + if ok { + return offset, nil + } + + return stb.append(str), nil +} + +func (stb *stringTableBuilder) append(str string) uint32 { + offset := stb.length + stb.length += uint32(len(str)) + 1 + stb.strings[str] = offset + return offset +} + +// Lookup finds the offset of a string in the table. +// +// Returns an error if str hasn't been added yet. 
+func (stb *stringTableBuilder) Lookup(str string) (uint32, error) { + offset, ok := stb.strings[str] + if !ok { + return 0, fmt.Errorf("string %q is not in table", str) + } + + return offset, nil +} + +// Length returns the length in bytes. +func (stb *stringTableBuilder) Length() int { + return int(stb.length) +} + +// AppendEncoded appends the string table to the end of the provided buffer. +func (stb *stringTableBuilder) AppendEncoded(buf []byte) []byte { + n := len(buf) + buf = append(buf, make([]byte, stb.Length())...) + strings := buf[n:] + for str, offset := range stb.strings { + copy(strings[offset:], str) + } + return buf +} + +// Copy the string table builder. +func (stb *stringTableBuilder) Copy() *stringTableBuilder { + return &stringTableBuilder{ + stb.length, + maps.Clone(stb.strings), + } +} diff --git a/vendor/github.com/cilium/ebpf/btf/traversal.go b/vendor/github.com/cilium/ebpf/btf/traversal.go new file mode 100644 index 0000000000..13647d931f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/traversal.go @@ -0,0 +1,159 @@ +package btf + +import ( + "fmt" +) + +// Functions to traverse a cyclic graph of types. The below was very useful: +// https://eli.thegreenplace.net/2015/directed-graph-traversal-orderings-and-applications-to-data-flow-analysis/#post-order-and-reverse-post-order + +// Visit all types reachable from root in postorder. +// +// Traversal stops if yield returns false. +// +// Returns false if traversal was aborted. +func visitInPostorder(root Type, visited map[Type]struct{}, yield func(typ Type) bool) bool { + if _, ok := visited[root]; ok { + return true + } + if visited == nil { + visited = make(map[Type]struct{}) + } + visited[root] = struct{}{} + + cont := children(root, func(child *Type) bool { + return visitInPostorder(*child, visited, yield) + }) + if !cont { + return false + } + + return yield(root) +} + +// children calls yield on each child of typ. +// +// Traversal stops if yield returns false. +// +// Returns false if traversal was aborted. +func children(typ Type, yield func(child *Type) bool) bool { + // Explicitly type switch on the most common types to allow the inliner to + // do its work. This avoids allocating intermediate slices from walk() on + // the heap. + var tags []string + switch v := typ.(type) { + case *Void, *Int, *Enum, *Fwd, *Float, *declTag: + // No children to traverse. + // declTags is declared as a leaf type since it's parsed into .Tags fields of other types + // during unmarshaling. 
+ case *Pointer: + if !yield(&v.Target) { + return false + } + case *Array: + if !yield(&v.Index) { + return false + } + if !yield(&v.Type) { + return false + } + case *Struct: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return false + } + } + } + tags = v.Tags + case *Union: + for i := range v.Members { + if !yield(&v.Members[i].Type) { + return false + } + for _, t := range v.Members[i].Tags { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return false + } + } + } + tags = v.Tags + case *Typedef: + if !yield(&v.Type) { + return false + } + tags = v.Tags + case *Volatile: + if !yield(&v.Type) { + return false + } + case *Const: + if !yield(&v.Type) { + return false + } + case *Restrict: + if !yield(&v.Type) { + return false + } + case *Func: + if !yield(&v.Type) { + return false + } + if fp, ok := v.Type.(*FuncProto); ok { + for i := range fp.Params { + if len(v.ParamTags) <= i { + continue + } + for _, t := range v.ParamTags[i] { + var tag Type = &declTag{v, t, i} + if !yield(&tag) { + return false + } + } + } + } + tags = v.Tags + case *FuncProto: + if !yield(&v.Return) { + return false + } + for i := range v.Params { + if !yield(&v.Params[i].Type) { + return false + } + } + case *Var: + if !yield(&v.Type) { + return false + } + tags = v.Tags + case *Datasec: + for i := range v.Vars { + if !yield(&v.Vars[i].Type) { + return false + } + } + case *TypeTag: + if !yield(&v.Type) { + return false + } + case *cycle: + // cycle has children, but we ignore them deliberately. + default: + panic(fmt.Sprintf("don't know how to walk Type %T", v)) + } + + for _, t := range tags { + var tag Type = &declTag{typ, t, -1} + if !yield(&tag) { + return false + } + } + + return true +} diff --git a/vendor/github.com/cilium/ebpf/btf/types.go b/vendor/github.com/cilium/ebpf/btf/types.go new file mode 100644 index 0000000000..dbcdf9dd7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/types.go @@ -0,0 +1,1417 @@ +package btf + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// Mirrors MAX_RESOLVE_DEPTH in libbpf. +// https://github.com/libbpf/libbpf/blob/e26b84dc330c9644c07428c271ab491b0f01f4e1/src/btf.c#L761 +const maxResolveDepth = 32 + +// TypeID identifies a type in a BTF section. +type TypeID = sys.TypeID + +// Type represents a type described by BTF. +// +// Identity of Type follows the [Go specification]: two Types are considered +// equal if they have the same concrete type and the same dynamic value, aka +// they point at the same location in memory. This means that the following +// Types are considered distinct even though they have the same "shape". +// +// a := &Int{Size: 1} +// b := &Int{Size: 1} +// a != b +// +// [Go specification]: https://go.dev/ref/spec#Comparison_operators +type Type interface { + // Type can be formatted using the %s and %v verbs. %s outputs only the + // identity of the type, without any detail. %v outputs additional detail. + // + // Use the '+' flag to include the address of the type. + // + // Use the width to specify how many levels of detail to output, for example + // %1v will output detail for the root type and a short description of its + // children. 
%2v would output details of the root type and its children + // as well as a short description of the grandchildren. + fmt.Formatter + + // Name of the type, empty for anonymous types and types that cannot + // carry a name, like Void and Pointer. + TypeName() string + + // Make a copy of the type, without copying Type members. + copy() Type + + // New implementations must update walkType. +} + +var ( + _ Type = (*Int)(nil) + _ Type = (*Struct)(nil) + _ Type = (*Union)(nil) + _ Type = (*Enum)(nil) + _ Type = (*Fwd)(nil) + _ Type = (*Func)(nil) + _ Type = (*Typedef)(nil) + _ Type = (*Var)(nil) + _ Type = (*Datasec)(nil) + _ Type = (*Float)(nil) + _ Type = (*declTag)(nil) + _ Type = (*TypeTag)(nil) + _ Type = (*cycle)(nil) +) + +// Void is the unit type of BTF. +type Void struct{} + +func (v *Void) Format(fs fmt.State, verb rune) { formatType(fs, verb, v) } +func (v *Void) TypeName() string { return "" } +func (v *Void) size() uint32 { return 0 } +func (v *Void) copy() Type { return (*Void)(nil) } + +type IntEncoding byte + +// Valid IntEncodings. +// +// These may look like they are flags, but they aren't. +const ( + Unsigned IntEncoding = 0 + Signed IntEncoding = 1 + Char IntEncoding = 2 + Bool IntEncoding = 4 +) + +func (ie IntEncoding) String() string { + switch ie { + case Char: + // NB: There is no way to determine signedness for char. + return "char" + case Bool: + return "bool" + case Signed: + return "signed" + case Unsigned: + return "unsigned" + default: + return fmt.Sprintf("IntEncoding(%d)", byte(ie)) + } +} + +// Int is an integer of a given length. +// +// See https://www.kernel.org/doc/html/latest/bpf/btf.html#btf-kind-int +type Int struct { + Name string + + // The size of the integer in bytes. + Size uint32 + Encoding IntEncoding +} + +func (i *Int) Format(fs fmt.State, verb rune) { + formatType(fs, verb, i, i.Encoding, "size=", i.Size) +} + +func (i *Int) TypeName() string { return i.Name } +func (i *Int) size() uint32 { return i.Size } +func (i *Int) copy() Type { + cpy := *i + return &cpy +} + +// Pointer is a pointer to another type. +type Pointer struct { + Target Type +} + +func (p *Pointer) Format(fs fmt.State, verb rune) { + formatType(fs, verb, p, "target=", p.Target) +} + +func (p *Pointer) TypeName() string { return "" } +func (p *Pointer) size() uint32 { return 8 } +func (p *Pointer) copy() Type { + cpy := *p + return &cpy +} + +// Array is an array with a fixed number of elements. +type Array struct { + Index Type + Type Type + Nelems uint32 +} + +func (arr *Array) Format(fs fmt.State, verb rune) { + formatType(fs, verb, arr, "index=", arr.Index, "type=", arr.Type, "n=", arr.Nelems) +} + +func (arr *Array) TypeName() string { return "" } + +func (arr *Array) copy() Type { + cpy := *arr + return &cpy +} + +// Struct is a compound type of consecutive members. +type Struct struct { + Name string + // The size of the struct including padding, in bytes + Size uint32 + Members []Member + Tags []string +} + +func (s *Struct) Format(fs fmt.State, verb rune) { + formatType(fs, verb, s, "fields=", len(s.Members)) +} + +func (s *Struct) TypeName() string { return s.Name } + +func (s *Struct) size() uint32 { return s.Size } + +func (s *Struct) copy() Type { + cpy := *s + cpy.Members = copyMembers(s.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (s *Struct) members() []Member { + return s.Members +} + +// Union is a compound type where members occupy the same memory. 
+type Union struct { + Name string + // The size of the union including padding, in bytes. + Size uint32 + Members []Member + Tags []string +} + +func (u *Union) Format(fs fmt.State, verb rune) { + formatType(fs, verb, u, "fields=", len(u.Members)) +} + +func (u *Union) TypeName() string { return u.Name } + +func (u *Union) size() uint32 { return u.Size } + +func (u *Union) copy() Type { + cpy := *u + cpy.Members = copyMembers(u.Members) + cpy.Tags = copyTags(cpy.Tags) + return &cpy +} + +func (u *Union) members() []Member { + return u.Members +} + +func copyMembers(orig []Member) []Member { + cpy := make([]Member, len(orig)) + copy(cpy, orig) + for i, member := range cpy { + cpy[i].Tags = copyTags(member.Tags) + } + return cpy +} + +func copyTags(orig []string) []string { + if orig == nil { // preserve nil vs zero-len slice distinction + return nil + } + cpy := make([]string, len(orig)) + copy(cpy, orig) + return cpy +} + +type composite interface { + Type + members() []Member +} + +var ( + _ composite = (*Struct)(nil) + _ composite = (*Union)(nil) +) + +// A value in bits. +type Bits uint32 + +// Bytes converts a bit value into bytes. +func (b Bits) Bytes() uint32 { + return uint32(b / 8) +} + +// Member is part of a Struct or Union. +// +// It is not a valid Type. +type Member struct { + Name string + Type Type + Offset Bits + BitfieldSize Bits + Tags []string +} + +// Enum lists possible values. +type Enum struct { + Name string + // Size of the enum value in bytes. + Size uint32 + // True if the values should be interpreted as signed integers. + Signed bool + Values []EnumValue +} + +func (e *Enum) Format(fs fmt.State, verb rune) { + formatType(fs, verb, e, "size=", e.Size, "values=", len(e.Values)) +} + +func (e *Enum) TypeName() string { return e.Name } + +// EnumValue is part of an Enum +// +// Is is not a valid Type +type EnumValue struct { + Name string + Value uint64 +} + +func (e *Enum) size() uint32 { return e.Size } +func (e *Enum) copy() Type { + cpy := *e + cpy.Values = make([]EnumValue, len(e.Values)) + copy(cpy.Values, e.Values) + return &cpy +} + +// FwdKind is the type of forward declaration. +type FwdKind int + +// Valid types of forward declaration. +const ( + FwdStruct FwdKind = iota + FwdUnion +) + +func (fk FwdKind) String() string { + switch fk { + case FwdStruct: + return "struct" + case FwdUnion: + return "union" + default: + return fmt.Sprintf("%T(%d)", fk, int(fk)) + } +} + +// Fwd is a forward declaration of a Type. +type Fwd struct { + Name string + Kind FwdKind +} + +func (f *Fwd) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Kind) +} + +func (f *Fwd) TypeName() string { return f.Name } + +func (f *Fwd) copy() Type { + cpy := *f + return &cpy +} + +func (f *Fwd) matches(typ Type) bool { + if _, ok := As[*Struct](typ); ok && f.Kind == FwdStruct { + return true + } + + if _, ok := As[*Union](typ); ok && f.Kind == FwdUnion { + return true + } + + return false +} + +// Typedef is an alias of a Type. +type Typedef struct { + Name string + Type Type + Tags []string +} + +func (td *Typedef) Format(fs fmt.State, verb rune) { + formatType(fs, verb, td, td.Type) +} + +func (td *Typedef) TypeName() string { return td.Name } + +func (td *Typedef) copy() Type { + cpy := *td + cpy.Tags = copyTags(td.Tags) + return &cpy +} + +// Volatile is a qualifier. 
+type Volatile struct { + Type Type +} + +func (v *Volatile) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Type) +} + +func (v *Volatile) TypeName() string { return "" } + +func (v *Volatile) qualify() Type { return v.Type } +func (v *Volatile) copy() Type { + cpy := *v + return &cpy +} + +// Const is a qualifier. +type Const struct { + Type Type +} + +func (c *Const) Format(fs fmt.State, verb rune) { + formatType(fs, verb, c, c.Type) +} + +func (c *Const) TypeName() string { return "" } + +func (c *Const) qualify() Type { return c.Type } +func (c *Const) copy() Type { + cpy := *c + return &cpy +} + +// Restrict is a qualifier. +type Restrict struct { + Type Type +} + +func (r *Restrict) Format(fs fmt.State, verb rune) { + formatType(fs, verb, r, r.Type) +} + +func (r *Restrict) TypeName() string { return "" } + +func (r *Restrict) qualify() Type { return r.Type } +func (r *Restrict) copy() Type { + cpy := *r + return &cpy +} + +// Func is a function definition. +type Func struct { + Name string + Type Type + Linkage FuncLinkage + Tags []string + // ParamTags holds a list of tags for each parameter of the FuncProto to which `Type` points. + // If no tags are present for any param, the outer slice will be nil/len(ParamTags)==0. + // If at least 1 param has a tag, the outer slice will have the same length as the number of params. + // The inner slice contains the tags and may be nil/len(ParamTags[i])==0 if no tags are present for that param. + ParamTags [][]string +} + +func FuncMetadata(ins *asm.Instruction) *Func { + fn, _ := ins.Metadata.Get(funcInfoMeta{}).(*Func) + return fn +} + +// WithFuncMetadata adds a btf.Func to the Metadata of asm.Instruction. +func WithFuncMetadata(ins asm.Instruction, fn *Func) asm.Instruction { + ins.Metadata.Set(funcInfoMeta{}, fn) + return ins +} + +func (f *Func) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, f.Linkage, "proto=", f.Type) +} + +func (f *Func) TypeName() string { return f.Name } + +func (f *Func) copy() Type { + cpy := *f + cpy.Tags = copyTags(f.Tags) + if f.ParamTags != nil { // preserve nil vs zero-len slice distinction + ptCopy := make([][]string, len(f.ParamTags)) + for i, tags := range f.ParamTags { + ptCopy[i] = copyTags(tags) + } + cpy.ParamTags = ptCopy + } + return &cpy +} + +// FuncProto is a function declaration. +type FuncProto struct { + Return Type + Params []FuncParam +} + +func (fp *FuncProto) Format(fs fmt.State, verb rune) { + formatType(fs, verb, fp, "args=", len(fp.Params), "return=", fp.Return) +} + +func (fp *FuncProto) TypeName() string { return "" } + +func (fp *FuncProto) copy() Type { + cpy := *fp + cpy.Params = make([]FuncParam, len(fp.Params)) + copy(cpy.Params, fp.Params) + return &cpy +} + +type FuncParam struct { + Name string + Type Type +} + +// Var is a global variable. +type Var struct { + Name string + Type Type + Linkage VarLinkage + Tags []string +} + +func (v *Var) Format(fs fmt.State, verb rune) { + formatType(fs, verb, v, v.Linkage) +} + +func (v *Var) TypeName() string { return v.Name } + +func (v *Var) copy() Type { + cpy := *v + cpy.Tags = copyTags(v.Tags) + return &cpy +} + +// Datasec is a global program section containing data. 
+type Datasec struct { + Name string + Size uint32 + Vars []VarSecinfo +} + +func (ds *Datasec) Format(fs fmt.State, verb rune) { + formatType(fs, verb, ds) +} + +func (ds *Datasec) TypeName() string { return ds.Name } + +func (ds *Datasec) size() uint32 { return ds.Size } + +func (ds *Datasec) copy() Type { + cpy := *ds + cpy.Vars = make([]VarSecinfo, len(ds.Vars)) + copy(cpy.Vars, ds.Vars) + return &cpy +} + +// VarSecinfo describes variable in a Datasec. +// +// It is not a valid Type. +type VarSecinfo struct { + // Var or Func. + Type Type + Offset uint32 + Size uint32 +} + +// Float is a float of a given length. +type Float struct { + Name string + + // The size of the float in bytes. + Size uint32 +} + +func (f *Float) Format(fs fmt.State, verb rune) { + formatType(fs, verb, f, "size=", f.Size*8) +} + +func (f *Float) TypeName() string { return f.Name } +func (f *Float) size() uint32 { return f.Size } +func (f *Float) copy() Type { + cpy := *f + return &cpy +} + +// declTag associates metadata with a declaration. +type declTag struct { + Type Type + Value string + // The index this tag refers to in the target type. For composite types, + // a value of -1 indicates that the tag refers to the whole type. Otherwise + // it indicates which member or argument the tag applies to. + Index int +} + +func (dt *declTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, dt, "type=", dt.Type, "value=", dt.Value, "index=", dt.Index) +} + +func (dt *declTag) TypeName() string { return "" } +func (dt *declTag) copy() Type { + cpy := *dt + return &cpy +} + +// TypeTag associates metadata with a pointer type. Tag types act as a custom +// modifier(const, restrict, volatile) for the target type. Unlike declTags, +// TypeTags are ordered so the order in which they are added matters. +// +// One of their uses is to mark pointers as `__kptr` meaning a pointer points +// to kernel memory. Adding a `__kptr` to pointers in map values allows you +// to store pointers to kernel memory in maps. +type TypeTag struct { + Type Type + Value string +} + +func (tt *TypeTag) Format(fs fmt.State, verb rune) { + formatType(fs, verb, tt, "type=", tt.Type, "value=", tt.Value) +} + +func (tt *TypeTag) TypeName() string { return "" } +func (tt *TypeTag) qualify() Type { return tt.Type } +func (tt *TypeTag) copy() Type { + cpy := *tt + return &cpy +} + +// cycle is a type which had to be elided since it exceeded maxTypeDepth. +type cycle struct { + root Type +} + +func (c *cycle) ID() TypeID { return math.MaxUint32 } +func (c *cycle) Format(fs fmt.State, verb rune) { formatType(fs, verb, c, "root=", c.root) } +func (c *cycle) TypeName() string { return "" } +func (c *cycle) copy() Type { + cpy := *c + return &cpy +} + +type sizer interface { + size() uint32 +} + +var ( + _ sizer = (*Int)(nil) + _ sizer = (*Pointer)(nil) + _ sizer = (*Struct)(nil) + _ sizer = (*Union)(nil) + _ sizer = (*Enum)(nil) + _ sizer = (*Datasec)(nil) +) + +type qualifier interface { + qualify() Type +} + +var ( + _ qualifier = (*Const)(nil) + _ qualifier = (*Restrict)(nil) + _ qualifier = (*Volatile)(nil) + _ qualifier = (*TypeTag)(nil) +) + +var errUnsizedType = errors.New("type is unsized") + +// Sizeof returns the size of a type in bytes. +// +// Returns an error if the size can't be computed. 
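+//
+// A minimal illustrative example:
+//
+//	n, err := Sizeof(&Array{Index: &Int{Size: 4}, Type: &Int{Size: 4}, Nelems: 8})
+//	// n == 32, err == nil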
+func Sizeof(typ Type) (int, error) { + var ( + n = int64(1) + elem int64 + ) + + for i := 0; i < maxResolveDepth; i++ { + switch v := typ.(type) { + case *Array: + if n > 0 && int64(v.Nelems) > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + // Arrays may be of zero length, which allows + // n to be zero as well. + n *= int64(v.Nelems) + typ = v.Type + continue + + case sizer: + elem = int64(v.size()) + + case *Typedef: + typ = v.Type + continue + + case qualifier: + typ = v.qualify() + continue + + default: + return 0, fmt.Errorf("type %T: %w", typ, errUnsizedType) + } + + if n > 0 && elem > math.MaxInt64/n { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + size := n * elem + if int64(int(size)) != size { + return 0, fmt.Errorf("type %s: overflow", typ) + } + + return int(size), nil + } + + return 0, fmt.Errorf("type %s: exceeded type depth", typ) +} + +// alignof returns the alignment of a type. +// +// Returns an error if the Type can't be aligned, like an integer with an uneven +// size. Currently only supports the subset of types necessary for bitfield +// relocations. +func alignof(typ Type) (int, error) { + var n int + + switch t := UnderlyingType(typ).(type) { + case *Enum: + n = int(t.size()) + case *Int: + n = int(t.Size) + case *Array: + return alignof(t.Type) + default: + return 0, fmt.Errorf("can't calculate alignment of %T", t) + } + + if !internal.IsPow(n) { + return 0, fmt.Errorf("alignment value %d is not a power of two", n) + } + + return n, nil +} + +// Copy a Type recursively. +// +// typ may form a cycle. +func Copy(typ Type) Type { + return copyType(typ, nil, make(map[Type]Type), nil) +} + +func copyType(typ Type, ids map[Type]TypeID, copies map[Type]Type, copiedIDs map[Type]TypeID) Type { + if typ == nil { + return nil + } + + cpy, ok := copies[typ] + if ok { + // This has been copied previously, no need to continue. + return cpy + } + + cpy = typ.copy() + copies[typ] = cpy + + if id, ok := ids[typ]; ok { + copiedIDs[cpy] = id + } + + children(cpy, func(child *Type) bool { + *child = copyType(*child, ids, copies, copiedIDs) + return true + }) + + return cpy +} + +type typeDeque = internal.Deque[*Type] + +// readAndInflateTypes reads the raw btf type info and turns it into a graph +// of Types connected via pointers. +// +// If base is provided, then the types are considered to be of a split BTF +// (e.g., a kernel module). +// +// Returns a slice of types indexed by TypeID. Since BTF ignores compilation +// units, multiple types may share the same name. A Type may form a cyclic graph +// by pointing at itself. +func readAndInflateTypes(r io.Reader, bo binary.ByteOrder, typeLen uint32, rawStrings *stringTable, base *Spec) ([]Type, error) { + // because of the interleaving between types and struct members it is difficult to + // precompute the numbers of raw types this will parse + // this "guess" is a good first estimation + sizeOfbtfType := uintptr(btfTypeLen) + tyMaxCount := uintptr(typeLen) / sizeOfbtfType / 2 + types := make([]Type, 0, tyMaxCount) + + // Void is defined to always be type ID 0, and is thus omitted from BTF. + types = append(types, (*Void)(nil)) + + firstTypeID := TypeID(0) + if base != nil { + var err error + firstTypeID, err = base.nextTypeID() + if err != nil { + return nil, err + } + + // Split BTF doesn't contain Void. 
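+		// Drop the implicit Void appended above; type IDs of split BTF start at
+		// firstTypeID rather than 0.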
+ types = types[:0] + } + + type fixupDef struct { + id TypeID + typ *Type + } + + var fixups []fixupDef + fixup := func(id TypeID, typ *Type) { + if id < firstTypeID { + if baseType, err := base.TypeByID(id); err == nil { + *typ = baseType + return + } + } + + idx := int(id - firstTypeID) + if idx < len(types) { + // We've already inflated this type, fix it up immediately. + *typ = types[idx] + return + } + + fixups = append(fixups, fixupDef{id, typ}) + } + + type bitfieldFixupDef struct { + id TypeID + m *Member + } + + var ( + legacyBitfields = make(map[TypeID][2]Bits) // offset, size + bitfieldFixups []bitfieldFixupDef + ) + convertMembers := func(raw []btfMember, kindFlag bool) ([]Member, error) { + // NB: The fixup below relies on pre-allocating this array to + // work, since otherwise append might re-allocate members. + members := make([]Member, 0, len(raw)) + for i, btfMember := range raw { + name, err := rawStrings.Lookup(btfMember.NameOff) + if err != nil { + return nil, fmt.Errorf("can't get name for member %d: %w", i, err) + } + + members = append(members, Member{ + Name: name, + Offset: Bits(btfMember.Offset), + }) + + m := &members[i] + fixup(raw[i].Type, &m.Type) + + if kindFlag { + m.BitfieldSize = Bits(btfMember.Offset >> 24) + m.Offset &= 0xffffff + // We ignore legacy bitfield definitions if the current composite + // is a new-style bitfield. This is kind of safe since offset and + // size on the type of the member must be zero if kindFlat is set + // according to spec. + continue + } + + // This may be a legacy bitfield, try to fix it up. + data, ok := legacyBitfields[raw[i].Type] + if ok { + // Bingo! + m.Offset += data[0] + m.BitfieldSize = data[1] + continue + } + + if m.Type != nil { + // We couldn't find a legacy bitfield, but we know that the member's + // type has already been inflated. Hence we know that it can't be + // a legacy bitfield and there is nothing left to do. + continue + } + + // We don't have fixup data, and the type we're pointing + // at hasn't been inflated yet. No choice but to defer + // the fixup. 
+ bitfieldFixups = append(bitfieldFixups, bitfieldFixupDef{ + raw[i].Type, + m, + }) + } + return members, nil + } + + var ( + buf = make([]byte, 1024) + header btfType + bInt btfInt + bArr btfArray + bMembers []btfMember + bEnums []btfEnum + bParams []btfParam + bVariable btfVariable + bSecInfos []btfVarSecinfo + bDeclTag btfDeclTag + bEnums64 []btfEnum64 + ) + + var declTags []*declTag + for { + var ( + id = firstTypeID + TypeID(len(types)) + typ Type + ) + + if _, err := io.ReadFull(r, buf[:btfTypeLen]); err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("can't read type info for id %v: %v", id, err) + } + + if _, err := unmarshalBtfType(&header, buf[:btfTypeLen], bo); err != nil { + return nil, fmt.Errorf("can't unmarshal type info for id %v: %v", id, err) + } + + if id < firstTypeID { + return nil, fmt.Errorf("no more type IDs") + } + + name, err := rawStrings.Lookup(header.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for type id %d: %w", id, err) + } + + switch header.Kind() { + case kindInt: + size := header.Size() + buf = buf[:btfIntLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfInt, id: %d: %w", id, err) + } + if _, err := unmarshalBtfInt(&bInt, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfInt, id: %d: %w", id, err) + } + if bInt.Offset() > 0 || bInt.Bits().Bytes() != size { + legacyBitfields[id] = [2]Bits{bInt.Offset(), bInt.Bits()} + } + typ = &Int{name, header.Size(), bInt.Encoding()} + + case kindPointer: + ptr := &Pointer{nil} + fixup(header.Type(), &ptr.Target) + typ = ptr + + case kindArray: + buf = buf[:btfArrayLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfArray, id: %d: %w", id, err) + } + if _, err := unmarshalBtfArray(&bArr, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfArray, id: %d: %w", id, err) + } + + arr := &Array{nil, nil, bArr.Nelems} + fixup(bArr.IndexType, &arr.Index) + fixup(bArr.Type, &arr.Type) + typ = arr + + case kindStruct: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("struct %s (id %d): %w", name, id, err) + } + typ = &Struct{name, header.Size(), members, nil} + + case kindUnion: + vlen := header.Vlen() + bMembers = slices.Grow(bMembers[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfMemberLen)[:vlen*btfMemberLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfMembers, id: %d: %w", id, err) + } + if _, err := unmarshalBtfMembers(bMembers, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfMembers, id: %d: %w", id, err) + } + + members, err := convertMembers(bMembers, header.Bitfield()) + if err != nil { + return nil, fmt.Errorf("union %s (id %d): %w", name, id, err) + } + typ = &Union{name, header.Size(), members, nil} + + case kindEnum: + vlen := header.Vlen() + bEnums = slices.Grow(bEnums[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnumLen)[:vlen*btfEnumLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, 
fmt.Errorf("can't read btfEnums, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums(bEnums, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnums, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + signed := header.Signed() + for i, btfVal := range bEnums { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum value %d: %s", i, err) + } + value := uint64(btfVal.Val) + if signed { + // Sign extend values to 64 bit. + value = uint64(int32(btfVal.Val)) + } + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), signed, vals} + + case kindForward: + typ = &Fwd{name, header.FwdKind()} + + case kindTypedef: + typedef := &Typedef{name, nil, nil} + fixup(header.Type(), &typedef.Type) + typ = typedef + + case kindVolatile: + volatile := &Volatile{nil} + fixup(header.Type(), &volatile.Type) + typ = volatile + + case kindConst: + cnst := &Const{nil} + fixup(header.Type(), &cnst.Type) + typ = cnst + + case kindRestrict: + restrict := &Restrict{nil} + fixup(header.Type(), &restrict.Type) + typ = restrict + + case kindFunc: + fn := &Func{name, nil, header.Linkage(), nil, nil} + fixup(header.Type(), &fn.Type) + typ = fn + + case kindFuncProto: + vlen := header.Vlen() + bParams = slices.Grow(bParams[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfParamLen)[:vlen*btfParamLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfParams, id: %d: %w", id, err) + } + if _, err := unmarshalBtfParams(bParams, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfParams, id: %d: %w", id, err) + } + + params := make([]FuncParam, 0, vlen) + for i, param := range bParams { + name, err := rawStrings.Lookup(param.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for func proto parameter %d: %s", i, err) + } + params = append(params, FuncParam{ + Name: name, + }) + } + for i := range params { + fixup(bParams[i].Type, ¶ms[i].Type) + } + + fp := &FuncProto{nil, params} + fixup(header.Type(), &fp.Return) + typ = fp + + case kindVar: + buf = buf[:btfVariableLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVariable(&bVariable, buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfVariable, id: %d: %w", id, err) + } + + v := &Var{name, nil, VarLinkage(bVariable.Linkage), nil} + fixup(header.Type(), &v.Type) + typ = v + + case kindDatasec: + vlen := header.Vlen() + bSecInfos = slices.Grow(bSecInfos[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfVarSecinfoLen)[:vlen*btfVarSecinfoLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfVarSecInfos, id: %d: %w", id, err) + } + if _, err := unmarshalBtfVarSecInfos(bSecInfos, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfVarSecInfos, id: %d: %w", id, err) + } + + vars := make([]VarSecinfo, 0, vlen) + for _, btfVar := range bSecInfos { + vars = append(vars, VarSecinfo{ + Offset: btfVar.Offset, + Size: btfVar.Size, + }) + } + for i := range vars { + fixup(bSecInfos[i].Type, &vars[i].Type) + } + typ = &Datasec{name, header.Size(), vars} + + case kindFloat: + typ = &Float{name, header.Size()} + + case kindDeclTag: + buf = buf[:btfDeclTagLen] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + if _, err := unmarshalBtfDeclTag(&bDeclTag, 
buf, bo); err != nil { + return nil, fmt.Errorf("can't read btfDeclTag, id: %d: %w", id, err) + } + + btfIndex := bDeclTag.ComponentIdx + if uint64(btfIndex) > math.MaxInt { + return nil, fmt.Errorf("type id %d: index exceeds int", id) + } + + dt := &declTag{nil, name, int(int32(btfIndex))} + fixup(header.Type(), &dt.Type) + typ = dt + + declTags = append(declTags, dt) + + case kindTypeTag: + tt := &TypeTag{nil, name} + fixup(header.Type(), &tt.Type) + typ = tt + + case kindEnum64: + vlen := header.Vlen() + bEnums64 = slices.Grow(bEnums64[:0], vlen)[:vlen] + buf = slices.Grow(buf[:0], vlen*btfEnum64Len)[:vlen*btfEnum64Len] + if _, err := io.ReadFull(r, buf); err != nil { + return nil, fmt.Errorf("can't read btfEnum64s, id: %d: %w", id, err) + } + if _, err := unmarshalBtfEnums64(bEnums64, buf, bo); err != nil { + return nil, fmt.Errorf("can't unmarshal btfEnum64s, id: %d: %w", id, err) + } + + vals := make([]EnumValue, 0, vlen) + for i, btfVal := range bEnums64 { + name, err := rawStrings.Lookup(btfVal.NameOff) + if err != nil { + return nil, fmt.Errorf("get name for enum64 value %d: %s", i, err) + } + value := (uint64(btfVal.ValHi32) << 32) | uint64(btfVal.ValLo32) + vals = append(vals, EnumValue{name, value}) + } + typ = &Enum{name, header.Size(), header.Signed(), vals} + + default: + return nil, fmt.Errorf("type id %d: unknown kind: %v", id, header.Kind()) + } + + types = append(types, typ) + } + + for _, fixup := range fixups { + if fixup.id < firstTypeID { + return nil, fmt.Errorf("fixup for base type id %d is not expected", fixup.id) + } + + idx := int(fixup.id - firstTypeID) + if idx >= len(types) { + return nil, fmt.Errorf("reference to invalid type id: %d", fixup.id) + } + + *fixup.typ = types[idx] + } + + for _, bitfieldFixup := range bitfieldFixups { + if bitfieldFixup.id < firstTypeID { + return nil, fmt.Errorf("bitfield fixup from split to base types is not expected") + } + + data, ok := legacyBitfields[bitfieldFixup.id] + if ok { + // This is indeed a legacy bitfield, fix it up. 
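+			// data[0] is the bit offset and data[1] the bitfield size recorded from
+			// the legacy Int encoding earlier.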
+ bitfieldFixup.m.Offset += data[0] + bitfieldFixup.m.BitfieldSize = data[1] + } + } + + for _, dt := range declTags { + switch t := dt.Type.(type) { + case *Var: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case *Typedef: + if dt.Index != -1 { + return nil, fmt.Errorf("type %s: component idx %d is not -1", dt, dt.Index) + } + t.Tags = append(t.Tags, dt.Value) + + case composite: + if dt.Index >= 0 { + members := t.members() + if dt.Index >= len(members) { + return nil, fmt.Errorf("type %s: component idx %d exceeds members of %s", dt, dt.Index, t) + } + + members[dt.Index].Tags = append(members[dt.Index].Tags, dt.Value) + continue + } + + if dt.Index == -1 { + switch t2 := t.(type) { + case *Struct: + t2.Tags = append(t2.Tags, dt.Value) + case *Union: + t2.Tags = append(t2.Tags, dt.Value) + } + + continue + } + + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + + case *Func: + fp, ok := t.Type.(*FuncProto) + if !ok { + return nil, fmt.Errorf("type %s: %s is not a FuncProto", dt, t.Type) + } + + // Ensure the number of argument tag lists equals the number of arguments + if len(t.ParamTags) == 0 { + t.ParamTags = make([][]string, len(fp.Params)) + } + + if dt.Index >= 0 { + if dt.Index >= len(fp.Params) { + return nil, fmt.Errorf("type %s: component idx %d exceeds params of %s", dt, dt.Index, t) + } + + t.ParamTags[dt.Index] = append(t.ParamTags[dt.Index], dt.Value) + continue + } + + if dt.Index == -1 { + t.Tags = append(t.Tags, dt.Value) + continue + } + + return nil, fmt.Errorf("type %s: decl tag for type %s has invalid component idx", dt, t) + + default: + return nil, fmt.Errorf("type %s: decl tag for type %s is not supported", dt, t) + } + } + + return types, nil +} + +// essentialName represents the name of a BTF type stripped of any flavor +// suffixes after a ___ delimiter. +type essentialName string + +// newEssentialName returns name without a ___ suffix. +// +// CO-RE has the concept of 'struct flavors', which are used to deal with +// changes in kernel data structures. Anything after three underscores +// in a type name is ignored for the purpose of finding a candidate type +// in the kernel's BTF. +func newEssentialName(name string) essentialName { + if name == "" { + return "" + } + lastIdx := strings.LastIndex(name, "___") + if lastIdx > 0 { + return essentialName(name[:lastIdx]) + } + return essentialName(name) +} + +// UnderlyingType skips qualifiers and Typedefs. +func UnderlyingType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + case *Typedef: + result = v.Type + default: + return result + } + } + return &cycle{typ} +} + +// QualifiedType returns the type with all qualifiers removed. +func QualifiedType(typ Type) Type { + result := typ + for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (result).(type) { + case qualifier: + result = v.qualify() + default: + return result + } + } + return &cycle{typ} +} + +// As returns typ if is of type T. Otherwise it peels qualifiers and Typedefs +// until it finds a T. +// +// Returns the zero value and false if there is no T or if the type is nested +// too deeply. +func As[T Type](typ Type) (T, bool) { + // NB: We can't make this function return (*T) since then + // we can't assert that a type matches an interface which + // embeds Type: as[composite](T). 
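+	//
+	// For example (illustrative), As[*Int](&Typedef{Type: &Int{Size: 4}}) peels
+	// the Typedef and returns the underlying *Int together with true.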
+ for depth := 0; depth <= maxResolveDepth; depth++ { + switch v := (typ).(type) { + case T: + return v, true + case qualifier: + typ = v.qualify() + case *Typedef: + typ = v.Type + default: + goto notFound + } + } +notFound: + var zero T + return zero, false +} + +type formatState struct { + fmt.State + depth int +} + +// formattableType is a subset of Type, to ease unit testing of formatType. +type formattableType interface { + fmt.Formatter + TypeName() string +} + +// formatType formats a type in a canonical form. +// +// Handles cyclical types by only printing cycles up to a certain depth. Elements +// in extra are separated by spaces unless the preceding element is a string +// ending in '='. +func formatType(f fmt.State, verb rune, t formattableType, extra ...interface{}) { + if verb != 'v' && verb != 's' { + fmt.Fprintf(f, "{UNRECOGNIZED: %c}", verb) + return + } + + _, _ = io.WriteString(f, internal.GoTypeName(t)) + + if name := t.TypeName(); name != "" { + // Output BTF type name if present. + fmt.Fprintf(f, ":%q", name) + } + + if f.Flag('+') { + // Output address if requested. + fmt.Fprintf(f, ":%#p", t) + } + + if verb == 's' { + // %s omits details. + return + } + + var depth int + if ps, ok := f.(*formatState); ok { + depth = ps.depth + f = ps.State + } + + maxDepth, ok := f.Width() + if !ok { + maxDepth = 0 + } + + if depth > maxDepth { + // We've reached the maximum depth. This avoids infinite recursion even + // for cyclical types. + return + } + + if len(extra) == 0 { + return + } + + wantSpace := false + _, _ = io.WriteString(f, "[") + for _, arg := range extra { + if wantSpace { + _, _ = io.WriteString(f, " ") + } + + switch v := arg.(type) { + case string: + _, _ = io.WriteString(f, v) + wantSpace = len(v) > 0 && v[len(v)-1] != '=' + continue + + case formattableType: + v.Format(&formatState{f, depth + 1}, verb) + + default: + fmt.Fprint(f, arg) + } + + wantSpace = true + } + _, _ = io.WriteString(f, "]") +} diff --git a/vendor/github.com/cilium/ebpf/btf/workarounds.go b/vendor/github.com/cilium/ebpf/btf/workarounds.go new file mode 100644 index 0000000000..eb09047fb3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/btf/workarounds.go @@ -0,0 +1,26 @@ +package btf + +// datasecResolveWorkaround ensures that certain vars in a Datasec are added +// to a Spec before the Datasec. This avoids a bug in kernel BTF validation. +// +// See https://lore.kernel.org/bpf/20230302123440.1193507-1-lmb@isovalent.com/ +func datasecResolveWorkaround(b *Builder, ds *Datasec) error { + for _, vsi := range ds.Vars { + v, ok := vsi.Type.(*Var) + if !ok { + continue + } + + switch v.Type.(type) { + case *Typedef, *Volatile, *Const, *Restrict, *TypeTag: + // NB: We must never call Add on a Datasec, otherwise we risk + // infinite recursion. + _, err := b.Add(v.Type) + if err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/collection.go b/vendor/github.com/cilium/ebpf/collection.go new file mode 100644 index 0000000000..1bda110a40 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/collection.go @@ -0,0 +1,1036 @@ +package ebpf + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/kconfig" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/sys" +) + +// CollectionOptions control loading a collection into the kernel. 
+// +// Maps and Programs are passed to NewMapWithOptions and NewProgramsWithOptions. +type CollectionOptions struct { + Maps MapOptions + Programs ProgramOptions + + // MapReplacements takes a set of Maps that will be used instead of + // creating new ones when loading the CollectionSpec. + // + // For each given Map, there must be a corresponding MapSpec in + // CollectionSpec.Maps, and its type, key/value size, max entries and flags + // must match the values of the MapSpec. + // + // The given Maps are Clone()d before being used in the Collection, so the + // caller can Close() them freely when they are no longer needed. + MapReplacements map[string]*Map +} + +// CollectionSpec describes a collection. +type CollectionSpec struct { + Maps map[string]*MapSpec + Programs map[string]*ProgramSpec + + // Variables refer to global variables declared in the ELF. They can be read + // and modified freely before loading the Collection. Modifying them after + // loading has no effect on a running eBPF program. + Variables map[string]*VariableSpec + + // Types holds type information about Maps and Programs. + // Modifications to Types are currently undefined behaviour. + Types *btf.Spec + + // ByteOrder specifies whether the ELF was compiled for + // big-endian or little-endian architectures. + ByteOrder binary.ByteOrder +} + +// Copy returns a recursive copy of the spec. +func (cs *CollectionSpec) Copy() *CollectionSpec { + if cs == nil { + return nil + } + + cpy := CollectionSpec{ + Maps: make(map[string]*MapSpec, len(cs.Maps)), + Programs: make(map[string]*ProgramSpec, len(cs.Programs)), + Variables: make(map[string]*VariableSpec, len(cs.Variables)), + ByteOrder: cs.ByteOrder, + Types: cs.Types.Copy(), + } + + for name, spec := range cs.Maps { + cpy.Maps[name] = spec.Copy() + } + + for name, spec := range cs.Programs { + cpy.Programs[name] = spec.Copy() + } + + for name, spec := range cs.Variables { + cpy.Variables[name] = spec.copy(&cpy) + } + + return &cpy +} + +// RewriteMaps replaces all references to specific maps. +// +// Use this function to use pre-existing maps instead of creating new ones +// when calling NewCollection. Any named maps are removed from CollectionSpec.Maps. +// +// Returns an error if a named map isn't used in at least one program. +// +// Deprecated: Pass CollectionOptions.MapReplacements when loading the Collection +// instead. +func (cs *CollectionSpec) RewriteMaps(maps map[string]*Map) error { + for symbol, m := range maps { + // have we seen a program that uses this symbol / map + seen := false + for progName, progSpec := range cs.Programs { + err := progSpec.Instructions.AssociateMap(symbol, m) + + switch { + case err == nil: + seen = true + + case errors.Is(err, asm.ErrUnreferencedSymbol): + // Not all programs need to use the map + + default: + return fmt.Errorf("program %s: %w", progName, err) + } + } + + if !seen { + return fmt.Errorf("map %s not referenced by any programs", symbol) + } + + // Prevent NewCollection from creating rewritten maps + delete(cs.Maps, symbol) + } + + return nil +} + +// MissingConstantsError is returned by [CollectionSpec.RewriteConstants]. +type MissingConstantsError struct { + // The constants missing from .rodata. + Constants []string +} + +func (m *MissingConstantsError) Error() string { + return fmt.Sprintf("some constants are missing from .rodata: %s", strings.Join(m.Constants, ", ")) +} + +// RewriteConstants replaces the value of multiple constants. 
+// +// The constant must be defined like so in the C program: +// +// volatile const type foobar; +// volatile const type foobar = default; +// +// Replacement values must be of the same length as the C sizeof(type). +// If necessary, they are marshalled according to the same rules as +// map values. +// +// From Linux 5.5 the verifier will use constants to eliminate dead code. +// +// Returns an error wrapping [MissingConstantsError] if a constant doesn't exist. +// +// Deprecated: Use [CollectionSpec.Variables] to interact with constants instead. +// RewriteConstants is now a wrapper around the VariableSpec API. +func (cs *CollectionSpec) RewriteConstants(consts map[string]interface{}) error { + var missing []string + for n, c := range consts { + v, ok := cs.Variables[n] + if !ok { + missing = append(missing, n) + continue + } + + if !v.Constant() { + return fmt.Errorf("variable %s is not a constant", n) + } + + if err := v.Set(c); err != nil { + return fmt.Errorf("rewriting constant %s: %w", n, err) + } + } + + if len(missing) != 0 { + return fmt.Errorf("rewrite constants: %w", &MissingConstantsError{Constants: missing}) + } + + return nil +} + +// Assign the contents of a CollectionSpec to a struct. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the +// struct is updated with values from Programs, Maps or Variables if it +// has an `ebpf` tag and its type is *ProgramSpec, *MapSpec or *VariableSpec. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.ProgramSpec `ebpf:"xdp_foo"` +// Bar *ebpf.MapSpec `ebpf:"bar_map"` +// Var *ebpf.VariableSpec `ebpf:"some_var"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Spec is assigned multiple times. +func (cs *CollectionSpec) Assign(to interface{}) error { + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + case reflect.TypeOf((*ProgramSpec)(nil)): + if p := cs.Programs[name]; p != nil { + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*MapSpec)(nil)): + if m := cs.Maps[name]; m != nil { + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + case reflect.TypeOf((*VariableSpec)(nil)): + if v := cs.Variables[name]; v != nil { + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + return assignValues(to, getValue) +} + +// LoadAndAssign loads Maps and Programs into the kernel and assigns them +// to a struct. +// +// Omitting Map/Program.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +// +// This function is a shortcut to manually checking the presence +// of maps and programs in a CollectionSpec. Consider using bpf2go +// if this sounds useful. +// +// 'to' must be a pointer to a struct. A field of the struct is updated with +// a Program or Map if it has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as found in the +// CollectionSpec. Before updating the struct, the requested objects and their +// dependent resources are loaded into the kernel and populated with values if +// specified. 
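+//
+// A typical call site (an illustrative sketch; spec, objs and Foo are example
+// names, with objs shaped like the struct shown below) is:
+//
+//	err := spec.LoadAndAssign(&objs, nil)
+//	defer objs.Foo.Close()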
+// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// opts may be nil. +// +// Returns an error if any of the fields can't be found, or +// if the same Map or Program is assigned multiple times. +func (cs *CollectionSpec) LoadAndAssign(to interface{}, opts *CollectionOptions) error { + loader, err := newCollectionLoader(cs, opts) + if err != nil { + return err + } + defer loader.close() + + // Support assigning Programs and Maps, lazy-loading the required objects. + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) + + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + assignedProgs[name] = true + return loader.loadProgram(name) + + case reflect.TypeOf((*Map)(nil)): + assignedMaps[name] = true + return loader.loadMap(name) + + case reflect.TypeOf((*Variable)(nil)): + assignedVars[name] = true + return loader.loadVariable(name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + // Load the Maps and Programs requested by the annotated struct. + if err := assignValues(to, getValue); err != nil { + return err + } + + // Populate the requested maps. Has a chance of lazy-loading other dependent maps. + if err := loader.populateDeferredMaps(); err != nil { + return err + } + + // Evaluate the loader's objects after all (lazy)loading has taken place. + for n, m := range loader.maps { + switch m.typ { + case ProgramArray: + // Require all lazy-loaded ProgramArrays to be assigned to the given object. + // The kernel empties a ProgramArray once the last user space reference + // to it closes, which leads to failed tail calls. Combined with the library + // closing map fds via GC finalizers this can lead to surprising behaviour. + // Only allow unassigned ProgramArrays when the library hasn't pre-populated + // any entries from static value declarations. At this point, we know the map + // is empty and there's no way for the caller to interact with the map going + // forward. + if !assignedMaps[n] && len(cs.Maps[n].Contents) > 0 { + return fmt.Errorf("ProgramArray %s must be assigned to prevent missed tail calls", n) + } + } + } + + // Prevent loader.cleanup() from closing assigned Maps and Programs. + for m := range assignedMaps { + delete(loader.maps, m) + } + for p := range assignedProgs { + delete(loader.programs, p) + } + for p := range assignedVars { + delete(loader.vars, p) + } + + return nil +} + +// Collection is a collection of live BPF resources present in the kernel. +type Collection struct { + Programs map[string]*Program + Maps map[string]*Map + + // Variables contains global variables used by the Collection's program(s). On + // kernels older than 5.5, most interactions with Variables return + // [ErrNotSupported]. + Variables map[string]*Variable +} + +// NewCollection creates a Collection from the given spec, creating and +// loading its declared resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollection(spec *CollectionSpec) (*Collection, error) { + return NewCollectionWithOptions(spec, CollectionOptions{}) +} + +// NewCollectionWithOptions creates a Collection from the given spec using +// options, creating and loading its declared resources into the kernel. 
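+//
+// An illustrative use of the options (the map name "events" and the existing
+// Map are example values only):
+//
+//	coll, err := ebpf.NewCollectionWithOptions(spec, ebpf.CollectionOptions{
+//		MapReplacements: map[string]*ebpf.Map{"events": existing},
+//	})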
+// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func NewCollectionWithOptions(spec *CollectionSpec, opts CollectionOptions) (*Collection, error) { + loader, err := newCollectionLoader(spec, &opts) + if err != nil { + return nil, err + } + defer loader.close() + + // Create maps first, as their fds need to be linked into programs. + for mapName := range spec.Maps { + if _, err := loader.loadMap(mapName); err != nil { + return nil, err + } + } + + for progName, prog := range spec.Programs { + if prog.Type == UnspecifiedProgram { + continue + } + + if _, err := loader.loadProgram(progName); err != nil { + return nil, err + } + } + + for varName := range spec.Variables { + if _, err := loader.loadVariable(varName); err != nil { + return nil, err + } + } + + // Maps can contain Program and Map stubs, so populate them after + // all Maps and Programs have been successfully loaded. + if err := loader.populateDeferredMaps(); err != nil { + return nil, err + } + + // Prevent loader.cleanup from closing maps, programs and vars. + maps, progs, vars := loader.maps, loader.programs, loader.vars + loader.maps, loader.programs, loader.vars = nil, nil, nil + + return &Collection{ + progs, + maps, + vars, + }, nil +} + +type collectionLoader struct { + coll *CollectionSpec + opts *CollectionOptions + maps map[string]*Map + programs map[string]*Program + vars map[string]*Variable +} + +func newCollectionLoader(coll *CollectionSpec, opts *CollectionOptions) (*collectionLoader, error) { + if opts == nil { + opts = &CollectionOptions{} + } + + // Check for existing MapSpecs in the CollectionSpec for all provided replacement maps. + for name, m := range opts.MapReplacements { + spec, ok := coll.Maps[name] + if !ok { + return nil, fmt.Errorf("replacement map %s not found in CollectionSpec", name) + } + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("using replacement map %s: %w", spec.Name, err) + } + } + + if err := populateKallsyms(coll.Programs); err != nil { + return nil, fmt.Errorf("populating kallsyms caches: %w", err) + } + + return &collectionLoader{ + coll, + opts, + make(map[string]*Map), + make(map[string]*Program), + make(map[string]*Variable), + }, nil +} + +// populateKallsyms populates kallsyms caches, making lookups cheaper later on +// during individual program loading. Since we have less context available +// at those stages, we batch the lookups here instead to avoid redundant work. +func populateKallsyms(progs map[string]*ProgramSpec) error { + // Look up associated kernel modules for all symbols referenced by + // ProgramSpec.AttachTo for program types that support attaching to kmods. + mods := make(map[string]string) + for _, p := range progs { + if p.AttachTo != "" && p.targetsKernelModule() { + mods[p.AttachTo] = "" + } + } + if len(mods) != 0 { + if err := kallsyms.AssignModules(mods); err != nil { + return fmt.Errorf("getting modules from kallsyms: %w", err) + } + } + + // Look up addresses of all kernel symbols referenced by all programs. 
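+	// Each referenced symbol is seeded with address zero here; the single
+	// kallsyms.AssignAddresses call below fills in the real addresses in one
+	// batch.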
+ addrs := make(map[string]uint64) + for _, p := range progs { + iter := p.Instructions.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta != nil { + addrs[meta.Name] = 0 + } + } + } + if len(addrs) != 0 { + if err := kallsyms.AssignAddresses(addrs); err != nil { + return fmt.Errorf("getting addresses from kallsyms: %w", err) + } + } + + return nil +} + +// close all resources left over in the collectionLoader. +func (cl *collectionLoader) close() { + for _, m := range cl.maps { + m.Close() + } + for _, p := range cl.programs { + p.Close() + } +} + +func (cl *collectionLoader) loadMap(mapName string) (*Map, error) { + if m := cl.maps[mapName]; m != nil { + return m, nil + } + + mapSpec := cl.coll.Maps[mapName] + if mapSpec == nil { + return nil, fmt.Errorf("missing map %s", mapName) + } + + if replaceMap, ok := cl.opts.MapReplacements[mapName]; ok { + // Clone the map to avoid closing user's map later on. + m, err := replaceMap.Clone() + if err != nil { + return nil, err + } + + cl.maps[mapName] = m + return m, nil + } + + // Defer setting the mmapable flag on maps until load time. This avoids the + // MapSpec having different flags on some kernel versions. Also avoid running + // syscalls during ELF loading, so platforms like wasm can also parse an ELF. + if isDataSection(mapSpec.Name) && haveMmapableMaps() == nil { + mapSpec.Flags |= sys.BPF_F_MMAPABLE + } + + m, err := newMapWithOptions(mapSpec, cl.opts.Maps) + if err != nil { + return nil, fmt.Errorf("map %s: %w", mapName, err) + } + + // Finalize 'scalar' maps that don't refer to any other eBPF resources + // potentially pending creation. This is needed for frozen maps like .rodata + // that need to be finalized before invoking the verifier. + if !mapSpec.Type.canStoreMapOrProgram() { + if err := m.finalize(mapSpec); err != nil { + return nil, fmt.Errorf("finalizing map %s: %w", mapName, err) + } + } + + cl.maps[mapName] = m + return m, nil +} + +func (cl *collectionLoader) loadProgram(progName string) (*Program, error) { + if prog := cl.programs[progName]; prog != nil { + return prog, nil + } + + progSpec := cl.coll.Programs[progName] + if progSpec == nil { + return nil, fmt.Errorf("unknown program %s", progName) + } + + // Bail out early if we know the kernel is going to reject the program. + // This skips loading map dependencies, saving some cleanup work later. + if progSpec.Type == UnspecifiedProgram { + return nil, fmt.Errorf("cannot load program %s: program type is unspecified", progName) + } + + progSpec = progSpec.Copy() + + // Rewrite any reference to a valid map in the program's instructions, + // which includes all of its dependencies. + for i := range progSpec.Instructions { + ins := &progSpec.Instructions[i] + + if !ins.IsLoadFromMap() || ins.Reference() == "" { + continue + } + + // Don't overwrite map loads containing non-zero map fd's, + // they can be manually included by the caller. + // Map FDs/IDs are placed in the lower 32 bits of Constant. 
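+		// A positive value in those bits means the caller already supplied an
+		// fd or ID, so the instruction is left as is.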
+ if int32(ins.Constant) > 0 { + continue + } + + m, err := cl.loadMap(ins.Reference()) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + if err := ins.AssociateMap(m); err != nil { + return nil, fmt.Errorf("program %s: map %s: %w", progName, ins.Reference(), err) + } + } + + prog, err := newProgramWithOptions(progSpec, cl.opts.Programs) + if err != nil { + return nil, fmt.Errorf("program %s: %w", progName, err) + } + + cl.programs[progName] = prog + return prog, nil +} + +func (cl *collectionLoader) loadVariable(varName string) (*Variable, error) { + if v := cl.vars[varName]; v != nil { + return v, nil + } + + varSpec := cl.coll.Variables[varName] + if varSpec == nil { + return nil, fmt.Errorf("unknown variable %s", varName) + } + + // Get the key of the VariableSpec's MapSpec in the CollectionSpec. + var mapName string + for n, ms := range cl.coll.Maps { + if ms == varSpec.m { + mapName = n + break + } + } + if mapName == "" { + return nil, fmt.Errorf("variable %s: underlying MapSpec %s was removed from CollectionSpec", varName, varSpec.m.Name) + } + + m, err := cl.loadMap(mapName) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + // If the kernel is too old or the underlying map was created without + // BPF_F_MMAPABLE, [Map.Memory] will return ErrNotSupported. In this case, + // emit a Variable with a nil Memory. This keeps Collection{Spec}.Variables + // consistent across systems with different feature sets without breaking + // LoadAndAssign. + mm, err := m.Memory() + if err != nil && !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("variable %s: getting memory for map %s: %w", varName, mapName, err) + } + + v, err := newVariable( + varSpec.name, + varSpec.offset, + varSpec.size, + varSpec.t, + mm, + ) + if err != nil { + return nil, fmt.Errorf("variable %s: %w", varName, err) + } + + cl.vars[varName] = v + return v, nil +} + +// populateDeferredMaps iterates maps holding programs or other maps and loads +// any dependencies. Populates all maps in cl and freezes them if specified. +func (cl *collectionLoader) populateDeferredMaps() error { + for mapName, m := range cl.maps { + mapSpec, ok := cl.coll.Maps[mapName] + if !ok { + return fmt.Errorf("missing map spec %s", mapName) + } + + // Scalar maps without Map or Program references are finalized during + // creation. Don't finalize them again. + if !mapSpec.Type.canStoreMapOrProgram() { + continue + } + + mapSpec = mapSpec.Copy() + + // MapSpecs that refer to inner maps or programs within the same + // CollectionSpec do so using strings. These strings are used as the key + // to look up the respective object in the Maps or Programs fields. + // Resolve those references to actual Map or Program resources that + // have been loaded into the kernel. + for i, kv := range mapSpec.Contents { + objName, ok := kv.Value.(string) + if !ok { + continue + } + + switch t := mapSpec.Type; { + case t.canStoreProgram(): + // loadProgram is idempotent and could return an existing Program. + prog, err := cl.loadProgram(objName) + if err != nil { + return fmt.Errorf("loading program %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, prog} + + case t.canStoreMap(): + // loadMap is idempotent and could return an existing Map. 
+ innerMap, err := cl.loadMap(objName) + if err != nil { + return fmt.Errorf("loading inner map %s, for map %s: %w", objName, mapName, err) + } + mapSpec.Contents[i] = MapKV{kv.Key, innerMap} + } + } + + // Populate and freeze the map if specified. + if err := m.finalize(mapSpec); err != nil { + return fmt.Errorf("populating map %s: %w", mapName, err) + } + } + + return nil +} + +// resolveKconfig resolves all variables declared in .kconfig and populates +// m.Contents. Does nothing if the given m.Contents is non-empty. +func resolveKconfig(m *MapSpec) error { + ds, ok := m.Value.(*btf.Datasec) + if !ok { + return errors.New("map value is not a Datasec") + } + + type configInfo struct { + offset uint32 + size uint32 + typ btf.Type + } + + configs := make(map[string]configInfo) + + data := make([]byte, ds.Size) + for _, vsi := range ds.Vars { + v := vsi.Type.(*btf.Var) + n := v.TypeName() + + switch n { + case "LINUX_KERNEL_VERSION": + if integer, ok := v.Type.(*btf.Int); !ok || integer.Size != 4 { + return fmt.Errorf("variable %s must be a 32 bits integer, got %s", n, v.Type) + } + + kv, err := linux.KernelVersion() + if err != nil { + return fmt.Errorf("getting kernel version: %w", err) + } + internal.NativeEndian.PutUint32(data[vsi.Offset:], kv.Kernel()) + + case "LINUX_HAS_SYSCALL_WRAPPER": + integer, ok := v.Type.(*btf.Int) + if !ok { + return fmt.Errorf("variable %s must be an integer, got %s", n, v.Type) + } + var value uint64 = 1 + if err := haveSyscallWrapper(); errors.Is(err, ErrNotSupported) { + value = 0 + } else if err != nil { + return fmt.Errorf("unable to derive a value for LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + if err := kconfig.PutInteger(data[vsi.Offset:], integer, value); err != nil { + return fmt.Errorf("set LINUX_HAS_SYSCALL_WRAPPER: %w", err) + } + + default: // Catch CONFIG_*. + configs[n] = configInfo{ + offset: vsi.Offset, + size: vsi.Size, + typ: v.Type, + } + } + } + + // We only parse kconfig file if a CONFIG_* variable was found. + if len(configs) > 0 { + f, err := linux.FindKConfig() + if err != nil { + return fmt.Errorf("cannot find a kconfig file: %w", err) + } + defer f.Close() + + filter := make(map[string]struct{}, len(configs)) + for config := range configs { + filter[config] = struct{}{} + } + + kernelConfig, err := kconfig.Parse(f, filter) + if err != nil { + return fmt.Errorf("cannot parse kconfig file: %w", err) + } + + for n, info := range configs { + value, ok := kernelConfig[n] + if !ok { + return fmt.Errorf("config option %q does not exist on this kernel", n) + } + + err := kconfig.PutValue(data[info.offset:info.offset+info.size], info.typ, value) + if err != nil { + return fmt.Errorf("problem adding value for %s: %w", n, err) + } + } + } + + m.Contents = []MapKV{{uint32(0), data}} + + return nil +} + +// LoadCollection reads an object file and creates and loads its declared +// resources into the kernel. +// +// Omitting Collection.Close() during application shutdown is an error. +// See the package documentation for details around Map and Program lifecycle. +func LoadCollection(file string) (*Collection, error) { + spec, err := LoadCollectionSpec(file) + if err != nil { + return nil, err + } + return NewCollection(spec) +} + +// Assign the contents of a Collection to a struct. +// +// This function bridges functionality between bpf2go generated +// code and any functionality better implemented in Collection. +// +// 'to' must be a pointer to a struct. 
A field of the +// struct is updated with values from Programs or Maps if it +// has an `ebpf` tag and its type is *Program or *Map. +// The tag's value specifies the name of the program or map as +// found in the CollectionSpec. +// +// struct { +// Foo *ebpf.Program `ebpf:"xdp_foo"` +// Bar *ebpf.Map `ebpf:"bar_map"` +// Ignored int +// } +// +// Returns an error if any of the eBPF objects can't be found, or +// if the same Map or Program is assigned multiple times. +// +// Ownership and Close()ing responsibility is transferred to `to` +// for any successful assigns. On error `to` is left in an undefined state. +func (coll *Collection) Assign(to interface{}) error { + assignedMaps := make(map[string]bool) + assignedProgs := make(map[string]bool) + assignedVars := make(map[string]bool) + + // Assign() only transfers already-loaded Maps and Programs. No extra + // loading is done. + getValue := func(typ reflect.Type, name string) (interface{}, error) { + switch typ { + + case reflect.TypeOf((*Program)(nil)): + if p := coll.Programs[name]; p != nil { + assignedProgs[name] = true + return p, nil + } + return nil, fmt.Errorf("missing program %q", name) + + case reflect.TypeOf((*Map)(nil)): + if m := coll.Maps[name]; m != nil { + assignedMaps[name] = true + return m, nil + } + return nil, fmt.Errorf("missing map %q", name) + + case reflect.TypeOf((*Variable)(nil)): + if v := coll.Variables[name]; v != nil { + assignedVars[name] = true + return v, nil + } + return nil, fmt.Errorf("missing variable %q", name) + + default: + return nil, fmt.Errorf("unsupported type %s", typ) + } + } + + if err := assignValues(to, getValue); err != nil { + return err + } + + // Finalize ownership transfer + for p := range assignedProgs { + delete(coll.Programs, p) + } + for m := range assignedMaps { + delete(coll.Maps, m) + } + for s := range assignedVars { + delete(coll.Variables, s) + } + + return nil +} + +// Close frees all maps and programs associated with the collection. +// +// The collection mustn't be used afterwards. +func (coll *Collection) Close() { + for _, prog := range coll.Programs { + prog.Close() + } + for _, m := range coll.Maps { + m.Close() + } +} + +// DetachMap removes the named map from the Collection. +// +// This means that a later call to Close() will not affect this map. +// +// Returns nil if no map of that name exists. +func (coll *Collection) DetachMap(name string) *Map { + m := coll.Maps[name] + delete(coll.Maps, name) + return m +} + +// DetachProgram removes the named program from the Collection. +// +// This means that a later call to Close() will not affect this program. +// +// Returns nil if no program of that name exists. +func (coll *Collection) DetachProgram(name string) *Program { + p := coll.Programs[name] + delete(coll.Programs, name) + return p +} + +// structField represents a struct field containing the ebpf struct tag. +type structField struct { + reflect.StructField + value reflect.Value +} + +// ebpfFields extracts field names tagged with 'ebpf' from a struct type. +// Keep track of visited types to avoid infinite recursion. 
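+//
+// An illustrative input (field names invented for this comment):
+//
+//	struct {
+//		Prog   *Program `ebpf:"xdp_prog"`
+//		Nested struct {
+//			M *Map `ebpf:"my_map"`
+//		}
+//	}
+//
+// yields both Prog and Nested.M, because untagged struct (and pointer to
+// struct) fields are descended into.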
+func ebpfFields(structVal reflect.Value, visited map[reflect.Type]bool) ([]structField, error) { + if visited == nil { + visited = make(map[reflect.Type]bool) + } + + structType := structVal.Type() + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("%s is not a struct", structType) + } + + if visited[structType] { + return nil, fmt.Errorf("recursion on type %s", structType) + } + + fields := make([]structField, 0, structType.NumField()) + for i := 0; i < structType.NumField(); i++ { + field := structField{structType.Field(i), structVal.Field(i)} + + // If the field is tagged, gather it and move on. + name := field.Tag.Get("ebpf") + if name != "" { + fields = append(fields, field) + continue + } + + // If the field does not have an ebpf tag, but is a struct or a pointer + // to a struct, attempt to gather its fields as well. + var v reflect.Value + switch field.Type.Kind() { + case reflect.Ptr: + if field.Type.Elem().Kind() != reflect.Struct { + continue + } + + if field.value.IsNil() { + return nil, fmt.Errorf("nil pointer to %s", structType) + } + + // Obtain the destination type of the pointer. + v = field.value.Elem() + + case reflect.Struct: + // Reference the value's type directly. + v = field.value + + default: + continue + } + + inner, err := ebpfFields(v, visited) + if err != nil { + return nil, fmt.Errorf("field %s: %w", field.Name, err) + } + + fields = append(fields, inner...) + } + + return fields, nil +} + +// assignValues attempts to populate all fields of 'to' tagged with 'ebpf'. +// +// getValue is called for every tagged field of 'to' and must return the value +// to be assigned to the field with the given typ and name. +func assignValues(to interface{}, + getValue func(typ reflect.Type, name string) (interface{}, error)) error { + + toValue := reflect.ValueOf(to) + if toValue.Type().Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer to struct", to) + } + + if toValue.IsNil() { + return fmt.Errorf("nil pointer to %T", to) + } + + fields, err := ebpfFields(toValue.Elem(), nil) + if err != nil { + return err + } + + type elem struct { + // Either *Map or *Program + typ reflect.Type + name string + } + + assigned := make(map[elem]string) + for _, field := range fields { + // Get string value the field is tagged with. + tag := field.Tag.Get("ebpf") + if strings.Contains(tag, ",") { + return fmt.Errorf("field %s: ebpf tag contains a comma", field.Name) + } + + // Check if the eBPF object with the requested + // type and tag was already assigned elsewhere. + e := elem{field.Type, tag} + if af := assigned[e]; af != "" { + return fmt.Errorf("field %s: object %q was already assigned to %s", field.Name, tag, af) + } + + // Get the eBPF object referred to by the tag. 
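+		// getValue is supplied by the caller: Collection.Assign only hands out
+		// already-loaded objects, while CollectionSpec.LoadAndAssign loads them
+		// lazily on first request.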
+ value, err := getValue(field.Type, tag) + if err != nil { + return fmt.Errorf("field %s: %w", field.Name, err) + } + + if !field.value.CanSet() { + return fmt.Errorf("field %s: can't set value", field.Name) + } + field.value.Set(reflect.ValueOf(value)) + + assigned[e] = field.Name + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/cpu.go b/vendor/github.com/cilium/ebpf/cpu.go new file mode 100644 index 0000000000..07e959efdc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/cpu.go @@ -0,0 +1,66 @@ +package ebpf + +import ( + "fmt" + "os" + "strings" + "sync" +) + +var possibleCPU = sync.OnceValues(func() (int, error) { + return parseCPUsFromFile("/sys/devices/system/cpu/possible") +}) + +// PossibleCPU returns the max number of CPUs a system may possibly have +// Logical CPU numbers must be of the form 0-n +func PossibleCPU() (int, error) { + return possibleCPU() +} + +// MustPossibleCPU is a helper that wraps a call to PossibleCPU and panics if +// the error is non-nil. +func MustPossibleCPU() int { + cpus, err := PossibleCPU() + if err != nil { + panic(err) + } + return cpus +} + +func parseCPUsFromFile(path string) (int, error) { + spec, err := os.ReadFile(path) + if err != nil { + return 0, err + } + + n, err := parseCPUs(string(spec)) + if err != nil { + return 0, fmt.Errorf("can't parse %s: %v", path, err) + } + + return n, nil +} + +// parseCPUs parses the number of cpus from a string produced +// by bitmap_list_string() in the Linux kernel. +// Multiple ranges are rejected, since they can't be unified +// into a single number. +// This is the format of /sys/devices/system/cpu/possible, it +// is not suitable for /sys/devices/system/cpu/online, etc. +func parseCPUs(spec string) (int, error) { + if strings.Trim(spec, "\n") == "0" { + return 1, nil + } + + var low, high int + n, err := fmt.Sscanf(spec, "%d-%d\n", &low, &high) + if n != 2 || err != nil { + return 0, fmt.Errorf("invalid format: %s", spec) + } + if low != 0 { + return 0, fmt.Errorf("CPU spec doesn't start at zero: %s", spec) + } + + // cpus is 0 indexed + return high + 1, nil +} diff --git a/vendor/github.com/cilium/ebpf/doc.go b/vendor/github.com/cilium/ebpf/doc.go new file mode 100644 index 0000000000..396b3394d3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/doc.go @@ -0,0 +1,25 @@ +// Package ebpf is a toolkit for working with eBPF programs. +// +// eBPF programs are small snippets of code which are executed directly +// in a VM in the Linux kernel, which makes them very fast and flexible. +// Many Linux subsystems now accept eBPF programs. This makes it possible +// to implement highly application specific logic inside the kernel, +// without having to modify the actual kernel itself. +// +// This package is designed for long-running processes which +// want to use eBPF to implement part of their application logic. It has no +// run-time dependencies outside of the library and the Linux kernel itself. +// eBPF code should be compiled ahead of time using clang, and shipped with +// your application as any other resource. +// +// Use the link subpackage to attach a loaded program to a hook in the kernel. +// +// Note that losing all references to Map and Program resources will cause +// their underlying file descriptors to be closed, potentially removing those +// objects from the kernel. Always retain a reference by e.g. deferring a +// Close() of a Collection or LoadAndAssign object until application exit. 
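+//
+// A minimal end-to-end sketch (the object file name is illustrative):
+//
+//	spec, err := ebpf.LoadCollectionSpec("prog.o")
+//	// handle err
+//	coll, err := ebpf.NewCollection(spec)
+//	// handle err
+//	defer coll.Close()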
+// +// Special care needs to be taken when handling maps of type ProgramArray, +// as the kernel erases its contents when the last userspace or bpffs +// reference disappears, regardless of the map being in active use. +package ebpf diff --git a/vendor/github.com/cilium/ebpf/elf_reader.go b/vendor/github.com/cilium/ebpf/elf_reader.go new file mode 100644 index 0000000000..9e8dbc7ae5 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_reader.go @@ -0,0 +1,1457 @@ +package ebpf + +import ( + "bufio" + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +type kconfigMetaKey struct{} + +type kconfigMeta struct { + Map *MapSpec + Offset uint32 +} + +type kfuncMetaKey struct{} + +type kfuncMeta struct { + Binding elf.SymBind + Func *btf.Func +} + +type ksymMetaKey struct{} + +type ksymMeta struct { + Binding elf.SymBind + Name string +} + +// elfCode is a convenience to reduce the amount of arguments that have to +// be passed around explicitly. You should treat its contents as immutable. +type elfCode struct { + *internal.SafeELFFile + sections map[elf.SectionIndex]*elfSection + license string + version uint32 + btf *btf.Spec + extInfo *btf.ExtInfos + maps map[string]*MapSpec + vars map[string]*VariableSpec + kfuncs map[string]*btf.Func + ksyms map[string]struct{} + kconfig *MapSpec +} + +// LoadCollectionSpec parses an ELF file into a CollectionSpec. +func LoadCollectionSpec(file string) (*CollectionSpec, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + spec, err := LoadCollectionSpecFromReader(f) + if err != nil { + return nil, fmt.Errorf("file %s: %w", file, err) + } + return spec, nil +} + +// LoadCollectionSpecFromReader parses an ELF file into a CollectionSpec. +func LoadCollectionSpecFromReader(rd io.ReaderAt) (*CollectionSpec, error) { + f, err := internal.NewSafeELFFile(rd) + if err != nil { + return nil, err + } + + // Checks if the ELF file is for BPF data. + // Old LLVM versions set e_machine to EM_NONE. + if f.File.Machine != elf.EM_NONE && f.File.Machine != elf.EM_BPF { + return nil, fmt.Errorf("unexpected machine type for BPF ELF: %s", f.File.Machine) + } + + var ( + licenseSection *elf.Section + versionSection *elf.Section + sections = make(map[elf.SectionIndex]*elfSection) + relSections = make(map[elf.SectionIndex]*elf.Section) + ) + + // This is the target of relocations generated by inline assembly. + sections[elf.SHN_UNDEF] = newElfSection(new(elf.Section), undefSection) + + // Collect all the sections we're interested in. This includes relocations + // which we parse later. + // + // Keep the documentation at docs/ebpf/loading/elf-sections.md up-to-date. 
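+	// The switch below classifies each section by name and type: license,
+	// version, legacy map definitions, BTF-style .maps, data sections,
+	// relocation tables and executable program sections.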
+ for i, sec := range f.Sections { + idx := elf.SectionIndex(i) + + switch { + case strings.HasPrefix(sec.Name, "license"): + licenseSection = sec + case strings.HasPrefix(sec.Name, "version"): + versionSection = sec + case strings.HasPrefix(sec.Name, "maps"): + sections[idx] = newElfSection(sec, mapSection) + case sec.Name == ".maps": + sections[idx] = newElfSection(sec, btfMapSection) + case isDataSection(sec.Name): + sections[idx] = newElfSection(sec, dataSection) + case sec.Type == elf.SHT_REL: + // Store relocations under the section index of the target + relSections[elf.SectionIndex(sec.Info)] = sec + case sec.Type == elf.SHT_PROGBITS && (sec.Flags&elf.SHF_EXECINSTR) != 0 && sec.Size > 0: + sections[idx] = newElfSection(sec, programSection) + } + } + + license, err := loadLicense(licenseSection) + if err != nil { + return nil, fmt.Errorf("load license: %w", err) + } + + version, err := loadVersion(versionSection, f.ByteOrder) + if err != nil { + return nil, fmt.Errorf("load version: %w", err) + } + + btfSpec, btfExtInfo, err := btf.LoadSpecAndExtInfosFromReader(rd) + if err != nil && !errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + ec := &elfCode{ + SafeELFFile: f, + sections: sections, + license: license, + version: version, + btf: btfSpec, + extInfo: btfExtInfo, + maps: make(map[string]*MapSpec), + vars: make(map[string]*VariableSpec), + kfuncs: make(map[string]*btf.Func), + ksyms: make(map[string]struct{}), + } + + symbols, err := f.Symbols() + if err != nil { + return nil, fmt.Errorf("load symbols: %v", err) + } + + ec.assignSymbols(symbols) + + if err := ec.loadRelocations(relSections, symbols); err != nil { + return nil, fmt.Errorf("load relocations: %w", err) + } + + if err := ec.loadMaps(); err != nil { + return nil, fmt.Errorf("load maps: %w", err) + } + + if err := ec.loadBTFMaps(); err != nil { + return nil, fmt.Errorf("load BTF maps: %w", err) + } + + if err := ec.loadDataSections(); err != nil { + return nil, fmt.Errorf("load data sections: %w", err) + } + + if err := ec.loadKconfigSection(); err != nil { + return nil, fmt.Errorf("load virtual .kconfig section: %w", err) + } + + if err := ec.loadKsymsSection(); err != nil { + return nil, fmt.Errorf("load virtual .ksyms section: %w", err) + } + + // Finally, collect programs and link them. 
+ progs, err := ec.loadProgramSections() + if err != nil { + return nil, fmt.Errorf("load programs: %w", err) + } + + return &CollectionSpec{ec.maps, progs, ec.vars, btfSpec, ec.ByteOrder}, nil +} + +func loadLicense(sec *elf.Section) (string, error) { + if sec == nil { + return "", nil + } + + data, err := sec.Data() + if err != nil { + return "", fmt.Errorf("section %s: %v", sec.Name, err) + } + return string(bytes.TrimRight(data, "\000")), nil +} + +func loadVersion(sec *elf.Section, bo binary.ByteOrder) (uint32, error) { + if sec == nil { + return 0, nil + } + + var version uint32 + if err := binary.Read(sec.Open(), bo, &version); err != nil { + return 0, fmt.Errorf("section %s: %v", sec.Name, err) + } + return version, nil +} + +func isDataSection(name string) bool { + return name == ".bss" || strings.HasPrefix(name, ".data") || strings.HasPrefix(name, ".rodata") +} + +func isConstantDataSection(name string) bool { + return strings.HasPrefix(name, ".rodata") +} + +func isKconfigSection(name string) bool { + return name == ".kconfig" +} + +type elfSectionKind int + +const ( + undefSection elfSectionKind = iota + mapSection + btfMapSection + programSection + dataSection +) + +type elfSection struct { + *elf.Section + kind elfSectionKind + // Offset from the start of the section to a symbol + symbols map[uint64]elf.Symbol + // Offset from the start of the section to a relocation, which points at + // a symbol in another section. + relocations map[uint64]elf.Symbol + // The number of relocations pointing at this section. + references int +} + +func newElfSection(section *elf.Section, kind elfSectionKind) *elfSection { + return &elfSection{ + section, + kind, + make(map[uint64]elf.Symbol), + make(map[uint64]elf.Symbol), + 0, + } +} + +// assignSymbols takes a list of symbols and assigns them to their +// respective sections, indexed by name. +func (ec *elfCode) assignSymbols(symbols []elf.Symbol) { + for _, symbol := range symbols { + symType := elf.ST_TYPE(symbol.Info) + symSection := ec.sections[symbol.Section] + if symSection == nil { + continue + } + + // Anonymous symbols only occur in debug sections which we don't process + // relocations for. Anonymous symbols are not referenced from other sections. + if symbol.Name == "" { + continue + } + + // Older versions of LLVM don't tag symbols correctly, so keep + // all NOTYPE ones. + switch symSection.kind { + case mapSection, btfMapSection, dataSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_OBJECT { + continue + } + case programSection: + if symType != elf.STT_NOTYPE && symType != elf.STT_FUNC { + continue + } + // LLVM emits LBB_ (Local Basic Block) symbols that seem to be jump + // targets within sections, but BPF has no use for them. + if symType == elf.STT_NOTYPE && elf.ST_BIND(symbol.Info) == elf.STB_LOCAL && + strings.HasPrefix(symbol.Name, "LBB") { + continue + } + // Only collect symbols that occur in program/maps/data sections. + default: + continue + } + + symSection.symbols[symbol.Value] = symbol + } +} + +// loadRelocations iterates .rel* sections and extracts relocation entries for +// sections of interest. Makes sure relocations point at valid sections. 
+func (ec *elfCode) loadRelocations(relSections map[elf.SectionIndex]*elf.Section, symbols []elf.Symbol) error { + for idx, relSection := range relSections { + section := ec.sections[idx] + if section == nil { + continue + } + + rels, err := ec.loadSectionRelocations(relSection, symbols) + if err != nil { + return fmt.Errorf("relocation for section %q: %w", section.Name, err) + } + + for _, rel := range rels { + target := ec.sections[rel.Section] + if target == nil { + return fmt.Errorf("section %q: reference to %q in section %s: %w", section.Name, rel.Name, rel.Section, ErrNotSupported) + } + + target.references++ + } + + section.relocations = rels + } + + return nil +} + +// loadProgramSections iterates ec's sections and emits a ProgramSpec +// for each function it finds. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadProgramSections() (map[string]*ProgramSpec, error) { + + progs := make(map[string]*ProgramSpec) + + // Generate a ProgramSpec for each function found in each program section. + var export []string + for _, sec := range ec.sections { + if sec.kind != programSection { + continue + } + + if len(sec.symbols) == 0 { + return nil, fmt.Errorf("section %v: missing symbols", sec.Name) + } + + funcs, err := ec.loadFunctions(sec) + if err != nil { + return nil, fmt.Errorf("section %v: %w", sec.Name, err) + } + + progType, attachType, progFlags, attachTo := getProgType(sec.Name) + + for name, insns := range funcs { + spec := &ProgramSpec{ + Name: name, + Type: progType, + Flags: progFlags, + AttachType: attachType, + AttachTo: attachTo, + SectionName: sec.Name, + License: ec.license, + KernelVersion: ec.version, + Instructions: insns, + ByteOrder: ec.ByteOrder, + } + + // Function names must be unique within a single ELF blob. + if progs[name] != nil { + return nil, fmt.Errorf("duplicate program name %s", name) + } + progs[name] = spec + + if spec.SectionName != ".text" { + export = append(export, name) + } + } + } + + flattenPrograms(progs, export) + + // Hide programs (e.g. library functions) that were not explicitly emitted + // to an ELF section. These could be exposed in a separate CollectionSpec + // field later to allow them to be modified. + for n, p := range progs { + if p.SectionName == ".text" { + delete(progs, n) + } + } + + return progs, nil +} + +// loadFunctions extracts instruction streams from the given program section +// starting at each symbol in the section. The section's symbols must already +// be narrowed down to STT_NOTYPE (emitted by clang <8) or STT_FUNC. +// +// The resulting map is indexed by function name. +func (ec *elfCode) loadFunctions(section *elfSection) (map[string]asm.Instructions, error) { + r := bufio.NewReader(section.Open()) + + // Decode the section's instruction stream. + insns := make(asm.Instructions, 0, section.Size/asm.InstructionSize) + if err := insns.Unmarshal(r, ec.ByteOrder); err != nil { + return nil, fmt.Errorf("decoding instructions for section %s: %w", section.Name, err) + } + if len(insns) == 0 { + return nil, fmt.Errorf("no instructions found in section %s", section.Name) + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + offset := iter.Offset.Bytes() + + // Tag Symbol Instructions. + if sym, ok := section.symbols[offset]; ok { + *ins = ins.WithSymbol(sym.Name) + } + + // Apply any relocations for the current instruction. + // If no relocation is present, resolve any section-relative function calls. 
+ if rel, ok := section.relocations[offset]; ok { + if err := ec.relocateInstruction(ins, rel); err != nil { + return nil, fmt.Errorf("offset %d: relocating instruction: %w", offset, err) + } + } else { + if err := referenceRelativeJump(ins, offset, section.symbols); err != nil { + return nil, fmt.Errorf("offset %d: resolving relative jump: %w", offset, err) + } + } + } + + if ec.extInfo != nil { + ec.extInfo.Assign(insns, section.Name) + } + + return splitSymbols(insns) +} + +// referenceRelativeJump turns a relative jump to another bpf subprogram within +// the same ELF section into a Reference Instruction. +// +// Up to LLVM 9, calls to subprograms within the same ELF section are sometimes +// encoded using relative jumps instead of relocation entries. These jumps go +// out of bounds of the current program, so their targets must be memoized +// before the section's instruction stream is split. +// +// The relative jump Constant is blinded to -1 and the target Symbol is set as +// the Instruction's Reference so it can be resolved by the linker. +func referenceRelativeJump(ins *asm.Instruction, offset uint64, symbols map[uint64]elf.Symbol) error { + if !ins.IsFunctionReference() || ins.Constant == -1 { + return nil + } + + tgt := jumpTarget(offset, *ins) + sym := symbols[tgt].Name + if sym == "" { + return fmt.Errorf("no jump target found at offset %d", tgt) + } + + *ins = ins.WithReference(sym) + ins.Constant = -1 + + return nil +} + +// jumpTarget takes ins' offset within an instruction stream (in bytes) +// and returns its absolute jump destination (in bytes) within the +// instruction stream. +func jumpTarget(offset uint64, ins asm.Instruction) uint64 { + // A relative jump instruction describes the amount of raw BPF instructions + // to jump, convert the offset into bytes. + dest := ins.Constant * asm.InstructionSize + + // The starting point of the jump is the end of the current instruction. + dest += int64(offset + asm.InstructionSize) + + if dest < 0 { + return 0 + } + + return uint64(dest) +} + +var errUnsupportedBinding = errors.New("unsupported binding") + +func (ec *elfCode) relocateInstruction(ins *asm.Instruction, rel elf.Symbol) error { + var ( + typ = elf.ST_TYPE(rel.Info) + bind = elf.ST_BIND(rel.Info) + name = rel.Name + ) + + target := ec.sections[rel.Section] + + switch target.kind { + case mapSection, btfMapSection: + if bind == elf.STB_LOCAL { + return fmt.Errorf("possible erroneous static qualifier on map definition: found reference to %q", name) + } + + if bind != elf.STB_GLOBAL { + return fmt.Errorf("map %q: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_OBJECT && typ != elf.STT_NOTYPE { + // STT_NOTYPE is generated on clang < 8 which doesn't tag + // relocations appropriately. + return fmt.Errorf("map load: incorrect relocation type %v", typ) + } + + ins.Src = asm.PseudoMapFD + + case dataSection: + var offset uint32 + switch typ { + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // This is really a reference to a static symbol, which clang doesn't + // emit a symbol table entry for. Instead it encodes the offset in + // the instruction itself. + offset = uint32(uint64(ins.Constant)) + + case elf.STT_OBJECT: + // LLVM 9 emits OBJECT-LOCAL symbols for anonymous constants. 
+ if bind != elf.STB_GLOBAL && bind != elf.STB_LOCAL && bind != elf.STB_WEAK { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + case elf.STT_NOTYPE: + // LLVM 7 emits NOTYPE-LOCAL symbols for anonymous constants. + if bind != elf.STB_LOCAL { + return fmt.Errorf("direct load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + offset = uint32(rel.Value) + + default: + return fmt.Errorf("incorrect relocation type %v for direct map load", typ) + } + + // We rely on using the name of the data section as the reference. It + // would be nicer to keep the real name in case of an STT_OBJECT, but + // it's not clear how to encode that into Instruction. + name = target.Name + + // The kernel expects the offset in the second basic BPF instruction. + ins.Constant = int64(uint64(offset) << 32) + ins.Src = asm.PseudoMapValue + + case programSection: + switch opCode := ins.OpCode; { + case opCode.JumpOp() == asm.Call: + if ins.Src != asm.PseudoCall { + return fmt.Errorf("call: %s: incorrect source register", name) + } + + switch typ { + case elf.STT_NOTYPE, elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("call: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // The function we want to call is in the indicated section, + // at the offset encoded in the instruction itself. Reverse + // the calculation to find the real function we're looking for. + // A value of -1 references the first instruction in the section. + offset := int64(int32(ins.Constant)+1) * asm.InstructionSize + sym, ok := target.symbols[uint64(offset)] + if !ok { + return fmt.Errorf("call: no symbol at offset %d", offset) + } + + name = sym.Name + ins.Constant = -1 + + default: + return fmt.Errorf("call: %s: invalid symbol type %s", name, typ) + } + case opCode.IsDWordLoad(): + switch typ { + case elf.STT_FUNC: + if bind != elf.STB_GLOBAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + case elf.STT_SECTION: + if bind != elf.STB_LOCAL { + return fmt.Errorf("load: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + // ins.Constant already contains the offset in bytes from the + // start of the section. This is different than a call to a + // static function. + + default: + return fmt.Errorf("load: %s: invalid symbol type %s", name, typ) + } + + sym, ok := target.symbols[uint64(ins.Constant)] + if !ok { + return fmt.Errorf("load: no symbol at offset %d", ins.Constant) + } + + name = sym.Name + ins.Constant = -1 + ins.Src = asm.PseudoFunc + + default: + return fmt.Errorf("neither a call nor a load instruction: %v", ins) + } + + // The Undefined section is used for 'virtual' symbols that aren't backed by + // an ELF section. This includes symbol references from inline asm, forward + // function declarations, as well as extern kfunc declarations using __ksym + // and extern kconfig variables declared using __kconfig. 
+ case undefSection: + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + if typ != elf.STT_NOTYPE { + return fmt.Errorf("asm relocation: %s: unsupported type %s", name, typ) + } + + kf := ec.kfuncs[name] + _, ks := ec.ksyms[name] + + switch { + // If a Call / DWordLoad instruction is found and the datasec has a btf.Func with a Name + // that matches the symbol name we mark the instruction as a referencing a kfunc. + case kf != nil && ins.OpCode.JumpOp() == asm.Call: + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Src = asm.PseudoKfuncCall + ins.Constant = -1 + + case kf != nil && ins.OpCode.IsDWordLoad(): + ins.Metadata.Set(kfuncMetaKey{}, &kfuncMeta{ + Func: kf, + Binding: bind, + }) + + ins.Constant = 0 + + case ks && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL && bind != elf.STB_WEAK { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + ins.Metadata.Set(ksymMetaKey{}, &ksymMeta{ + Binding: bind, + Name: name, + }) + + // If no kconfig map is found, this must be a symbol reference from inline + // asm (see testdata/loader.c:asm_relocation()) or a call to a forward + // function declaration (see testdata/fwd_decl.c). Don't interfere, These + // remain standard symbol references. + // extern __kconfig reads are represented as dword loads that need to be + // rewritten to pseudo map loads from .kconfig. If the map is present, + // require it to contain the symbol to disambiguate between inline asm + // relos and kconfigs. + case ec.kconfig != nil && ins.OpCode.IsDWordLoad(): + if bind != elf.STB_GLOBAL { + return fmt.Errorf("asm relocation: %s: %w: %s", name, errUnsupportedBinding, bind) + } + + for _, vsi := range ec.kconfig.Value.(*btf.Datasec).Vars { + if vsi.Type.(*btf.Var).Name != rel.Name { + continue + } + + ins.Src = asm.PseudoMapValue + ins.Metadata.Set(kconfigMetaKey{}, &kconfigMeta{ec.kconfig, vsi.Offset}) + return nil + } + + return fmt.Errorf("kconfig %s not found in .kconfig", rel.Name) + } + + default: + return fmt.Errorf("relocation to %q: %w", target.Name, ErrNotSupported) + } + + *ins = ins.WithReference(name) + return nil +} + +func (ec *elfCode) loadMaps() error { + for _, sec := range ec.sections { + if sec.kind != mapSection { + continue + } + + nSym := len(sec.symbols) + if nSym == 0 { + return fmt.Errorf("section %v: no symbols", sec.Name) + } + + if sec.Size%uint64(nSym) != 0 { + return fmt.Errorf("section %v: map descriptors are not of equal size", sec.Name) + } + + var ( + r = bufio.NewReader(sec.Open()) + size = sec.Size / uint64(nSym) + ) + for i, offset := 0, uint64(0); i < nSym; i, offset = i+1, offset+size { + mapSym, ok := sec.symbols[offset] + if !ok { + return fmt.Errorf("section %s: missing symbol for map at offset %d", sec.Name, offset) + } + + mapName := mapSym.Name + if ec.maps[mapName] != nil { + return fmt.Errorf("section %v: map %v already exists", sec.Name, mapSym) + } + + lr := io.LimitReader(r, int64(size)) + + spec := MapSpec{ + Name: SanitizeName(mapName, -1), + } + switch { + case binary.Read(lr, ec.ByteOrder, &spec.Type) != nil: + return fmt.Errorf("map %s: missing type", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.KeySize) != nil: + return fmt.Errorf("map %s: missing key size", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.ValueSize) != nil: + return fmt.Errorf("map %s: missing value size", mapName) + case binary.Read(lr, ec.ByteOrder, 
&spec.MaxEntries) != nil: + return fmt.Errorf("map %s: missing max entries", mapName) + case binary.Read(lr, ec.ByteOrder, &spec.Flags) != nil: + return fmt.Errorf("map %s: missing flags", mapName) + } + + extra, err := io.ReadAll(lr) + if err != nil { + return fmt.Errorf("map %s: reading map tail: %w", mapName, err) + } + if len(extra) > 0 { + spec.Extra = bytes.NewReader(extra) + } + + ec.maps[mapName] = &spec + } + } + + return nil +} + +// loadBTFMaps iterates over all ELF sections marked as BTF map sections +// (like .maps) and parses them into MapSpecs. Dump the .maps section and +// any relocations with `readelf -x .maps -r `. +func (ec *elfCode) loadBTFMaps() error { + for _, sec := range ec.sections { + if sec.kind != btfMapSection { + continue + } + + if ec.btf == nil { + return fmt.Errorf("missing BTF") + } + + // Each section must appear as a DataSec in the ELF's BTF blob. + var ds *btf.Datasec + if err := ec.btf.TypeByName(sec.Name, &ds); err != nil { + return fmt.Errorf("cannot find section '%s' in BTF: %w", sec.Name, err) + } + + // Open a Reader to the ELF's raw section bytes so we can assert that all + // of them are zero on a per-map (per-Var) basis. For now, the section's + // sole purpose is to receive relocations, so all must be zero. + rs := sec.Open() + + for _, vs := range ds.Vars { + // BPF maps are declared as and assigned to global variables, + // so iterate over each Var in the DataSec and validate their types. + v, ok := vs.Type.(*btf.Var) + if !ok { + return fmt.Errorf("section %v: unexpected type %s", sec.Name, vs.Type) + } + name := string(v.Name) + + // The BTF metadata for each Var contains the full length of the map + // declaration, so read the corresponding amount of bytes from the ELF. + // This way, we can pinpoint which map declaration contains unexpected + // (and therefore unsupported) data. + _, err := io.Copy(internal.DiscardZeroes{}, io.LimitReader(rs, int64(vs.Size))) + if err != nil { + return fmt.Errorf("section %v: map %s: initializing BTF map definitions: %w", sec.Name, name, internal.ErrNotSupported) + } + + if ec.maps[name] != nil { + return fmt.Errorf("section %v: map %s already exists", sec.Name, name) + } + + // Each Var representing a BTF map definition contains a Struct. + mapStruct, ok := btf.UnderlyingType(v.Type).(*btf.Struct) + if !ok { + return fmt.Errorf("expected struct, got %s", v.Type) + } + + mapSpec, err := mapSpecFromBTF(sec, &vs, mapStruct, ec.btf, name, false) + if err != nil { + return fmt.Errorf("map %v: %w", name, err) + } + + ec.maps[name] = mapSpec + } + + // Drain the ELF section reader to make sure all bytes are accounted for + // with BTF metadata. + i, err := io.Copy(io.Discard, rs) + if err != nil { + return fmt.Errorf("section %v: unexpected error reading remainder of ELF section: %w", sec.Name, err) + } + if i > 0 { + return fmt.Errorf("section %v: %d unexpected remaining bytes in ELF section, invalid BTF?", sec.Name, i) + } + } + + return nil +} + +// mapSpecFromBTF produces a MapSpec based on a btf.Struct def representing +// a BTF map definition. The name and spec arguments will be copied to the +// resulting MapSpec, and inner must be true on any recursive invocations. 
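A minimal usage sketch of what this BTF map parsing ultimately produces, illustrative only and assuming a compiled BPF object at the hypothetical path testdata/example.o whose .maps section declares at least one map:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Parsing the ELF turns every BTF map definition into a MapSpec,
	// keyed by the name of the map variable.
	spec, err := ebpf.LoadCollectionSpec("testdata/example.o") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}

	for name, m := range spec.Maps {
		// Type, KeySize, ValueSize and MaxEntries are filled in from the
		// __uint/__type members of the BTF map definition.
		fmt.Printf("%s: type=%s key=%d value=%d max_entries=%d\n",
			name, m.Type, m.KeySize, m.ValueSize, m.MaxEntries)
	}
}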
+func mapSpecFromBTF(es *elfSection, vs *btf.VarSecinfo, def *btf.Struct, spec *btf.Spec, name string, inner bool) (*MapSpec, error) { + var ( + key, value btf.Type + keySize, valueSize uint32 + mapType MapType + flags, maxEntries uint32 + pinType PinType + innerMapSpec *MapSpec + contents []MapKV + err error + ) + + for i, member := range def.Members { + switch member.Name { + case "type": + mt, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get type: %w", err) + } + mapType = MapType(mt) + + case "map_flags": + flags, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map flags: %w", err) + } + + case "max_entries": + maxEntries, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF map max entries: %w", err) + } + + case "key": + if keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + pk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("key type is not a pointer: %T", member.Type) + } + + key = pk.Target + + size, err := btf.Sizeof(pk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF key: %w", err) + } + + keySize = uint32(size) + + case "value": + if valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + vk, ok := member.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("value type is not a pointer: %T", member.Type) + } + + value = vk.Target + + size, err := btf.Sizeof(vk.Target) + if err != nil { + return nil, fmt.Errorf("can't get size of BTF value: %w", err) + } + + valueSize = uint32(size) + + case "key_size": + // Key needs to be nil and keySize needs to be 0 for key_size to be + // considered a valid member. + if key != nil || keySize != 0 { + return nil, errors.New("both key and key_size given") + } + + keySize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF key size: %w", err) + } + + case "value_size": + // Value needs to be nil and valueSize needs to be 0 for value_size to be + // considered a valid member. + if value != nil || valueSize != 0 { + return nil, errors.New("both value and value_size given") + } + + valueSize, err = uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get BTF value size: %w", err) + } + + case "pinning": + if inner { + return nil, errors.New("inner maps can't be pinned") + } + + pinning, err := uintFromBTF(member.Type) + if err != nil { + return nil, fmt.Errorf("can't get pinning: %w", err) + } + + pinType = PinType(pinning) + + case "values": + // The 'values' field in BTF map definitions is used for declaring map + // value types that are references to other BPF objects, like other maps + // or programs. It is always expected to be an array of pointers. + if i != len(def.Members)-1 { + return nil, errors.New("'values' must be the last member in a BTF map definition") + } + + if valueSize != 0 && valueSize != 4 { + return nil, errors.New("value_size must be 0 or 4") + } + valueSize = 4 + + valueType, err := resolveBTFArrayMacro(member.Type) + if err != nil { + return nil, fmt.Errorf("can't resolve type of member 'values': %w", err) + } + + switch t := valueType.(type) { + case *btf.Struct: + // The values member pointing to an array of structs means we're expecting + // a map-in-map declaration. 
+ if mapType != ArrayOfMaps && mapType != HashOfMaps { + return nil, errors.New("outer map needs to be an array or a hash of maps") + } + if inner { + return nil, fmt.Errorf("nested inner maps are not supported") + } + + // This inner map spec is used as a map template, but it needs to be + // created as a traditional map before it can be used to do so. + // libbpf names the inner map template '.inner', but we + // opted for _inner to simplify validation logic. (dots only supported + // on kernels 5.2 and up) + // Pass the BTF spec from the parent object, since both parent and + // child must be created from the same BTF blob (on kernels that support BTF). + innerMapSpec, err = mapSpecFromBTF(es, vs, t, spec, name+"_inner", true) + if err != nil { + return nil, fmt.Errorf("can't parse BTF map definition of inner map: %w", err) + } + + case *btf.FuncProto: + // The values member contains an array of function pointers, meaning an + // autopopulated PROG_ARRAY. + if mapType != ProgramArray { + return nil, errors.New("map needs to be a program array") + } + + default: + return nil, fmt.Errorf("unsupported value type %q in 'values' field", t) + } + + contents, err = resolveBTFValuesContents(es, vs, member) + if err != nil { + return nil, fmt.Errorf("resolving values contents: %w", err) + } + + case "map_extra": + return nil, fmt.Errorf("BTF map definition: field %s: %w", member.Name, ErrNotSupported) + + default: + return nil, fmt.Errorf("unrecognized field %s in BTF map definition", member.Name) + } + } + + // Some maps don't support value sizes, but annotating their map definitions + // with __type macros can still be useful, especially to let bpf2go generate + // type definitions for them. + if value != nil && !mapType.canHaveValueSize() { + valueSize = 0 + } + + return &MapSpec{ + Name: SanitizeName(name, -1), + Type: MapType(mapType), + KeySize: keySize, + ValueSize: valueSize, + MaxEntries: maxEntries, + Flags: flags, + Key: key, + Value: value, + Pinning: pinType, + InnerMap: innerMapSpec, + Contents: contents, + }, nil +} + +// uintFromBTF resolves the __uint macro, which is a pointer to a sized +// array, e.g. for int (*foo)[10], this function will return 10. +func uintFromBTF(typ btf.Type) (uint32, error) { + ptr, ok := typ.(*btf.Pointer) + if !ok { + return 0, fmt.Errorf("not a pointer: %v", typ) + } + + arr, ok := ptr.Target.(*btf.Array) + if !ok { + return 0, fmt.Errorf("not a pointer to array: %v", typ) + } + + return arr.Nelems, nil +} + +// resolveBTFArrayMacro resolves the __array macro, which declares an array +// of pointers to a given type. This function returns the target Type of +// the pointers in the array. +func resolveBTFArrayMacro(typ btf.Type) (btf.Type, error) { + arr, ok := typ.(*btf.Array) + if !ok { + return nil, fmt.Errorf("not an array: %v", typ) + } + + ptr, ok := arr.Type.(*btf.Pointer) + if !ok { + return nil, fmt.Errorf("not an array of pointers: %v", typ) + } + + return ptr.Target, nil +} + +// resolveBTFValuesContents resolves relocations into ELF sections belonging +// to btf.VarSecinfo's. This can be used on the 'values' member in BTF map +// definitions to extract static declarations of map contents. +func resolveBTFValuesContents(es *elfSection, vs *btf.VarSecinfo, member btf.Member) ([]MapKV, error) { + // The elements of a .values pointer array are not encoded in BTF. + // Instead, relocations are generated into each array index. 
+ // However, it's possible to leave certain array indices empty, so all + // indices' offsets need to be checked for emitted relocations. + + // The offset of the 'values' member within the _struct_ (in bits) + // is the starting point of the array. Convert to bytes. Add VarSecinfo + // offset to get the absolute position in the ELF blob. + start := member.Offset.Bytes() + vs.Offset + // 'values' is encoded in BTF as a zero (variable) length struct + // member, and its contents run until the end of the VarSecinfo. + // Add VarSecinfo offset to get the absolute position in the ELF blob. + end := vs.Size + vs.Offset + // The size of an address in this section. This determines the width of + // an index in the array. + align := uint32(es.SectionHeader.Addralign) + + // Check if variable-length section is aligned. + if (end-start)%align != 0 { + return nil, errors.New("unaligned static values section") + } + elems := (end - start) / align + + if elems == 0 { + return nil, nil + } + + contents := make([]MapKV, 0, elems) + + // k is the array index, off is its corresponding ELF section offset. + for k, off := uint32(0), start; k < elems; k, off = k+1, off+align { + r, ok := es.relocations[uint64(off)] + if !ok { + continue + } + + // Relocation exists for the current offset in the ELF section. + // Emit a value stub based on the type of relocation to be replaced by + // a real fd later in the pipeline before populating the map. + // Map keys are encoded in MapKV entries, so empty array indices are + // skipped here. + switch t := elf.ST_TYPE(r.Info); t { + case elf.STT_FUNC: + contents = append(contents, MapKV{uint32(k), r.Name}) + case elf.STT_OBJECT: + contents = append(contents, MapKV{uint32(k), r.Name}) + default: + return nil, fmt.Errorf("unknown relocation type %v for symbol %s", t, r.Name) + } + } + + return contents, nil +} + +func (ec *elfCode) loadDataSections() error { + for _, sec := range ec.sections { + if sec.kind != dataSection { + continue + } + + // If a section has no references, it will be freed as soon as the + // Collection closes, so creating and populating it is wasteful. If it has + // no symbols, it is likely an ephemeral section used during compilation + // that wasn't sanitized by the bpf linker. (like .rodata.str1.1) + // + // No symbols means no VariableSpecs can be generated from it, making it + // pointless to emit a data section for. + if sec.references == 0 && len(sec.symbols) == 0 { + continue + } + + if sec.Size > math.MaxUint32 { + return fmt.Errorf("data section %s: contents exceed maximum size", sec.Name) + } + + mapSpec := &MapSpec{ + Name: SanitizeName(sec.Name, -1), + Type: Array, + KeySize: 4, + ValueSize: uint32(sec.Size), + MaxEntries: 1, + } + + if isConstantDataSection(sec.Name) { + mapSpec.Flags = sys.BPF_F_RDONLY_PROG + } + + switch sec.Type { + // Only open the section if we know there's actual data to be read. + case elf.SHT_PROGBITS: + data, err := sec.Data() + if err != nil { + return fmt.Errorf("data section %s: can't get contents: %w", sec.Name, err) + } + mapSpec.Contents = []MapKV{{uint32(0), data}} + + case elf.SHT_NOBITS: + // NOBITS sections like .bss contain only zeroes and are not allocated in + // the ELF. Since data sections are Arrays, the kernel can preallocate + // them. Don't attempt reading zeroes from the ELF, instead allocate the + // zeroed memory to support getting and setting VariableSpecs for sections + // like .bss. 
+ mapSpec.Contents = []MapKV{{uint32(0), make([]byte, sec.Size)}} + + default: + return fmt.Errorf("data section %s: unknown section type %s", sec.Name, sec.Type) + } + + for off, sym := range sec.symbols { + // Skip symbols marked with the 'hidden' attribute. + if elf.ST_VISIBILITY(sym.Other) == elf.STV_HIDDEN || + elf.ST_VISIBILITY(sym.Other) == elf.STV_INTERNAL { + continue + } + + // Only accept symbols with global or weak bindings. The common + // alternative is STB_LOCAL, which are either function-scoped or declared + // 'static'. + if elf.ST_BIND(sym.Info) != elf.STB_GLOBAL && + elf.ST_BIND(sym.Info) != elf.STB_WEAK { + continue + } + + if ec.vars[sym.Name] != nil { + return fmt.Errorf("data section %s: duplicate variable %s", sec.Name, sym.Name) + } + + // Skip symbols starting with a dot; they are compiler-internal symbols + // emitted by clang 11 and earlier and are not cleaned up by the bpf + // compiler backend (e.g. symbols named .Lconstinit.1 in sections like + // .rodata.cst32). Variables in C cannot start with a dot, so filter these + // out. + if strings.HasPrefix(sym.Name, ".") { + continue + } + + ec.vars[sym.Name] = &VariableSpec{ + name: sym.Name, + offset: off, + size: sym.Size, + m: mapSpec, + } + } + + // It is possible for a data section to exist without a corresponding BTF Datasec + // if it only contains anonymous values like macro-defined arrays. + if ec.btf != nil { + var ds *btf.Datasec + if ec.btf.TypeByName(sec.Name, &ds) == nil { + // Assign the spec's key and BTF only if the Datasec lookup was successful. + mapSpec.Key = &btf.Void{} + mapSpec.Value = ds + + // Populate VariableSpecs with type information, if available. + for _, v := range ds.Vars { + name := v.Type.TypeName() + if name == "" { + return fmt.Errorf("data section %s: anonymous variable %v", sec.Name, v) + } + + vt, ok := v.Type.(*btf.Var) + if !ok { + return fmt.Errorf("data section %s: unexpected type %T for variable %s", sec.Name, v.Type, name) + } + + ev := ec.vars[name] + if ev == nil { + // Hidden symbols appear in the BTF Datasec but don't receive a VariableSpec. + continue + } + + if uint64(v.Offset) != ev.offset { + return fmt.Errorf("data section %s: variable %s datasec offset (%d) doesn't match ELF symbol offset (%d)", sec.Name, name, v.Offset, ev.offset) + } + + if uint64(v.Size) != ev.size { + return fmt.Errorf("data section %s: variable %s size in datasec (%d) doesn't match ELF symbol size (%d)", sec.Name, name, v.Size, ev.size) + } + + // Decouple the Var in the VariableSpec from the underlying DataSec in + // the MapSpec to avoid modifications from affecting map loads later on. + ev.t = btf.Copy(vt).(*btf.Var) + } + } + } + + ec.maps[sec.Name] = mapSpec + } + + return nil +} + +// loadKconfigSection handles the 'virtual' Datasec .kconfig that doesn't +// have a corresponding ELF section and exists purely in BTF. +func (ec *elfCode) loadKconfigSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".kconfig", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + if ds.Size == 0 { + return errors.New("zero-length .kconfig") + } + + ec.kconfig = &MapSpec{ + Name: ".kconfig", + Type: Array, + KeySize: uint32(4), + ValueSize: ds.Size, + MaxEntries: 1, + Flags: sys.BPF_F_RDONLY_PROG, + Key: &btf.Int{Size: 4}, + Value: ds, + } + + return nil +} + +// loadKsymsSection handles the 'virtual' Datasec .ksyms that doesn't +// have a corresponding ELF section and exists purely in BTF. 
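A user-facing sketch of the VariableSpec plumbing built by loadDataSections above, assuming the vendored version exposes parsed data-section symbols as CollectionSpec.Variables (as recent releases of the library do); the object path and the global name are hypothetical:

package main

import (
	"log"

	"github.com/cilium/ebpf"
)

func main() {
	// Hypothetical object; its non-static .data/.bss globals become VariableSpecs.
	spec, err := ebpf.LoadCollectionSpec("testdata/example.o")
	if err != nil {
		log.Fatal(err)
	}

	// "global_counter" is a hypothetical 4-byte global declared in the object.
	v, ok := spec.Variables["global_counter"]
	if !ok {
		log.Fatal("object does not declare global_counter")
	}

	// Rewrites the variable's bytes inside the backing data-section MapSpec
	// before the collection is loaded into the kernel.
	if err := v.Set(uint32(42)); err != nil {
		log.Fatal(err)
	}
}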
+func (ec *elfCode) loadKsymsSection() error { + if ec.btf == nil { + return nil + } + + var ds *btf.Datasec + err := ec.btf.TypeByName(".ksyms", &ds) + if errors.Is(err, btf.ErrNotFound) { + return nil + } + if err != nil { + return err + } + + for _, v := range ds.Vars { + switch t := v.Type.(type) { + case *btf.Func: + ec.kfuncs[t.TypeName()] = t + case *btf.Var: + ec.ksyms[t.TypeName()] = struct{}{} + default: + return fmt.Errorf("unexpected variable type in .ksyms: %T", v) + } + } + + return nil +} + +type libbpfElfSectionDef struct { + pattern string + programType sys.ProgType + attachType sys.AttachType + flags libbpfElfSectionFlag +} + +type libbpfElfSectionFlag uint32 + +// The values correspond to enum sec_def_flags in libbpf. +const ( + _SEC_NONE libbpfElfSectionFlag = 0 + + _SEC_EXP_ATTACH_OPT libbpfElfSectionFlag = 1 << (iota - 1) + _SEC_ATTACHABLE + _SEC_ATTACH_BTF + _SEC_SLEEPABLE + _SEC_XDP_FRAGS + _SEC_USDT + + // Ignore any present extra in order to preserve backwards compatibility + // with earlier versions of the library. + ignoreExtra + + _SEC_ATTACHABLE_OPT = _SEC_ATTACHABLE | _SEC_EXP_ATTACH_OPT +) + +func init() { + // Compatibility with older versions of the library. + // We prepend libbpf definitions since they contain a prefix match + // for "xdp". + elfSectionDefs = append([]libbpfElfSectionDef{ + {"xdp.frags/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS | ignoreExtra}, + {"xdp.frags_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp_devmap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, 0}, + {"xdp.frags_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp_cpumap/", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, 0}, + // This has been in the library since the beginning of time. Not sure + // where it came from. + {"seccomp", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + }, elfSectionDefs...) +} + +func getProgType(sectionName string) (ProgramType, AttachType, uint32, string) { + // Skip optional program marking for now. + sectionName = strings.TrimPrefix(sectionName, "?") + + for _, t := range elfSectionDefs { + extra, ok := matchSectionName(sectionName, t.pattern) + if !ok { + continue + } + + programType := ProgramType(t.programType) + attachType := AttachType(t.attachType) + + var flags uint32 + if t.flags&_SEC_SLEEPABLE > 0 { + flags |= sys.BPF_F_SLEEPABLE + } + if t.flags&_SEC_XDP_FRAGS > 0 { + flags |= sys.BPF_F_XDP_HAS_FRAGS + } + if t.flags&_SEC_EXP_ATTACH_OPT > 0 { + if programType == XDP { + // The library doesn't yet have code to fall back to not specifying + // attach type. Only do this for XDP since we've enforced correct + // attach type for all other program types. + attachType = AttachNone + } + } + if t.flags&ignoreExtra > 0 { + extra = "" + } + + return programType, attachType, flags, extra + } + + return UnspecifiedProgram, AttachNone, 0, "" +} + +// matchSectionName checks a section name against a pattern. +// +// Its behaviour mirrors that of libbpf's sec_def_matches. +func matchSectionName(sectionName, pattern string) (extra string, found bool) { + have, extra, found := strings.Cut(sectionName, "/") + want := strings.TrimRight(pattern, "+/") + + if strings.HasSuffix(pattern, "/") { + // Section name must have a slash and extra may be empty. + return extra, have == want && found + } else if strings.HasSuffix(pattern, "+") { + // Section name may have a slash and extra may be empty. + return extra, have == want + } + + // Section name must have a prefix. extra is ignored. 
+ return "", strings.HasPrefix(sectionName, pattern) +} + +func (ec *elfCode) loadSectionRelocations(sec *elf.Section, symbols []elf.Symbol) (map[uint64]elf.Symbol, error) { + rels := make(map[uint64]elf.Symbol) + + if sec.Entsize < 16 { + return nil, fmt.Errorf("section %s: relocations are less than 16 bytes", sec.Name) + } + + r := bufio.NewReader(sec.Open()) + for off := uint64(0); off < sec.Size; off += sec.Entsize { + ent := io.LimitReader(r, int64(sec.Entsize)) + + var rel elf.Rel64 + if binary.Read(ent, ec.ByteOrder, &rel) != nil { + return nil, fmt.Errorf("can't parse relocation at offset %v", off) + } + + symNo := int(elf.R_SYM64(rel.Info) - 1) + if symNo >= len(symbols) { + return nil, fmt.Errorf("offset %d: symbol %d doesn't exist", off, symNo) + } + + symbol := symbols[symNo] + rels[rel.Off] = symbol + } + + return rels, nil +} diff --git a/vendor/github.com/cilium/ebpf/elf_sections.go b/vendor/github.com/cilium/ebpf/elf_sections.go new file mode 100644 index 0000000000..4b58251d9a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/elf_sections.go @@ -0,0 +1,109 @@ +// Code generated by internal/cmd/gensections.awk; DO NOT EDIT. + +package ebpf + +// Code in this file is derived from libbpf, available under BSD-2-Clause. + +import "github.com/cilium/ebpf/internal/sys" + +var elfSectionDefs = []libbpfElfSectionDef{ + {"socket", sys.BPF_PROG_TYPE_SOCKET_FILTER, 0, _SEC_NONE}, + {"sk_reuseport/migrate", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, _SEC_ATTACHABLE}, + {"sk_reuseport", sys.BPF_PROG_TYPE_SK_REUSEPORT, sys.BPF_SK_REUSEPORT_SELECT, _SEC_ATTACHABLE}, + {"kprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"uretprobe.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_SLEEPABLE}, + {"kprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"kretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_KPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uretprobe.multi+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_NONE}, + {"uprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"uretprobe.multi.s+", sys.BPF_PROG_TYPE_KPROBE, sys.BPF_TRACE_UPROBE_MULTI, _SEC_SLEEPABLE}, + {"ksyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"kretsyscall+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_NONE}, + {"usdt+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT}, + {"usdt.s+", sys.BPF_PROG_TYPE_KPROBE, 0, _SEC_USDT | _SEC_SLEEPABLE}, + {"tc/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tc/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tcx/ingress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_INGRESS, _SEC_NONE}, + {"tcx/egress", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_TCX_EGRESS, _SEC_NONE}, + {"tc", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"classifier", sys.BPF_PROG_TYPE_SCHED_CLS, 0, _SEC_NONE}, + {"action", sys.BPF_PROG_TYPE_SCHED_ACT, 0, _SEC_NONE}, + {"netkit/primary", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PRIMARY, _SEC_NONE}, + {"netkit/peer", sys.BPF_PROG_TYPE_SCHED_CLS, sys.BPF_NETKIT_PEER, _SEC_NONE}, + {"tracepoint+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"tp+", sys.BPF_PROG_TYPE_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint+", 
sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tp+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT, 0, _SEC_NONE}, + {"raw_tracepoint.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"raw_tp.w+", sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, 0, _SEC_NONE}, + {"tp_btf+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_RAW_TP, _SEC_ATTACH_BTF}, + {"fentry+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF}, + {"fmod_ret+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF}, + {"fexit+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF}, + {"fentry.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FENTRY, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fmod_ret.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_MODIFY_RETURN, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"fexit.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_FEXIT, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"freplace+", sys.BPF_PROG_TYPE_EXT, 0, _SEC_ATTACH_BTF}, + {"lsm+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF}, + {"lsm.s+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_MAC, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"lsm_cgroup+", sys.BPF_PROG_TYPE_LSM, sys.BPF_LSM_CGROUP, _SEC_ATTACH_BTF}, + {"iter+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF}, + {"iter.s+", sys.BPF_PROG_TYPE_TRACING, sys.BPF_TRACE_ITER, _SEC_ATTACH_BTF | _SEC_SLEEPABLE}, + {"syscall", sys.BPF_PROG_TYPE_SYSCALL, 0, _SEC_SLEEPABLE}, + {"xdp.frags/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_XDP_FRAGS}, + {"xdp/devmap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_DEVMAP, _SEC_ATTACHABLE}, + {"xdp.frags/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_XDP_FRAGS}, + {"xdp/cpumap", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP_CPUMAP, _SEC_ATTACHABLE}, + {"xdp.frags", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_XDP_FRAGS}, + {"xdp", sys.BPF_PROG_TYPE_XDP, sys.BPF_XDP, _SEC_ATTACHABLE_OPT}, + {"perf_event", sys.BPF_PROG_TYPE_PERF_EVENT, 0, _SEC_NONE}, + {"lwt_in", sys.BPF_PROG_TYPE_LWT_IN, 0, _SEC_NONE}, + {"lwt_out", sys.BPF_PROG_TYPE_LWT_OUT, 0, _SEC_NONE}, + {"lwt_xmit", sys.BPF_PROG_TYPE_LWT_XMIT, 0, _SEC_NONE}, + {"lwt_seg6local", sys.BPF_PROG_TYPE_LWT_SEG6LOCAL, 0, _SEC_NONE}, + {"sockops", sys.BPF_PROG_TYPE_SOCK_OPS, sys.BPF_CGROUP_SOCK_OPS, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_parser", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_PARSER, _SEC_ATTACHABLE_OPT}, + {"sk_skb/stream_verdict", sys.BPF_PROG_TYPE_SK_SKB, sys.BPF_SK_SKB_STREAM_VERDICT, _SEC_ATTACHABLE_OPT}, + {"sk_skb", sys.BPF_PROG_TYPE_SK_SKB, 0, _SEC_NONE}, + {"sk_msg", sys.BPF_PROG_TYPE_SK_MSG, sys.BPF_SK_MSG_VERDICT, _SEC_ATTACHABLE_OPT}, + {"lirc_mode2", sys.BPF_PROG_TYPE_LIRC_MODE2, sys.BPF_LIRC_MODE2, _SEC_ATTACHABLE_OPT}, + {"flow_dissector", sys.BPF_PROG_TYPE_FLOW_DISSECTOR, sys.BPF_FLOW_DISSECTOR, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/ingress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_INGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup_skb/egress", sys.BPF_PROG_TYPE_CGROUP_SKB, sys.BPF_CGROUP_INET_EGRESS, _SEC_ATTACHABLE_OPT}, + {"cgroup/skb", sys.BPF_PROG_TYPE_CGROUP_SKB, 0, _SEC_NONE}, + {"cgroup/sock_create", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE}, + {"cgroup/sock_release", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_RELEASE, _SEC_ATTACHABLE}, + {"cgroup/sock", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET_SOCK_CREATE, _SEC_ATTACHABLE_OPT}, + {"cgroup/post_bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET4_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/post_bind6", 
sys.BPF_PROG_TYPE_CGROUP_SOCK, sys.BPF_CGROUP_INET6_POST_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_BIND, _SEC_ATTACHABLE}, + {"cgroup/bind6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_BIND, _SEC_ATTACHABLE}, + {"cgroup/connect4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/connect_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_CONNECT, _SEC_ATTACHABLE}, + {"cgroup/sendmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/sendmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_SENDMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP4_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UDP6_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/recvmsg_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_RECVMSG, _SEC_ATTACHABLE}, + {"cgroup/getpeername4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getpeername_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETPEERNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname4", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET4_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname6", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_INET6_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/getsockname_unix", sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR, sys.BPF_CGROUP_UNIX_GETSOCKNAME, _SEC_ATTACHABLE}, + {"cgroup/sysctl", sys.BPF_PROG_TYPE_CGROUP_SYSCTL, sys.BPF_CGROUP_SYSCTL, _SEC_ATTACHABLE}, + {"cgroup/getsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_GETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/setsockopt", sys.BPF_PROG_TYPE_CGROUP_SOCKOPT, sys.BPF_CGROUP_SETSOCKOPT, _SEC_ATTACHABLE}, + {"cgroup/dev", sys.BPF_PROG_TYPE_CGROUP_DEVICE, sys.BPF_CGROUP_DEVICE, _SEC_ATTACHABLE_OPT}, + {"struct_ops+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_NONE}, + {"struct_ops.s+", sys.BPF_PROG_TYPE_STRUCT_OPS, 0, _SEC_SLEEPABLE}, + {"sk_lookup", sys.BPF_PROG_TYPE_SK_LOOKUP, sys.BPF_SK_LOOKUP, _SEC_ATTACHABLE}, + {"netfilter", sys.BPF_PROG_TYPE_NETFILTER, sys.BPF_NETFILTER, _SEC_NONE}, +} diff --git a/vendor/github.com/cilium/ebpf/features/doc.go b/vendor/github.com/cilium/ebpf/features/doc.go new file mode 100644 index 0000000000..acc57e3b1e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/doc.go @@ -0,0 +1,19 @@ +// Package features allows probing for BPF features available to the calling process. +// +// In general, the error return values from feature probes in this package +// all have the following semantics unless otherwise specified: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that resource +// creation may succeed despite an error being returned. For example, some +// map and program types cannot reliably be probed and will return an +// inconclusive error. 
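A short sketch of how callers typically consume the three-way error contract described above; the RingBuf check is only an example probe:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/features"
)

func main() {
	err := features.HaveMapType(ebpf.RingBuf)
	switch {
	case err == nil:
		fmt.Println("ring buffer maps are supported")
	case errors.Is(err, ebpf.ErrNotSupported):
		fmt.Println("ring buffer maps are not supported by this kernel")
	default:
		// Inconclusive: the probe itself failed.
		log.Fatalf("probing ring buffer support: %v", err)
	}
}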
+// +// As a rule, only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached by the library and persist throughout any changes +// to the process' environment, like capability changes. +package features diff --git a/vendor/github.com/cilium/ebpf/features/map.go b/vendor/github.com/cilium/ebpf/features/map.go new file mode 100644 index 0000000000..4b16e6af42 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/map.go @@ -0,0 +1,321 @@ +package features + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveMapType probes the running kernel for the availability of the specified map type. +// +// See the package documentation for the meaning of the error return value. +func HaveMapType(mt ebpf.MapType) error { + return haveMapTypeMatrix.Result(mt) +} + +func probeCgroupStorageMap(mt sys.MapType) error { + // keySize needs to be sizeof(struct{u32 + u64}) = 12 (+ padding = 16) + // by using unsafe.Sizeof(int) we are making sure that this works on 32bit and 64bit archs + return createMap(&sys.MapCreateAttr{ + MapType: mt, + ValueSize: 4, + KeySize: uint32(8 + unsafe.Sizeof(int(0))), + MaxEntries: 0, + }) +} + +func probeStorageMap(mt sys.MapType) error { + // maxEntries needs to be 0 + // BPF_F_NO_PREALLOC needs to be set + // btf* fields need to be set + // see alloc_check for local_storage map types + err := createMap(&sys.MapCreateAttr{ + MapType: mt, + KeySize: 4, + ValueSize: 4, + MaxEntries: 0, + MapFlags: sys.BPF_F_NO_PREALLOC, + BtfKeyTypeId: 1, + BtfValueTypeId: 1, + BtfFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + // Triggered by BtfFd. + return nil + } + return err +} + +func probeNestedMap(mt sys.MapType) error { + // assign invalid innerMapFd to pass validation check + // will return EBADF + err := probeMap(&sys.MapCreateAttr{ + MapType: mt, + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EBADF) { + return nil + } + return err +} + +func probeMap(attr *sys.MapCreateAttr) error { + if attr.KeySize == 0 { + attr.KeySize = 4 + } + if attr.ValueSize == 0 { + attr.ValueSize = 4 + } + attr.MaxEntries = 1 + return createMap(attr) +} + +func createMap(attr *sys.MapCreateAttr) error { + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + return nil + } + + switch { + // EINVAL occurs when attempting to create a map with an unknown type. + // E2BIG occurs when MapCreateAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given map type. 
+ case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + return ebpf.ErrNotSupported + } + + return err +} + +var haveMapTypeMatrix = internal.FeatureMatrix[ebpf.MapType]{ + ebpf.Hash: {Version: "3.19"}, + ebpf.Array: {Version: "3.19"}, + ebpf.ProgramArray: {Version: "4.2"}, + ebpf.PerfEventArray: {Version: "4.3"}, + ebpf.PerCPUHash: {Version: "4.6"}, + ebpf.PerCPUArray: {Version: "4.6"}, + ebpf.StackTrace: { + Version: "4.6", + Fn: func() error { + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK_TRACE, + ValueSize: 8, // sizeof(uint64) + }) + }, + }, + ebpf.CGroupArray: {Version: "4.8"}, + ebpf.LRUHash: {Version: "4.10"}, + ebpf.LRUCPUHash: {Version: "4.10"}, + ebpf.LPMTrie: { + Version: "4.11", + Fn: func() error { + // keySize and valueSize need to be sizeof(struct{u32 + u8}) + 1 + padding = 8 + // BPF_F_NO_PREALLOC needs to be set + return probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_LPM_TRIE, + KeySize: 8, + ValueSize: 8, + MapFlags: sys.BPF_F_NO_PREALLOC, + }) + }, + }, + ebpf.ArrayOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_ARRAY_OF_MAPS) }, + }, + ebpf.HashOfMaps: { + Version: "4.12", + Fn: func() error { return probeNestedMap(sys.BPF_MAP_TYPE_HASH_OF_MAPS) }, + }, + ebpf.DevMap: {Version: "4.14"}, + ebpf.SockMap: {Version: "4.14"}, + ebpf.CPUMap: {Version: "4.15"}, + ebpf.XSKMap: {Version: "4.18"}, + ebpf.SockHash: {Version: "4.18"}, + ebpf.CGroupStorage: { + Version: "4.19", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_CGROUP_STORAGE) }, + }, + ebpf.ReusePortSockArray: {Version: "4.19"}, + ebpf.PerCPUCGroupStorage: { + Version: "4.20", + Fn: func() error { return probeCgroupStorageMap(sys.BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) }, + }, + ebpf.Queue: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_QUEUE, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.Stack: { + Version: "4.20", + Fn: func() error { + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STACK, + KeySize: 0, + ValueSize: 4, + MaxEntries: 1, + }) + }, + }, + ebpf.SkStorage: { + Version: "5.2", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_SK_STORAGE) }, + }, + ebpf.DevMapHash: {Version: "5.4"}, + ebpf.StructOpsMap: { + Version: "5.6", + Fn: func() error { + // StructOps requires setting a vmlinux type id, but id 1 will always + // resolve to some type of integer. This will cause ENOTSUPP. + err := probeMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_STRUCT_OPS, + BtfVmlinuxValueTypeId: 1, + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the map type is at least known to the kernel. + return nil + } + return err + }, + }, + ebpf.RingBuf: { + Version: "5.8", + Fn: func() error { + // keySize and valueSize need to be 0 + // maxEntries needs to be power of 2 and PAGE_ALIGNED + return createMap(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_RINGBUF, + KeySize: 0, + ValueSize: 0, + MaxEntries: uint32(os.Getpagesize()), + }) + }, + }, + ebpf.InodeStorage: { + Version: "5.10", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_INODE_STORAGE) }, + }, + ebpf.TaskStorage: { + Version: "5.11", + Fn: func() error { return probeStorageMap(sys.BPF_MAP_TYPE_TASK_STORAGE) }, + }, +} + +func init() { + for mt, ft := range haveMapTypeMatrix { + ft.Name = mt.String() + if ft.Fn == nil { + // Avoid referring to the loop variable in the closure. 
+ mt := sys.MapType(mt) + ft.Fn = func() error { return probeMap(&sys.MapCreateAttr{MapType: mt}) } + } + } +} + +// MapFlags document which flags may be feature probed. +type MapFlags uint32 + +// Flags which may be feature probed. +const ( + BPF_F_NO_PREALLOC = sys.BPF_F_NO_PREALLOC + BPF_F_RDONLY_PROG = sys.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = sys.BPF_F_WRONLY_PROG + BPF_F_MMAPABLE = sys.BPF_F_MMAPABLE + BPF_F_INNER_MAP = sys.BPF_F_INNER_MAP +) + +// HaveMapFlag probes the running kernel for the availability of the specified map flag. +// +// Returns an error if flag is not one of the flags declared in this package. +// See the package documentation for the meaning of the error return value. +func HaveMapFlag(flag MapFlags) (err error) { + return haveMapFlagsMatrix.Result(flag) +} + +func probeMapFlag(attr *sys.MapCreateAttr) error { + // For now, we do not check if the map type is supported because we only support + // probing for flags defined on arrays and hashes that are always supported. + // In the future, if we allow probing on flags defined on newer types, checking for map type + // support will be required. + if attr.MapType == sys.BPF_MAP_TYPE_UNSPEC { + attr.MapType = sys.BPF_MAP_TYPE_ARRAY + } + + attr.KeySize = 4 + attr.ValueSize = 4 + attr.MaxEntries = 1 + + fd, err := sys.MapCreate(attr) + if err == nil { + fd.Close() + } else if errors.Is(err, unix.EINVAL) { + // EINVAL occurs when attempting to create a map with an unknown type or an unknown flag. + err = ebpf.ErrNotSupported + } + + return err +} + +var haveMapFlagsMatrix = internal.FeatureMatrix[MapFlags]{ + BPF_F_NO_PREALLOC: { + Version: "4.6", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapType: sys.BPF_MAP_TYPE_HASH, + MapFlags: BPF_F_NO_PREALLOC, + }) + }, + }, + BPF_F_RDONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_RDONLY_PROG, + }) + }, + }, + BPF_F_WRONLY_PROG: { + Version: "5.2", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_WRONLY_PROG, + }) + }, + }, + BPF_F_MMAPABLE: { + Version: "5.5", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_MMAPABLE, + }) + }, + }, + BPF_F_INNER_MAP: { + Version: "5.10", + Fn: func() error { + return probeMapFlag(&sys.MapCreateAttr{ + MapFlags: BPF_F_INNER_MAP, + }) + }, + }, +} + +func init() { + for mf, ft := range haveMapFlagsMatrix { + ft.Name = fmt.Sprint(mf) + } +} diff --git a/vendor/github.com/cilium/ebpf/features/misc.go b/vendor/github.com/cilium/ebpf/features/misc.go new file mode 100644 index 0000000000..c039020a95 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/misc.go @@ -0,0 +1,135 @@ +package features + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +// HaveLargeInstructions probes the running kernel if more than 4096 instructions +// per program are supported. +// +// Upstream commit c04c0d2b968a ("bpf: increase complexity limit and maximum program size"). +// +// See the package documentation for the meaning of the error return value. 
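One common way to use the flag probes above is to request optional map flags only when the running kernel accepts them; a sketch with a hypothetical map definition:

package main

import (
	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/features"
)

// exampleSpec is a hypothetical single-entry array; the point of the sketch is
// the conditional BPF_F_MMAPABLE, which kernels before 5.5 reject.
func exampleSpec() *ebpf.MapSpec {
	spec := &ebpf.MapSpec{
		Name:       "example_data",
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  64,
		MaxEntries: 1,
	}
	if features.HaveMapFlag(features.BPF_F_MMAPABLE) == nil {
		spec.Flags |= uint32(features.BPF_F_MMAPABLE)
	}
	return spec
}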
+func HaveLargeInstructions() error { + return haveLargeInstructions() +} + +var haveLargeInstructions = internal.NewFeatureTest(">4096 instructions", func() error { + const maxInsns = 4096 + + insns := make(asm.Instructions, maxInsns, maxInsns+1) + for i := range insns { + insns[i] = asm.Mov.Imm(asm.R0, 1) + } + insns = append(insns, asm.Return()) + + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: insns, + }) +}, "5.2") + +// HaveBoundedLoops probes the running kernel if bounded loops are supported. +// +// Upstream commit 2589726d12a1 ("bpf: introduce bounded loops"). +// +// See the package documentation for the meaning of the error return value. +func HaveBoundedLoops() error { + return haveBoundedLoops() +} + +var haveBoundedLoops = internal.NewFeatureTest("bounded loops", func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 10), + asm.Sub.Imm(asm.R0, 1).WithSymbol("loop"), + asm.JNE.Imm(asm.R0, 0, "loop"), + asm.Return(), + }, + }) +}, "5.3") + +// HaveV2ISA probes the running kernel if instructions of the v2 ISA are supported. +// +// Upstream commit 92b31a9af73b ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions"). +// +// See the package documentation for the meaning of the error return value. +func HaveV2ISA() error { + return haveV2ISA() +} + +var haveV2ISA = internal.NewFeatureTest("v2 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. + if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "4.14") + +// HaveV3ISA probes the running kernel if instructions of the v3 ISA are supported. +// +// Upstream commit 092ed0968bb6 ("bpf: verifier support JMP32"). +// +// See the package documentation for the meaning of the error return value. +func HaveV3ISA() error { + return haveV3ISA() +} + +var haveV3ISA = internal.NewFeatureTest("v3 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JLT.Imm32(asm.R0, 0, "exit"), + asm.Mov.Imm(asm.R0, 1), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. + if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "5.1") + +// HaveV4ISA probes the running kernel if instructions of the v4 ISA are supported. +// +// Upstream commit 1f9a1ea821ff ("bpf: Support new sign-extension load insns"). +// +// See the package documentation for the meaning of the error return value. +func HaveV4ISA() error { + return haveV4ISA() +} + +var haveV4ISA = internal.NewFeatureTest("v4 ISA", func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SocketFilter, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.JEq.Imm(asm.R0, 1, "error"), + asm.LongJump("exit"), + asm.Mov.Imm(asm.R0, 1).WithSymbol("error"), + asm.Return().WithSymbol("exit"), + }, + }) + // This sometimes bubbles up from the JIT on aarch64. 
+ if errors.Is(err, sys.ENOTSUPP) { + return ebpf.ErrNotSupported + } + return err +}, "6.6") diff --git a/vendor/github.com/cilium/ebpf/features/prog.go b/vendor/github.com/cilium/ebpf/features/prog.go new file mode 100644 index 0000000000..003bf00646 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/prog.go @@ -0,0 +1,300 @@ +package features + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// HaveProgType probes the running kernel for the availability of the specified program type. +// +// Deprecated: use HaveProgramType() instead. +var HaveProgType = HaveProgramType + +// HaveProgramType probes the running kernel for the availability of the specified program type. +// +// See the package documentation for the meaning of the error return value. +func HaveProgramType(pt ebpf.ProgramType) (err error) { + return haveProgramTypeMatrix.Result(pt) +} + +func probeProgram(spec *ebpf.ProgramSpec) error { + if spec.Instructions == nil { + spec.Instructions = asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + } + } + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EINVAL occurs when attempting to create a program with an unknown type. + // E2BIG occurs when ProgLoadAttr contains non-zero bytes past the end + // of the struct known by the running kernel, meaning the kernel is too old + // to support the given prog type. + case errors.Is(err, unix.EINVAL), errors.Is(err, unix.E2BIG): + err = ebpf.ErrNotSupported + } + + return err +} + +var haveProgramTypeMatrix = internal.FeatureMatrix[ebpf.ProgramType]{ + ebpf.SocketFilter: {Version: "3.19"}, + ebpf.Kprobe: {Version: "4.1"}, + ebpf.SchedCLS: {Version: "4.1"}, + ebpf.SchedACT: {Version: "4.1"}, + ebpf.TracePoint: {Version: "4.7"}, + ebpf.XDP: {Version: "4.8"}, + ebpf.PerfEvent: {Version: "4.9"}, + ebpf.CGroupSKB: {Version: "4.10"}, + ebpf.CGroupSock: {Version: "4.10"}, + ebpf.LWTIn: {Version: "4.10"}, + ebpf.LWTOut: {Version: "4.10"}, + ebpf.LWTXmit: {Version: "4.10"}, + ebpf.SockOps: {Version: "4.13"}, + ebpf.SkSKB: {Version: "4.14"}, + ebpf.CGroupDevice: {Version: "4.15"}, + ebpf.SkMsg: {Version: "4.17"}, + ebpf.RawTracepoint: {Version: "4.17"}, + ebpf.CGroupSockAddr: { + Version: "4.17", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockAddr, + AttachType: ebpf.AttachCGroupInet4Connect, + }) + }, + }, + ebpf.LWTSeg6Local: {Version: "4.18"}, + ebpf.LircMode2: {Version: "4.18"}, + ebpf.SkReuseport: {Version: "4.19"}, + ebpf.FlowDissector: {Version: "4.20"}, + ebpf.CGroupSysctl: {Version: "5.2"}, + ebpf.RawTracepointWritable: {Version: "5.2"}, + ebpf.CGroupSockopt: { + Version: "5.3", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSockopt, + AttachType: ebpf.AttachCGroupGetsockopt, + }) + }, + }, + ebpf.Tracing: { + Version: "5.5", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Tracing, + AttachType: ebpf.AttachTraceFEntry, + AttachTo: "bpf_init", + }) + }, + }, + ebpf.StructOps: { + Version: "5.6", + Fn: func() error { + err := probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.StructOps, + License: "GPL", + }) + if errors.Is(err, sys.ENOTSUPP) { + // ENOTSUPP means the program type is at least known to the kernel. 
+ return nil + } + return err + }, + }, + ebpf.Extension: { + Version: "5.6", + Fn: func() error { + // create btf.Func to add to first ins of target and extension so both progs are btf powered + btfFn := btf.Func{ + Name: "a", + Type: &btf.FuncProto{ + Return: &btf.Int{}, + Params: []btf.FuncParam{ + {Name: "ctx", Type: &btf.Pointer{Target: &btf.Struct{Name: "xdp_md"}}}, + }, + }, + Linkage: btf.GlobalFunc, + } + insns := asm.Instructions{ + btf.WithFuncMetadata(asm.Mov.Imm(asm.R0, 0), &btfFn), + asm.Return(), + } + + // create target prog + prog, err := ebpf.NewProgramWithOptions( + &ebpf.ProgramSpec{ + Type: ebpf.XDP, + Instructions: insns, + }, + ebpf.ProgramOptions{ + LogDisabled: true, + }, + ) + if err != nil { + return err + } + defer prog.Close() + + // probe for Extension prog with target + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Extension, + Instructions: insns, + AttachTarget: prog, + AttachTo: btfFn.Name, + }) + }, + }, + ebpf.LSM: { + Version: "5.7", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.LSM, + AttachType: ebpf.AttachLSMMac, + AttachTo: "file_mprotect", + License: "GPL", + }) + }, + }, + ebpf.SkLookup: { + Version: "5.9", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.SkLookup, + AttachType: ebpf.AttachSkLookup, + }) + }, + }, + ebpf.Syscall: { + Version: "5.14", + Fn: func() error { + return probeProgram(&ebpf.ProgramSpec{ + Type: ebpf.Syscall, + Flags: sys.BPF_F_SLEEPABLE, + }) + }, + }, +} + +func init() { + for key, ft := range haveProgramTypeMatrix { + ft.Name = key.String() + if ft.Fn == nil { + key := key // avoid the dreaded loop variable problem + ft.Fn = func() error { return probeProgram(&ebpf.ProgramSpec{Type: key}) } + } + } +} + +type helperKey struct { + typ ebpf.ProgramType + helper asm.BuiltinFunc +} + +var helperCache = internal.NewFeatureCache(func(key helperKey) *internal.FeatureTest { + return &internal.FeatureTest{ + Name: fmt.Sprintf("%s for program type %s", key.helper, key.typ), + Fn: func() error { + return haveProgramHelper(key.typ, key.helper) + }, + } +}) + +// HaveProgramHelper probes the running kernel for the availability of the specified helper +// function to a specified program type. +// Return values have the following semantics: +// +// err == nil: The feature is available. +// errors.Is(err, ebpf.ErrNotSupported): The feature is not available. +// err != nil: Any errors encountered during probe execution, wrapped. +// +// Note that the latter case may include false negatives, and that program creation may +// succeed despite an error being returned. +// Only `nil` and `ebpf.ErrNotSupported` are conclusive. +// +// Probe results are cached and persist throughout any process capability changes. 
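A minimal sketch of combining the program-type and helper probes before loading; the XDP/bpf_redirect_map pair is only an example:

package main

import (
	"fmt"
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/features"
)

func main() {
	// Check the program type first; the helper probe performs this check
	// internally as well and fails if the type is unknown to the kernel.
	if err := features.HaveProgramType(ebpf.XDP); err != nil {
		log.Fatalf("XDP programs not supported: %v", err)
	}

	// Then check that bpf_redirect_map may be called from XDP programs.
	if err := features.HaveProgramHelper(ebpf.XDP, asm.FnRedirectMap); err != nil {
		log.Fatalf("bpf_redirect_map not available to XDP: %v", err)
	}

	fmt.Println("kernel supports XDP with bpf_redirect_map")
}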
+func HaveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if helper > helper.Max() { + return os.ErrInvalid + } + + return helperCache.Result(helperKey{pt, helper}) +} + +func haveProgramHelper(pt ebpf.ProgramType, helper asm.BuiltinFunc) error { + if ok := helperProbeNotImplemented(pt); ok { + return fmt.Errorf("no feature probe for %v/%v", pt, helper) + } + + if err := HaveProgramType(pt); err != nil { + return err + } + + spec := &ebpf.ProgramSpec{ + Type: pt, + Instructions: asm.Instructions{ + helper.Call(), + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "GPL", + } + + switch pt { + case ebpf.CGroupSockAddr: + spec.AttachType = ebpf.AttachCGroupInet4Connect + case ebpf.CGroupSockopt: + spec.AttachType = ebpf.AttachCGroupGetsockopt + case ebpf.SkLookup: + spec.AttachType = ebpf.AttachSkLookup + case ebpf.Syscall: + spec.Flags = sys.BPF_F_SLEEPABLE + } + + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err == nil { + prog.Close() + } + + switch { + // EACCES occurs when attempting to create a program probe with a helper + // while the register args when calling this helper aren't set up properly. + // We interpret this as the helper being available, because the verifier + // returns EINVAL if the helper is not supported by the running kernel. + case errors.Is(err, unix.EACCES): + // TODO: possibly we need to check verifier output here to be sure + err = nil + + // EINVAL occurs when attempting to create a program with an unknown helper. + case errors.Is(err, unix.EINVAL): + // TODO: possibly we need to check verifier output here to be sure + err = ebpf.ErrNotSupported + } + + return err +} + +func helperProbeNotImplemented(pt ebpf.ProgramType) bool { + switch pt { + case ebpf.Extension, ebpf.LSM, ebpf.StructOps, ebpf.Tracing: + return true + } + return false +} diff --git a/vendor/github.com/cilium/ebpf/features/version.go b/vendor/github.com/cilium/ebpf/features/version.go new file mode 100644 index 0000000000..d54d3ea212 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/features/version.go @@ -0,0 +1,18 @@ +package features + +import "github.com/cilium/ebpf/internal/linux" + +// LinuxVersionCode returns the version of the currently running kernel +// as defined in the LINUX_VERSION_CODE compile-time macro. It is represented +// in the format described by the KERNEL_VERSION macro from linux/version.h. +// +// Do not use the version to make assumptions about the presence of certain +// kernel features, always prefer feature probes in this package. Some +// distributions backport or disable eBPF features. +func LinuxVersionCode() (uint32, error) { + v, err := linux.KernelVersion() + if err != nil { + return 0, err + } + return v.Kernel(), nil +} diff --git a/vendor/github.com/cilium/ebpf/info.go b/vendor/github.com/cilium/ebpf/info.go new file mode 100644 index 0000000000..56a1f1e9a3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/info.go @@ -0,0 +1,795 @@ +package ebpf + +import ( + "bufio" + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "os" + "reflect" + "strings" + "syscall" + "time" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// The *Info structs expose metadata about a program or map. 
Most +// fields are exposed via a getter: +// +// func (*MapInfo) ID() (MapID, bool) +// +// This is because the metadata available changes based on kernel version. +// The second boolean return value indicates whether a particular field is +// available on the current kernel. +// +// Always add new metadata as such a getter, unless you can somehow get the +// value of the field on all supported kernels. Also document which version +// a particular field first appeared in. +// +// Some metadata is a buffer which needs additional parsing. In this case, +// store the undecoded data in the Info struct and provide a getter which +// decodes it when necessary. See ProgramInfo.Instructions for an example. + +// MapInfo describes a map. +type MapInfo struct { + // Type of the map. + Type MapType + // KeySize is the size of the map key in bytes. + KeySize uint32 + // ValueSize is the size of the map value in bytes. + ValueSize uint32 + // MaxEntries is the maximum number of entries the map can hold. Its meaning + // is map-specific. + MaxEntries uint32 + // Flags used during map creation. + Flags uint32 + // Name as supplied by user space at load time. Available from 4.15. + Name string + + id MapID + btf btf.ID + mapExtra uint64 + memlock uint64 + frozen bool +} + +// newMapInfoFromFd queries map information about the given fd. [sys.ObjInfo] is +// attempted first, supplementing any missing values with information from +// /proc/self/fdinfo. Ignores EINVAL from ObjInfo as well as ErrNotSupported +// from reading fdinfo (indicating the file exists, but no fields of interest +// were found). If both fail, an error is always returned. +func newMapInfoFromFd(fd *sys.FD) (*MapInfo, error) { + var info sys.MapInfo + err1 := sys.ObjInfo(fd, &info) + // EINVAL means the kernel doesn't support BPF_OBJ_GET_INFO_BY_FD. Continue + // with fdinfo if that's the case. + if err1 != nil && !errors.Is(err1, unix.EINVAL) { + return nil, fmt.Errorf("getting object info: %w", err1) + } + + mi := &MapInfo{ + MapType(info.Type), + info.KeySize, + info.ValueSize, + info.MaxEntries, + uint32(info.MapFlags), + unix.ByteSliceToString(info.Name[:]), + MapID(info.Id), + btf.ID(info.BtfId), + info.MapExtra, + 0, + false, + } + + // Supplement OBJ_INFO with data from /proc/self/fdinfo. It contains fields + // like memlock and frozen that are not present in OBJ_INFO. + err2 := readMapInfoFromProc(fd, mi) + if err2 != nil && !errors.Is(err2, ErrNotSupported) { + return nil, fmt.Errorf("getting map info from fdinfo: %w", err2) + } + + if err1 != nil && err2 != nil { + return nil, fmt.Errorf("ObjInfo and fdinfo both failed: objinfo: %w, fdinfo: %w", err1, err2) + } + + return mi, nil +} + +// readMapInfoFromProc queries map information about the given fd from +// /proc/self/fdinfo. It only writes data into fields that have a zero value. +func readMapInfoFromProc(fd *sys.FD, mi *MapInfo) error { + return scanFdInfo(fd, map[string]interface{}{ + "map_type": &mi.Type, + "map_id": &mi.id, + "key_size": &mi.KeySize, + "value_size": &mi.ValueSize, + "max_entries": &mi.MaxEntries, + "map_flags": &mi.Flags, + "map_extra": &mi.mapExtra, + "memlock": &mi.memlock, + "frozen": &mi.frozen, + }) +} + +// ID returns the map ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) ID() (MapID, bool) { + return mi.id, mi.id > 0 +} + +// BTFID returns the BTF ID associated with the Map. +// +// The ID is only valid as long as the associated Map is kept alive. 
+// Available from 4.18. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the Map was loaded without BTF information.) +func (mi *MapInfo) BTFID() (btf.ID, bool) { + return mi.btf, mi.btf > 0 +} + +// MapExtra returns an opaque field whose meaning is map-specific. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available and +// populated, if it was specified during Map creation. +func (mi *MapInfo) MapExtra() (uint64, bool) { + return mi.mapExtra, mi.mapExtra > 0 +} + +// Memlock returns an approximate number of bytes allocated to this map. +// +// Available from 4.10. +// +// The bool return value indicates whether this optional field is available. +func (mi *MapInfo) Memlock() (uint64, bool) { + return mi.memlock, mi.memlock > 0 +} + +// Frozen indicates whether [Map.Freeze] was called on this map. If true, +// modifications from user space are not allowed. +// +// Available from 5.2. Requires access to procfs. +// +// If the kernel doesn't support map freezing, this field will always be false. +func (mi *MapInfo) Frozen() bool { + return mi.frozen +} + +// programStats holds statistics of a program. +type programStats struct { + // Total accumulated runtime of the program ins ns. + runtime time.Duration + // Total number of times the program was called. + runCount uint64 + // Total number of times the programm was NOT called. + // Added in commit 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented"). + recursionMisses uint64 +} + +// programJitedInfo holds information about JITed info of a program. +type programJitedInfo struct { + // ksyms holds the ksym addresses of the BPF program, including those of its + // subprograms. + // + // Available from 4.18. + ksyms []uintptr + numKsyms uint32 + + // insns holds the JITed machine native instructions of the program, + // including those of its subprograms. + // + // Available from 4.13. + insns []byte + numInsns uint32 + + // lineInfos holds the JITed line infos, which are kernel addresses. + // + // Available from 5.0. + lineInfos []uint64 + numLineInfos uint32 + + // lineInfoRecSize is the size of a single line info record. + // + // Available from 5.0. + lineInfoRecSize uint32 + + // funcLens holds the insns length of each function. + // + // Available from 4.18. + funcLens []uint32 + numFuncLens uint32 +} + +// ProgramInfo describes a program. +type ProgramInfo struct { + Type ProgramType + id ProgramID + // Truncated hash of the BPF bytecode. Available from 4.13. + Tag string + // Name as supplied by user space at load time. Available from 4.15. 
+ Name string + + createdByUID uint32 + haveCreatedByUID bool + btf btf.ID + stats *programStats + loadTime time.Duration + + maps []MapID + insns []byte + jitedSize uint32 + verifiedInstructions uint32 + + jitedInfo programJitedInfo + + lineInfos []byte + numLineInfos uint32 + funcInfos []byte + numFuncInfos uint32 +} + +func newProgramInfoFromFd(fd *sys.FD) (*ProgramInfo, error) { + var info sys.ProgInfo + err := sys.ObjInfo(fd, &info) + if errors.Is(err, syscall.EINVAL) { + return newProgramInfoFromProc(fd) + } + if err != nil { + return nil, err + } + + pi := ProgramInfo{ + Type: ProgramType(info.Type), + id: ProgramID(info.Id), + Tag: hex.EncodeToString(info.Tag[:]), + Name: unix.ByteSliceToString(info.Name[:]), + btf: btf.ID(info.BtfId), + stats: &programStats{ + runtime: time.Duration(info.RunTimeNs), + runCount: info.RunCnt, + recursionMisses: info.RecursionMisses, + }, + jitedSize: info.JitedProgLen, + loadTime: time.Duration(info.LoadTime), + verifiedInstructions: info.VerifiedInsns, + } + + // Start with a clean struct for the second call, otherwise we may get EFAULT. + var info2 sys.ProgInfo + + makeSecondCall := false + + if info.NrMapIds > 0 { + pi.maps = make([]MapID, info.NrMapIds) + info2.NrMapIds = info.NrMapIds + info2.MapIds = sys.NewSlicePointer(pi.maps) + makeSecondCall = true + } else if haveProgramInfoMapIDs() == nil { + // This program really has no associated maps. + pi.maps = make([]MapID, 0) + } else { + // The kernel doesn't report associated maps. + pi.maps = nil + } + + // createdByUID and NrMapIds were introduced in the same kernel version. + if pi.maps != nil { + pi.createdByUID = info.CreatedByUid + pi.haveCreatedByUID = true + } + + if info.XlatedProgLen > 0 { + pi.insns = make([]byte, info.XlatedProgLen) + info2.XlatedProgLen = info.XlatedProgLen + info2.XlatedProgInsns = sys.NewSlicePointer(pi.insns) + makeSecondCall = true + } + + if info.NrLineInfo > 0 { + pi.lineInfos = make([]byte, btf.LineInfoSize*info.NrLineInfo) + info2.LineInfo = sys.NewSlicePointer(pi.lineInfos) + info2.LineInfoRecSize = btf.LineInfoSize + info2.NrLineInfo = info.NrLineInfo + pi.numLineInfos = info.NrLineInfo + makeSecondCall = true + } + + if info.NrFuncInfo > 0 { + pi.funcInfos = make([]byte, btf.FuncInfoSize*info.NrFuncInfo) + info2.FuncInfo = sys.NewSlicePointer(pi.funcInfos) + info2.FuncInfoRecSize = btf.FuncInfoSize + info2.NrFuncInfo = info.NrFuncInfo + pi.numFuncInfos = info.NrFuncInfo + makeSecondCall = true + } + + pi.jitedInfo.lineInfoRecSize = info.JitedLineInfoRecSize + if info.JitedProgLen > 0 { + pi.jitedInfo.numInsns = info.JitedProgLen + pi.jitedInfo.insns = make([]byte, info.JitedProgLen) + info2.JitedProgLen = info.JitedProgLen + info2.JitedProgInsns = sys.NewSlicePointer(pi.jitedInfo.insns) + makeSecondCall = true + } + + if info.NrJitedFuncLens > 0 { + pi.jitedInfo.numFuncLens = info.NrJitedFuncLens + pi.jitedInfo.funcLens = make([]uint32, info.NrJitedFuncLens) + info2.NrJitedFuncLens = info.NrJitedFuncLens + info2.JitedFuncLens = sys.NewSlicePointer(pi.jitedInfo.funcLens) + makeSecondCall = true + } + + if info.NrJitedLineInfo > 0 { + pi.jitedInfo.numLineInfos = info.NrJitedLineInfo + pi.jitedInfo.lineInfos = make([]uint64, info.NrJitedLineInfo) + info2.NrJitedLineInfo = info.NrJitedLineInfo + info2.JitedLineInfo = sys.NewSlicePointer(pi.jitedInfo.lineInfos) + info2.JitedLineInfoRecSize = info.JitedLineInfoRecSize + makeSecondCall = true + } + + if info.NrJitedKsyms > 0 { + pi.jitedInfo.numKsyms = info.NrJitedKsyms + pi.jitedInfo.ksyms = make([]uintptr, 
info.NrJitedKsyms) + info2.JitedKsyms = sys.NewSlicePointer(pi.jitedInfo.ksyms) + info2.NrJitedKsyms = info.NrJitedKsyms + makeSecondCall = true + } + + if makeSecondCall { + if err := sys.ObjInfo(fd, &info2); err != nil { + return nil, err + } + } + + return &pi, nil +} + +func newProgramInfoFromProc(fd *sys.FD) (*ProgramInfo, error) { + var info ProgramInfo + err := scanFdInfo(fd, map[string]interface{}{ + "prog_type": &info.Type, + "prog_tag": &info.Tag, + }) + if errors.Is(err, ErrNotSupported) { + return nil, &internal.UnsupportedFeatureError{ + Name: "reading program info from /proc/self/fdinfo", + MinimumVersion: internal.Version{4, 10, 0}, + } + } + if err != nil { + return nil, err + } + + return &info, nil +} + +// ID returns the program ID. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) ID() (ProgramID, bool) { + return pi.id, pi.id > 0 +} + +// CreatedByUID returns the Uid that created the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) CreatedByUID() (uint32, bool) { + return pi.createdByUID, pi.haveCreatedByUID +} + +// BTFID returns the BTF ID associated with the program. +// +// The ID is only valid as long as the associated program is kept alive. +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available and +// populated. (The field may be available but not populated if the kernel +// supports the field but the program was loaded without BTF information.) +func (pi *ProgramInfo) BTFID() (btf.ID, bool) { + return pi.btf, pi.btf > 0 +} + +// RunCount returns the total number of times the program was called. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) RunCount() (uint64, bool) { + if pi.stats != nil { + return pi.stats.runCount, true + } + return 0, false +} + +// Runtime returns the total accumulated runtime of the program. +// +// Can return 0 if the collection of statistics is not enabled. See EnableStats(). +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) Runtime() (time.Duration, bool) { + if pi.stats != nil { + return pi.stats.runtime, true + } + return time.Duration(0), false +} + +// RecursionMisses returns the total number of times the program was NOT called. +// This can happen when another bpf program is already running on the cpu, which +// is likely to happen for example when you interrupt bpf program execution. +func (pi *ProgramInfo) RecursionMisses() (uint64, bool) { + if pi.stats != nil { + return pi.stats.recursionMisses, true + } + return 0, false +} + +// btfSpec returns the BTF spec associated with the program. +func (pi *ProgramInfo) btfSpec() (*btf.Spec, error) { + id, ok := pi.BTFID() + if !ok { + return nil, fmt.Errorf("program created without BTF or unsupported kernel: %w", ErrNotSupported) + } + + h, err := btf.NewHandleFromID(id) + if err != nil { + return nil, fmt.Errorf("get BTF handle: %w", err) + } + defer h.Close() + + spec, err := h.Spec(nil) + if err != nil { + return nil, fmt.Errorf("get BTF spec: %w", err) + } + + return spec, nil +} + +// LineInfos returns the BTF line information of the program. +// +// Available from 5.0. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. 
Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) LineInfos() (btf.LineOffsets, error) { + if len(pi.lineInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) +} + +// Instructions returns the 'xlated' instruction stream of the program +// after it has been verified and rewritten by the kernel. These instructions +// cannot be loaded back into the kernel as-is, this is mainly used for +// inspecting loaded programs for troubleshooting, dumping, etc. +// +// For example, map accesses are made to reference their kernel map IDs, +// not the FDs they had when the program was inserted. Note that before +// the introduction of bpf_insn_prepare_dump in kernel 4.16, xlated +// instructions were not sanitized, making the output even less reusable +// and less likely to round-trip or evaluate to the same program Tag. +// +// The first instruction is marked as a symbol using the Program's name. +// +// If available, the instructions will be annotated with metadata from the +// BTF. This includes line information and function information. Reading +// this metadata requires CAP_SYS_ADMIN or equivalent. If capability is +// unavailable, the instructions will be returned without metadata. +// +// Available from 4.13. Requires CAP_BPF or equivalent for plain instructions. +// Requires CAP_SYS_ADMIN for instructions with metadata. +func (pi *ProgramInfo) Instructions() (asm.Instructions, error) { + // If the calling process is not BPF-capable or if the kernel doesn't + // support getting xlated instructions, the field will be zero. + if len(pi.insns) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + r := bytes.NewReader(pi.insns) + var insns asm.Instructions + if err := insns.Unmarshal(r, internal.NativeEndian); err != nil { + return nil, fmt.Errorf("unmarshaling instructions: %w", err) + } + + if pi.btf != 0 { + btfh, err := btf.NewHandleFromID(pi.btf) + if err != nil { + // Getting a BTF handle requires CAP_SYS_ADMIN, if not available we get an -EPERM. + // Ignore it and fall back to instructions without metadata. + if !errors.Is(err, unix.EPERM) { + return nil, fmt.Errorf("unable to get BTF handle: %w", err) + } + } + + // If we have a BTF handle, we can use it to assign metadata to the instructions. 
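+		// The line and function records are decoded against the program's BTF
+		// spec and then attached to the unmarshalled instructions, so that the
+		// disassembly can carry source-level context.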
+ if btfh != nil { + defer btfh.Close() + + spec, err := btfh.Spec(nil) + if err != nil { + return nil, fmt.Errorf("unable to get BTF spec: %w", err) + } + + lineInfos, err := btf.LoadLineInfos( + bytes.NewReader(pi.lineInfos), + internal.NativeEndian, + pi.numLineInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse line info: %w", err) + } + + funcInfos, err := btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) + if err != nil { + return nil, fmt.Errorf("parse func info: %w", err) + } + + btf.AssignMetadataToInstructions(insns, funcInfos, lineInfos, btf.CORERelocationInfos{}) + } + } + + fn := btf.FuncMetadata(&insns[0]) + name := pi.Name + if fn != nil { + name = fn.Name + } + insns[0] = insns[0].WithSymbol(name) + + return insns, nil +} + +// JitedSize returns the size of the program's JIT-compiled machine code in bytes, which is the +// actual code executed on the host's CPU. This field requires the BPF JIT compiler to be enabled. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) JitedSize() (uint32, error) { + if pi.jitedSize == 0 { + return 0, fmt.Errorf("insufficient permissions, unsupported kernel, or JIT compiler disabled: %w", ErrNotSupported) + } + return pi.jitedSize, nil +} + +// TranslatedSize returns the size of the program's translated instructions in bytes, after it has +// been verified and rewritten by the kernel. +// +// Available from 4.13. Reading this metadata requires CAP_BPF or equivalent. +func (pi *ProgramInfo) TranslatedSize() (int, error) { + insns := len(pi.insns) + if insns == 0 { + return 0, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + return insns, nil +} + +// MapIDs returns the maps related to the program. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) MapIDs() ([]MapID, bool) { + return pi.maps, pi.maps != nil +} + +// LoadTime returns when the program was loaded since boot time. +// +// Available from 4.15. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) LoadTime() (time.Duration, bool) { + // loadTime and NrMapIds were introduced in the same kernel version. + return pi.loadTime, pi.loadTime > 0 +} + +// VerifiedInstructions returns the number verified instructions in the program. +// +// Available from 5.16. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) VerifiedInstructions() (uint32, bool) { + return pi.verifiedInstructions, pi.verifiedInstructions > 0 +} + +// JitedKsymAddrs returns the ksym addresses of the BPF program, including its +// subprograms. The addresses correspond to their symbols in /proc/kallsyms. +// +// Available from 4.18. Note that before 5.x, this field can be empty for +// programs without subprograms (bpf2bpf calls). +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedKsymAddrs() ([]uintptr, bool) { + return pi.jitedInfo.ksyms, len(pi.jitedInfo.ksyms) > 0 +} + +// JitedInsns returns the JITed machine native instructions of the program. +// +// Available from 4.13. +// +// The bool return value indicates whether this optional field is available. 
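+//
+// An illustrative sketch (pi is assumed to come from (*Program).Info on a
+// system with the BPF JIT enabled):
+//
+//	if insns, ok := pi.JitedInsns(); ok {
+//		fmt.Printf("JITed code is %d bytes\n", len(insns))
+//	}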
+func (pi *ProgramInfo) JitedInsns() ([]byte, bool) { + return pi.jitedInfo.insns, len(pi.jitedInfo.insns) > 0 +} + +// JitedLineInfos returns the JITed line infos of the program. +// +// Available from 5.0. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedLineInfos() ([]uint64, bool) { + return pi.jitedInfo.lineInfos, len(pi.jitedInfo.lineInfos) > 0 +} + +// JitedFuncLens returns the insns length of each function in the JITed program. +// +// Available from 4.18. +// +// The bool return value indicates whether this optional field is available. +func (pi *ProgramInfo) JitedFuncLens() ([]uint32, bool) { + return pi.jitedInfo.funcLens, len(pi.jitedInfo.funcLens) > 0 +} + +// FuncInfos returns the offset and function information of all (sub)programs in +// a BPF program. +// +// Available from 5.0. +// +// Requires CAP_SYS_ADMIN or equivalent for reading BTF information. Returns +// ErrNotSupported if the program was created without BTF or if the kernel +// doesn't support the field. +func (pi *ProgramInfo) FuncInfos() (btf.FuncOffsets, error) { + if len(pi.funcInfos) == 0 { + return nil, fmt.Errorf("insufficient permissions or unsupported kernel: %w", ErrNotSupported) + } + + spec, err := pi.btfSpec() + if err != nil { + return nil, err + } + + return btf.LoadFuncInfos( + bytes.NewReader(pi.funcInfos), + internal.NativeEndian, + pi.numFuncInfos, + spec, + ) +} + +func scanFdInfo(fd *sys.FD, fields map[string]interface{}) error { + fh, err := os.Open(fmt.Sprintf("/proc/self/fdinfo/%d", fd.Int())) + if err != nil { + return err + } + defer fh.Close() + + if err := scanFdInfoReader(fh, fields); err != nil { + return fmt.Errorf("%s: %w", fh.Name(), err) + } + return nil +} + +func scanFdInfoReader(r io.Reader, fields map[string]interface{}) error { + var ( + scanner = bufio.NewScanner(r) + scanned int + ) + + for scanner.Scan() { + parts := strings.SplitN(scanner.Text(), "\t", 2) + if len(parts) != 2 { + continue + } + + name := strings.TrimSuffix(parts[0], ":") + field, ok := fields[string(name)] + if !ok { + continue + } + + // If field already contains a non-zero value, don't overwrite it with fdinfo. + if zero(field) { + if n, err := fmt.Sscanln(parts[1], field); err != nil || n != 1 { + return fmt.Errorf("can't parse field %s: %v", name, err) + } + } + + scanned++ + } + + if err := scanner.Err(); err != nil { + return fmt.Errorf("scanning fdinfo: %w", err) + } + + if len(fields) > 0 && scanned == 0 { + return ErrNotSupported + } + + return nil +} + +func zero(arg any) bool { + v := reflect.ValueOf(arg) + + // Unwrap pointers and interfaces. + for v.Kind() == reflect.Pointer || + v.Kind() == reflect.Interface { + v = v.Elem() + } + + return v.IsZero() +} + +// EnableStats starts the measuring of the runtime +// and run counts of eBPF programs. +// +// Collecting statistics can have an impact on the performance. +// +// Requires at least 5.8. +func EnableStats(which uint32) (io.Closer, error) { + fd, err := sys.EnableStats(&sys.EnableStatsAttr{ + Type: which, + }) + if err != nil { + return nil, err + } + return fd, nil +} + +var haveProgramInfoMapIDs = internal.NewFeatureTest("map IDs in program info", func() error { + prog, err := progLoad(asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, SocketFilter, "MIT") + if err != nil { + return err + } + defer prog.Close() + + err = sys.ObjInfo(prog, &sys.ProgInfo{ + // NB: Don't need to allocate MapIds since the program isn't using + // any maps. 
+ NrMapIds: 1, + }) + if errors.Is(err, unix.EINVAL) { + // Most likely the syscall doesn't exist. + return internal.ErrNotSupported + } + if errors.Is(err, unix.E2BIG) { + // We've hit check_uarg_tail_zero on older kernels. + return internal.ErrNotSupported + } + + return err +}, "4.15") diff --git a/vendor/github.com/cilium/ebpf/internal/buffer.go b/vendor/github.com/cilium/ebpf/internal/buffer.go new file mode 100644 index 0000000000..81c6544330 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/buffer.go @@ -0,0 +1,31 @@ +package internal + +import ( + "bytes" + "sync" +) + +var bytesBufferPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, +} + +// NewBuffer retrieves a [bytes.Buffer] from a pool an re-initialises it. +// +// The returned buffer should be passed to [PutBuffer]. +func NewBuffer(buf []byte) *bytes.Buffer { + wr := bytesBufferPool.Get().(*bytes.Buffer) + // Reinitialize the Buffer with a new backing slice since it is returned to + // the caller by wr.Bytes() below. Pooling is faster despite calling + // NewBuffer. The pooled alloc is still reused, it only needs to be zeroed. + *wr = *bytes.NewBuffer(buf) + return wr +} + +// PutBuffer releases a buffer to the pool. +func PutBuffer(buf *bytes.Buffer) { + // Release reference to the backing buffer. + *buf = *bytes.NewBuffer(nil) + bytesBufferPool.Put(buf) +} diff --git a/vendor/github.com/cilium/ebpf/internal/deque.go b/vendor/github.com/cilium/ebpf/internal/deque.go new file mode 100644 index 0000000000..e3a3050215 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/deque.go @@ -0,0 +1,91 @@ +package internal + +import "math/bits" + +// Deque implements a double ended queue. +type Deque[T any] struct { + elems []T + read, write uint64 + mask uint64 +} + +// Reset clears the contents of the deque while retaining the backing buffer. +func (dq *Deque[T]) Reset() { + var zero T + + for i := dq.read; i < dq.write; i++ { + dq.elems[i&dq.mask] = zero + } + + dq.read, dq.write = 0, 0 +} + +func (dq *Deque[T]) Empty() bool { + return dq.read == dq.write +} + +// Push adds an element to the end. +func (dq *Deque[T]) Push(e T) { + dq.Grow(1) + dq.elems[dq.write&dq.mask] = e + dq.write++ +} + +// Shift returns the first element or the zero value. +func (dq *Deque[T]) Shift() T { + var zero T + + if dq.Empty() { + return zero + } + + index := dq.read & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + dq.read++ + return t +} + +// Pop returns the last element or the zero value. +func (dq *Deque[T]) Pop() T { + var zero T + + if dq.Empty() { + return zero + } + + dq.write-- + index := dq.write & dq.mask + t := dq.elems[index] + dq.elems[index] = zero + return t +} + +// Grow the deque's capacity, if necessary, to guarantee space for another n +// elements. +func (dq *Deque[T]) Grow(n int) { + have := dq.write - dq.read + need := have + uint64(n) + if need < have { + panic("overflow") + } + if uint64(len(dq.elems)) >= need { + return + } + + // Round up to the new power of two which is at least 8. 
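+	// For example, need=5 rounds up to capacity=8 and need=9 to capacity=16,
+	// so the mask (capacity-1) can select a ring index with a single AND.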
+ // See https://jameshfisher.com/2018/03/30/round-up-power-2/ + capacity := 1 << (64 - bits.LeadingZeros64(need-1)) + if capacity < 8 { + capacity = 8 + } + + elems := make([]T, have, capacity) + pivot := dq.read & dq.mask + copied := copy(elems, dq.elems[pivot:]) + copy(elems[copied:], dq.elems[:pivot]) + + dq.elems = elems[:capacity] + dq.mask = uint64(capacity) - 1 + dq.read, dq.write = 0, have +} diff --git a/vendor/github.com/cilium/ebpf/internal/elf.go b/vendor/github.com/cilium/ebpf/internal/elf.go new file mode 100644 index 0000000000..011581938d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/elf.go @@ -0,0 +1,102 @@ +package internal + +import ( + "debug/elf" + "fmt" + "io" +) + +type SafeELFFile struct { + *elf.File +} + +// NewSafeELFFile reads an ELF safely. +// +// Any panic during parsing is turned into an error. This is necessary since +// there are a bunch of unfixed bugs in debug/elf. +// +// https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+debug%2Felf+in%3Atitle +func NewSafeELFFile(r io.ReaderAt) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.NewFile(r) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// OpenSafeELFFile reads an ELF from a file. +// +// It works like NewSafeELFFile, with the exception that safe.Close will +// close the underlying file. +func OpenSafeELFFile(path string) (safe *SafeELFFile, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + safe = nil + err = fmt.Errorf("reading ELF file panicked: %s", r) + }() + + file, err := elf.Open(path) + if err != nil { + return nil, err + } + + return &SafeELFFile{file}, nil +} + +// Symbols is the safe version of elf.File.Symbols. +func (se *SafeELFFile) Symbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF symbols panicked: %s", r) + }() + + syms, err = se.File.Symbols() + return +} + +// DynamicSymbols is the safe version of elf.File.DynamicSymbols. +func (se *SafeELFFile) DynamicSymbols() (syms []elf.Symbol, err error) { + defer func() { + r := recover() + if r == nil { + return + } + + syms = nil + err = fmt.Errorf("reading ELF dynamic symbols panicked: %s", r) + }() + + syms, err = se.File.DynamicSymbols() + return +} + +// SectionsByType returns all sections in the file with the specified section type. +func (se *SafeELFFile) SectionsByType(typ elf.SectionType) []*elf.Section { + sections := make([]*elf.Section, 0, 1) + for _, section := range se.Sections { + if section.Type == typ { + sections = append(sections, section) + } + } + return sections +} diff --git a/vendor/github.com/cilium/ebpf/internal/endian_be.go b/vendor/github.com/cilium/ebpf/internal/endian_be.go new file mode 100644 index 0000000000..a37777f21f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_be.go @@ -0,0 +1,9 @@ +//go:build armbe || arm64be || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. 
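+//
+// For example (illustrative), writing a 32-bit value in host byte order:
+//
+//	buf := make([]byte, 4)
+//	NativeEndian.PutUint32(buf, 1)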
+var NativeEndian = binary.BigEndian diff --git a/vendor/github.com/cilium/ebpf/internal/endian_le.go b/vendor/github.com/cilium/ebpf/internal/endian_le.go new file mode 100644 index 0000000000..6dcd916d5d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/endian_le.go @@ -0,0 +1,9 @@ +//go:build 386 || amd64 || amd64p32 || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || ppc64le || riscv64 + +package internal + +import "encoding/binary" + +// NativeEndian is set to either binary.BigEndian or binary.LittleEndian, +// depending on the host's endianness. +var NativeEndian = binary.LittleEndian diff --git a/vendor/github.com/cilium/ebpf/internal/errors.go b/vendor/github.com/cilium/ebpf/internal/errors.go new file mode 100644 index 0000000000..19d5294ca0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/errors.go @@ -0,0 +1,179 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "strings" +) + +// ErrorWithLog wraps err in a VerifierError that includes the parsed verifier +// log buffer. +// +// The default error output is a summary of the full log. The latter can be +// accessed via VerifierError.Log or by formatting the error, see Format. +func ErrorWithLog(source string, err error, log []byte) *VerifierError { + const whitespace = "\t\r\v\n " + + // Convert verifier log C string by truncating it on the first 0 byte + // and trimming trailing whitespace before interpreting as a Go string. + if i := bytes.IndexByte(log, 0); i != -1 { + log = log[:i] + } + + log = bytes.Trim(log, whitespace) + if len(log) == 0 { + return &VerifierError{source, err, nil} + } + + logLines := bytes.Split(log, []byte{'\n'}) + lines := make([]string, 0, len(logLines)) + for _, line := range logLines { + // Don't remove leading white space on individual lines. We rely on it + // when outputting logs. + lines = append(lines, string(bytes.TrimRight(line, whitespace))) + } + + return &VerifierError{source, err, lines} +} + +// VerifierError includes information from the eBPF verifier. +// +// It summarises the log output, see Format if you want to output the full contents. +type VerifierError struct { + source string + // The error which caused this error. + Cause error + // The verifier output split into lines. + Log []string +} + +func (le *VerifierError) Unwrap() error { + return le.Cause +} + +func (le *VerifierError) Error() string { + log := le.Log + if n := len(log); n > 0 && strings.HasPrefix(log[n-1], "processed ") { + // Get rid of "processed 39 insns (limit 1000000) ..." from summary. + log = log[:n-1] + } + + var b strings.Builder + fmt.Fprintf(&b, "%s: %s", le.source, le.Cause.Error()) + + n := len(log) + if n == 0 { + return b.String() + } + + lines := log[n-1:] + if n >= 2 && includePreviousLine(log[n-1]) { + // Add one more line of context if it aids understanding the error. + lines = log[n-2:] + } + + for _, line := range lines { + b.WriteString(": ") + b.WriteString(strings.TrimSpace(line)) + } + + omitted := len(le.Log) - len(lines) + if omitted > 0 { + fmt.Fprintf(&b, " (%d line(s) omitted)", omitted) + } + + return b.String() +} + +// includePreviousLine returns true if the given line likely is better +// understood with additional context from the preceding line. +func includePreviousLine(line string) bool { + // We need to find a good trade off between understandable error messages + // and too much complexity here. Checking the string prefix is ok, requiring + // regular expressions to do it is probably overkill. 
+ + if strings.HasPrefix(line, "\t") { + // [13] STRUCT drm_rect size=16 vlen=4 + // \tx1 type_id=2 + return true + } + + if len(line) >= 2 && line[0] == 'R' && line[1] >= '0' && line[1] <= '9' { + // 0: (95) exit + // R0 !read_ok + return true + } + + if strings.HasPrefix(line, "invalid bpf_context access") { + // 0: (79) r6 = *(u64 *)(r1 +0) + // func '__x64_sys_recvfrom' arg0 type FWD is not a struct + // invalid bpf_context access off=0 size=8 + return true + } + + return false +} + +// Format the error. +// +// Understood verbs are %s and %v, which are equivalent to calling Error(). %v +// allows outputting additional information using the following flags: +// +// %+v: Output the first lines, or all lines if no width is given. +// %-v: Output the last lines, or all lines if no width is given. +// +// Use width to specify how many lines to output. Use the '-' flag to output +// lines from the end of the log instead of the beginning. +func (le *VerifierError) Format(f fmt.State, verb rune) { + switch verb { + case 's': + _, _ = io.WriteString(f, le.Error()) + + case 'v': + n, haveWidth := f.Width() + if !haveWidth || n > len(le.Log) { + n = len(le.Log) + } + + if !f.Flag('+') && !f.Flag('-') { + if haveWidth { + _, _ = io.WriteString(f, "%!v(BADWIDTH)") + return + } + + _, _ = io.WriteString(f, le.Error()) + return + } + + if f.Flag('+') && f.Flag('-') { + _, _ = io.WriteString(f, "%!v(BADFLAG)") + return + } + + fmt.Fprintf(f, "%s: %s:", le.source, le.Cause.Error()) + + omitted := len(le.Log) - n + lines := le.Log[:n] + if f.Flag('-') { + // Print last instead of first lines. + lines = le.Log[len(le.Log)-n:] + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + for _, line := range lines { + fmt.Fprintf(f, "\n\t%s", line) + } + + if !f.Flag('-') { + if omitted > 0 { + fmt.Fprintf(f, "\n\t(%d line(s) omitted)", omitted) + } + } + + default: + fmt.Fprintf(f, "%%!%c(BADVERB)", verb) + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/feature.go b/vendor/github.com/cilium/ebpf/internal/feature.go new file mode 100644 index 0000000000..6399be0851 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/feature.go @@ -0,0 +1,227 @@ +package internal + +import ( + "errors" + "fmt" + "runtime" + "strings" + "sync" +) + +// ErrNotSupported indicates that a feature is not supported. +var ErrNotSupported = errors.New("not supported") + +// ErrNotSupportedOnOS indicates that a feature is not supported on the current +// operating system. +var ErrNotSupportedOnOS = fmt.Errorf("%w on %s", ErrNotSupported, runtime.GOOS) + +// UnsupportedFeatureError is returned by FeatureTest() functions. +type UnsupportedFeatureError struct { + // The minimum version required for this feature. + // + // On Linux this refers to the mainline kernel version, on other platforms + // to the version of the runtime. + // + // Used for the error string, and for sanity checking during testing. + MinimumVersion Version + + // The name of the feature that isn't supported. + Name string +} + +func (ufe *UnsupportedFeatureError) Error() string { + if ufe.MinimumVersion.Unspecified() { + return fmt.Sprintf("%s not supported", ufe.Name) + } + return fmt.Sprintf("%s not supported (requires >= %s)", ufe.Name, ufe.MinimumVersion) +} + +// Is indicates that UnsupportedFeatureError is ErrNotSupported. +func (ufe *UnsupportedFeatureError) Is(target error) bool { + return target == ErrNotSupported +} + +// FeatureTest caches the result of a [FeatureTestFn]. 
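+// Instances are usually created via [NewFeatureTest]; an illustrative sketch
+// (the name, probe and version are assumptions for the example):
+//
+//	haveFoo := NewFeatureTest("foo", func() error {
+//		return ErrNotSupported // probe the kernel here
+//	}, "5.4")
+//	err := haveFoo() // cached after the first conclusive run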
+// +// Fields should not be modified after creation. +type FeatureTest struct { + // The name of the feature being detected. + Name string + // Version in the form Major.Minor[.Patch]. + Version string + // The feature test itself. + Fn FeatureTestFn + + mu sync.RWMutex + done bool + result error +} + +// FeatureTestFn is used to determine whether the kernel supports +// a certain feature. +// +// The return values have the following semantics: +// +// err == ErrNotSupported: the feature is not available +// err == nil: the feature is available +// err != nil: the test couldn't be executed +type FeatureTestFn func() error + +// NewFeatureTest is a convenient way to create a single [FeatureTest]. +// +// versions specifies in which version of a BPF runtime a feature appeared. +// The format is "GOOS:Major.Minor[.Patch]". GOOS may be omitted when targeting +// Linux. Returns [ErrNotSupportedOnOS] if there is no version specified for the +// current OS. +func NewFeatureTest(name string, fn FeatureTestFn, versions ...string) func() error { + const nativePrefix = runtime.GOOS + ":" + + if len(versions) == 0 { + return func() error { + return fmt.Errorf("feature test %q: no versions specified", name) + } + } + + ft := &FeatureTest{ + Name: name, + Fn: fn, + } + + for _, version := range versions { + if strings.HasPrefix(version, nativePrefix) { + ft.Version = strings.TrimPrefix(version, nativePrefix) + break + } + + if runtime.GOOS == "linux" && !strings.ContainsRune(version, ':') { + // Allow version numbers without a GOOS prefix on Linux. + ft.Version = version + break + } + } + + if ft.Version == "" { + return func() error { + // We don't return an UnsupportedFeatureError here, since that will + // trigger version checks which don't make sense. + return fmt.Errorf("%s: %w", name, ErrNotSupportedOnOS) + } + } + + return ft.execute +} + +// execute the feature test. +// +// The result is cached if the test is conclusive. +// +// See [FeatureTestFn] for the meaning of the returned error. +func (ft *FeatureTest) execute() error { + ft.mu.RLock() + result, done := ft.result, ft.done + ft.mu.RUnlock() + + if done { + return result + } + + ft.mu.Lock() + defer ft.mu.Unlock() + + // The test may have been executed by another caller while we were + // waiting to acquire ft.mu. + if ft.done { + return ft.result + } + + err := ft.Fn() + if err == nil { + ft.done = true + return nil + } + + if errors.Is(err, ErrNotSupported) { + var v Version + if ft.Version != "" { + v, err = NewVersion(ft.Version) + if err != nil { + return fmt.Errorf("feature %s: %w", ft.Name, err) + } + } + + ft.done = true + ft.result = &UnsupportedFeatureError{ + MinimumVersion: v, + Name: ft.Name, + } + + return ft.result + } + + // We couldn't execute the feature test to a point + // where it could make a determination. + // Don't cache the result, just return it. + return fmt.Errorf("detect support for %s: %w", ft.Name, err) +} + +// FeatureMatrix groups multiple related feature tests into a map. +// +// Useful when there is a small number of discrete features which are known +// at compile time. +// +// It must not be modified concurrently with calling [FeatureMatrix.Result]. +type FeatureMatrix[K comparable] map[K]*FeatureTest + +// Result returns the outcome of the feature test for the given key. +// +// It's safe to call this function concurrently. 
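+//
+// An illustrative sketch (the key and probe are assumptions for the example):
+//
+//	features := FeatureMatrix[string]{
+//		"large_insns": {
+//			Name:    "large instructions",
+//			Version: "5.2",
+//			Fn:      func() error { return nil },
+//		},
+//	}
+//	err := features.Result("large_insns")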
+func (fm FeatureMatrix[K]) Result(key K) error { + ft, ok := fm[key] + if !ok { + return fmt.Errorf("no feature probe for %v", key) + } + + return ft.execute() +} + +// FeatureCache caches a potentially unlimited number of feature probes. +// +// Useful when there is a high cardinality for a feature test. +type FeatureCache[K comparable] struct { + mu sync.RWMutex + newTest func(K) *FeatureTest + features map[K]*FeatureTest +} + +func NewFeatureCache[K comparable](newTest func(K) *FeatureTest) *FeatureCache[K] { + return &FeatureCache[K]{ + newTest: newTest, + features: make(map[K]*FeatureTest), + } +} + +func (fc *FeatureCache[K]) Result(key K) error { + // NB: Executing the feature test happens without fc.mu taken. + return fc.retrieve(key).execute() +} + +func (fc *FeatureCache[K]) retrieve(key K) *FeatureTest { + fc.mu.RLock() + ft := fc.features[key] + fc.mu.RUnlock() + + if ft != nil { + return ft + } + + fc.mu.Lock() + defer fc.mu.Unlock() + + if ft := fc.features[key]; ft != nil { + return ft + } + + ft = fc.newTest(key) + fc.features[key] = ft + return ft +} diff --git a/vendor/github.com/cilium/ebpf/internal/io.go b/vendor/github.com/cilium/ebpf/internal/io.go new file mode 100644 index 0000000000..1eaf4775ad --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/io.go @@ -0,0 +1,128 @@ +package internal + +import ( + "bufio" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// NewBufferedSectionReader wraps an io.ReaderAt in an appropriately-sized +// buffered reader. It is a convenience function for reading subsections of +// ELF sections while minimizing the amount of read() syscalls made. +// +// Syscall overhead is non-negligible in continuous integration context +// where ELFs might be accessed over virtual filesystems with poor random +// access performance. Buffering reads makes sense because (sub)sections +// end up being read completely anyway. +// +// Use instead of the r.Seek() + io.LimitReader() pattern. +func NewBufferedSectionReader(ra io.ReaderAt, off, n int64) *bufio.Reader { + // Clamp the size of the buffer to one page to avoid slurping large parts + // of a file into memory. bufio.NewReader uses a hardcoded default buffer + // of 4096. Allow arches with larger pages to allocate more, but don't + // allocate a fixed 4k buffer if we only need to read a small segment. + buf := n + if ps := int64(os.Getpagesize()); n > ps { + buf = ps + } + + return bufio.NewReaderSize(io.NewSectionReader(ra, off, n), int(buf)) +} + +// DiscardZeroes makes sure that all written bytes are zero +// before discarding them. +type DiscardZeroes struct{} + +func (DiscardZeroes) Write(p []byte) (int, error) { + for _, b := range p { + if b != 0 { + return 0, errors.New("encountered non-zero byte") + } + } + return len(p), nil +} + +// ReadAllCompressed decompresses a gzipped file into memory. +func ReadAllCompressed(file string) ([]byte, error) { + fh, err := os.Open(file) + if err != nil { + return nil, err + } + defer fh.Close() + + gz, err := gzip.NewReader(fh) + if err != nil { + return nil, err + } + defer gz.Close() + + return io.ReadAll(gz) +} + +// ReadUint64FromFile reads a uint64 from a file. +// +// format specifies the contents of the file in fmt.Scanf syntax. +func ReadUint64FromFile(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) 
+ data, err := os.ReadFile(filename) + if err != nil { + return 0, fmt.Errorf("reading file %q: %w", filename, err) + } + + var value uint64 + n, err := fmt.Fscanf(bytes.NewReader(data), format, &value) + if err != nil { + return 0, fmt.Errorf("parsing file %q: %w", filename, err) + } + if n != 1 { + return 0, fmt.Errorf("parsing file %q: expected 1 item, got %d", filename, n) + } + + return value, nil +} + +type uint64FromFileKey struct { + format, path string +} + +var uint64FromFileCache = struct { + sync.RWMutex + values map[uint64FromFileKey]uint64 +}{ + values: map[uint64FromFileKey]uint64{}, +} + +// ReadUint64FromFileOnce is like readUint64FromFile but memoizes the result. +func ReadUint64FromFileOnce(format string, path ...string) (uint64, error) { + filename := filepath.Join(path...) + key := uint64FromFileKey{format, filename} + + uint64FromFileCache.RLock() + if value, ok := uint64FromFileCache.values[key]; ok { + uint64FromFileCache.RUnlock() + return value, nil + } + uint64FromFileCache.RUnlock() + + value, err := ReadUint64FromFile(format, filename) + if err != nil { + return 0, err + } + + uint64FromFileCache.Lock() + defer uint64FromFileCache.Unlock() + + if value, ok := uint64FromFileCache.values[key]; ok { + // Someone else got here before us, use what is cached. + return value, nil + } + + uint64FromFileCache.values[key] = value + return value, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go new file mode 100644 index 0000000000..b7f3e0b781 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/cache.go @@ -0,0 +1,20 @@ +package kallsyms + +import "sync" + +type cache[K, V comparable] struct { + m sync.Map +} + +func (c *cache[K, V]) Load(key K) (value V, _ bool) { + v, ok := c.m.Load(key) + if !ok { + return value, false + } + value = v.(V) + return value, true +} + +func (c *cache[K, V]) Store(key K, value V) { + c.m.Store(key, value) +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go new file mode 100644 index 0000000000..f93d785849 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/kallsyms.go @@ -0,0 +1,277 @@ +package kallsyms + +import ( + "errors" + "fmt" + "io" + "os" + "slices" + "strconv" + "strings" +) + +var errAmbiguousKsym = errors.New("multiple kernel symbols with the same name") + +var symAddrs cache[string, uint64] +var symModules cache[string, string] + +// Module returns the kernel module providing the given symbol in the kernel, if +// any. Returns an empty string and no error if the symbol is not present in the +// kernel. Only function symbols are considered. Returns an error if multiple +// symbols with the same name were found. +// +// Consider [AssignModules] if you need to resolve multiple symbols, as it will +// only perform one iteration over /proc/kallsyms. +func Module(name string) (string, error) { + if name == "" { + return "", nil + } + + if mod, ok := symModules.Load(name); ok { + return mod, nil + } + + request := map[string]string{name: ""} + if err := AssignModules(request); err != nil { + return "", err + } + + return request[name], nil +} + +// AssignModules looks up the kernel module providing each given symbol, if any, +// and assigns them to their corresponding values in the symbols map. Only +// function symbols are considered. Results of all lookups are cached, +// successful or otherwise. 
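+//
+// An illustrative sketch (the symbol names are assumptions for the example):
+//
+//	syms := map[string]string{"nf_nat_init": "", "tcp_v4_rcv": ""}
+//	if err := AssignModules(syms); err != nil {
+//		return err
+//	}
+//	// e.g. syms["nf_nat_init"] == "nf_nat" if nf_nat is loaded as a module,
+//	// while built-in symbols map to the empty string.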
+// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// symbols with a given name were found. +func AssignModules(symbols map[string]string) error { + if len(symbols) == 0 { + return nil + } + + // Attempt to fetch symbols from cache. + request := make(map[string]string) + for name := range symbols { + if mod, ok := symModules.Load(name); ok { + symbols[name] = mod + continue + } + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = "" + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return err + } + + if err := assignModules(f, request); err != nil { + return fmt.Errorf("assigning symbol modules: %w", err) + } + + // Update the cache with the new symbols. Cache all requested symbols, even if + // they're missing or don't belong to a module. + for name, mod := range request { + symModules.Store(name, mod) + symbols[name] = mod + } + + return nil +} + +// assignModules assigns kernel symbol modules read from f to values requested +// by symbols. Always scans the whole input to make sure the user didn't request +// an ambiguous symbol. +func assignModules(f io.Reader, symbols map[string]string) error { + if len(symbols) == 0 { + return nil + } + + found := make(map[string]struct{}) + r := newReader(f) + for r.Line() { + // Only look for function symbols in the kernel's text section (tT). + s, err, skip := parseSymbol(r, []rune{'t', 'T'}) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { + continue + } + + if _, requested := symbols[s.name]; !requested { + continue + } + + if _, ok := found[s.name]; ok { + // We've already seen this symbol. Return an error to avoid silently + // attaching to a symbol in the wrong module. libbpf also rejects + // referring to ambiguous symbols. + // + // We can't simply check if we already have a value for the given symbol, + // since many won't have an associated kernel module. + return fmt.Errorf("symbol %s: duplicate found at address 0x%x (module %q): %w", + s.name, s.addr, s.mod, errAmbiguousKsym) + } + + symbols[s.name] = s.mod + found[s.name] = struct{}{} + } + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) + } + + return nil +} + +// Address returns the address of the given symbol in the kernel. Returns 0 and +// no error if the symbol is not present. Returns an error if multiple addresses +// were found for a symbol. +// +// Consider [AssignAddresses] if you need to resolve multiple symbols, as it +// will only perform one iteration over /proc/kallsyms. +func Address(symbol string) (uint64, error) { + if symbol == "" { + return 0, nil + } + + if addr, ok := symAddrs.Load(symbol); ok { + return addr, nil + } + + request := map[string]uint64{symbol: 0} + if err := AssignAddresses(request); err != nil { + return 0, err + } + + return request[symbol], nil +} + +// AssignAddresses looks up the addresses of the requested symbols in the kernel +// and assigns them to their corresponding values in the symbols map. Results +// of all lookups are cached, successful or otherwise. +// +// Any symbols missing in the kernel are ignored. Returns an error if multiple +// addresses were found for a symbol. +func AssignAddresses(symbols map[string]uint64) error { + if len(symbols) == 0 { + return nil + } + + // Attempt to fetch symbols from cache. 
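+	// A zero value marks a symbol that still has to be resolved from
+	// /proc/kallsyms below.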
+ request := make(map[string]uint64) + for name := range symbols { + if addr, ok := symAddrs.Load(name); ok { + symbols[name] = addr + continue + } + + // Mark the symbol to be read from /proc/kallsyms. + request[name] = 0 + } + if len(request) == 0 { + // All symbols satisfied from cache. + return nil + } + + f, err := os.Open("/proc/kallsyms") + if err != nil { + return err + } + + if err := assignAddresses(f, request); err != nil { + return fmt.Errorf("loading symbol addresses: %w", err) + } + + // Update the cache with the new symbols. Cache all requested symbols even if + // they weren't found, to avoid repeated lookups. + for name, addr := range request { + symAddrs.Store(name, addr) + symbols[name] = addr + } + + return nil +} + +// assignAddresses assigns kernel symbol addresses read from f to values +// requested by symbols. Always scans the whole input to make sure the user +// didn't request an ambiguous symbol. +func assignAddresses(f io.Reader, symbols map[string]uint64) error { + if len(symbols) == 0 { + return nil + } + r := newReader(f) + for r.Line() { + s, err, skip := parseSymbol(r, nil) + if err != nil { + return fmt.Errorf("parsing kallsyms line: %w", err) + } + if skip { + continue + } + + existing, requested := symbols[s.name] + if existing != 0 { + // Multiple addresses for a symbol have been found. Return a friendly + // error to avoid silently attaching to the wrong symbol. libbpf also + // rejects referring to ambiguous symbols. + return fmt.Errorf("symbol %s(0x%x): duplicate found at address 0x%x: %w", s.name, existing, s.addr, errAmbiguousKsym) + } + if requested { + symbols[s.name] = s.addr + } + } + if err := r.Err(); err != nil { + return fmt.Errorf("reading kallsyms: %w", err) + } + + return nil +} + +type ksym struct { + addr uint64 + name string + mod string +} + +// parseSymbol parses a line from /proc/kallsyms into an address, type, name and +// module. Skip will be true if the symbol doesn't match any of the given symbol +// types. See `man 1 nm` for all available types. +// +// Example line: `ffffffffc1682010 T nf_nat_init [nf_nat]` +func parseSymbol(r *reader, types []rune) (s ksym, err error, skip bool) { + for i := 0; r.Word(); i++ { + switch i { + // Address of the symbol. + case 0: + s.addr, err = strconv.ParseUint(r.Text(), 16, 64) + if err != nil { + return s, fmt.Errorf("parsing address: %w", err), false + } + // Type of the symbol. Assume the character is ASCII-encoded by converting + // it directly to a rune, since it's a fixed field controlled by the kernel. + case 1: + if len(types) > 0 && !slices.Contains(types, rune(r.Bytes()[0])) { + return s, nil, true + } + // Name of the symbol. + case 2: + s.name = r.Text() + // Kernel module the symbol is provided by. + case 3: + s.mod = strings.Trim(r.Text(), "[]") + // Ignore any future fields. + default: + break + } + } + + return +} diff --git a/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go b/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go new file mode 100644 index 0000000000..2bd4f8eafc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kallsyms/reader.go @@ -0,0 +1,118 @@ +package kallsyms + +import ( + "bufio" + "io" + "unicode" + "unicode/utf8" +) + +// reader is a line and word-oriented reader built for reading /proc/kallsyms. +// It takes an io.Reader and iterates its contents line by line, then word by +// word. 
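+//
+// A typical iteration looks like this (illustrative sketch; f is any io.Reader):
+//
+//	r := newReader(f)
+//	for r.Line() {
+//		for r.Word() {
+//			_ = r.Text()
+//		}
+//	}
+//	if err := r.Err(); err != nil {
+//		return err
+//	}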
+// +// It's designed to allow partial reading of lines without paying the cost of +// allocating objects that will never be accessed, resulting in less work for +// the garbage collector. +type reader struct { + s *bufio.Scanner + line []byte + word []byte + + err error +} + +func newReader(r io.Reader) *reader { + return &reader{ + s: bufio.NewScanner(r), + } +} + +// Bytes returns the current word as a byte slice. +func (r *reader) Bytes() []byte { + return r.word +} + +// Text returns the output of Bytes as a string. +func (r *reader) Text() string { + return string(r.Bytes()) +} + +// Line advances the reader to the next line in the input. Calling Line resets +// the current word, making [reader.Bytes] and [reader.Text] return empty +// values. Follow this up with a call to [reader.Word]. +// +// Like [bufio.Scanner], [reader.Err] needs to be checked after Line returns +// false to determine if an error occurred during reading. +// +// Returns true if Line can be called again. Returns false if all lines in the +// input have been read. +func (r *reader) Line() bool { + for r.s.Scan() { + line := r.s.Bytes() + if len(line) == 0 { + continue + } + + r.line = line + r.word = nil + + return true + } + if err := r.s.Err(); err != nil { + r.err = err + } + + return false +} + +// Word advances the reader to the next word in the current line. +// +// Returns true if a word is found and Word should be called again. Returns +// false when all words on the line have been read. +func (r *reader) Word() bool { + if len(r.line) == 0 { + return false + } + + // Find next word start, skipping leading spaces. + start := 0 + for width := 0; start < len(r.line); start += width { + var c rune + c, width = utf8.DecodeRune(r.line[start:]) + if !unicode.IsSpace(c) { + break + } + } + + // Whitespace scanning reached the end of the line due to trailing whitespace, + // meaning there are no more words to read + if start == len(r.line) { + return false + } + + // Find next word end. + for width, i := 0, start; i < len(r.line); i += width { + var c rune + c, width = utf8.DecodeRune(r.line[i:]) + if unicode.IsSpace(c) { + r.word = r.line[start:i] + r.line = r.line[i:] + return true + } + } + + // The line contains data, but no end-of-word boundary was found. This is the + // last, unterminated word in the line. + if len(r.line) > start { + r.word = r.line[start:] + r.line = nil + return true + } + + return false +} + +func (r *reader) Err() error { + return r.err +} diff --git a/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go new file mode 100644 index 0000000000..29c62b6266 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/kconfig/kconfig.go @@ -0,0 +1,274 @@ +// Package kconfig implements a parser for the format of Linux's .config file. +package kconfig + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "strconv" + "strings" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" +) + +// Parse parses the kconfig file for which a reader is given. +// All the CONFIG_* which are in filter and which are set set will be +// put in the returned map as key with their corresponding value as map value. +// If filter is nil, no filtering will occur. +// If the kconfig file is not valid, error will be returned. 
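+//
+// An illustrative sketch reading a single option:
+//
+//	f, err := os.Open("/proc/config.gz")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	opts, err := Parse(f, map[string]struct{}{"CONFIG_BPF": {}})
+//	// on success, opts["CONFIG_BPF"] == "y" if the option is compiled in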
+func Parse(source io.ReaderAt, filter map[string]struct{}) (map[string]string, error) { + var r io.Reader + zr, err := gzip.NewReader(io.NewSectionReader(source, 0, math.MaxInt64)) + if err != nil { + r = io.NewSectionReader(source, 0, math.MaxInt64) + } else { + // Source is gzip compressed, transparently decompress. + r = zr + } + + ret := make(map[string]string, len(filter)) + + s := bufio.NewScanner(r) + + for s.Scan() { + line := s.Bytes() + err = processKconfigLine(line, ret, filter) + if err != nil { + return nil, fmt.Errorf("cannot parse line: %w", err) + } + + if filter != nil && len(ret) == len(filter) { + break + } + } + + if err := s.Err(); err != nil { + return nil, fmt.Errorf("cannot parse: %w", err) + } + + if zr != nil { + return ret, zr.Close() + } + + return ret, nil +} + +// Golang translation of libbpf bpf_object__process_kconfig_line(): +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/libbpf.c#L1874 +// It does the same checks but does not put the data inside the BPF map. +func processKconfigLine(line []byte, m map[string]string, filter map[string]struct{}) error { + // Ignore empty lines and "# CONFIG_* is not set". + if !bytes.HasPrefix(line, []byte("CONFIG_")) { + return nil + } + + key, value, found := bytes.Cut(line, []byte{'='}) + if !found { + return fmt.Errorf("line %q does not contain separator '='", line) + } + + if len(value) == 0 { + return fmt.Errorf("line %q has no value", line) + } + + if filter != nil { + // NB: map[string(key)] gets special optimisation help from the compiler + // and doesn't allocate. Don't turn this into a variable. + _, ok := filter[string(key)] + if !ok { + return nil + } + } + + // This can seem odd, but libbpf only sets the value the first time the key is + // met: + // https://github.com/torvalds/linux/blob/0d85b27b0cc6/tools/lib/bpf/libbpf.c#L1906-L1908 + _, ok := m[string(key)] + if !ok { + m[string(key)] = string(value) + } + + return nil +} + +// PutValue translates the value given as parameter depending on the BTF +// type, the translated value is then written to the byte array. 
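+//
+// An illustrative sketch writing the tristate "y" into a one-byte bool (the
+// BTF type here is an assumption for the example):
+//
+//	buf := make([]byte, 1)
+//	err := PutValue(buf, &btf.Int{Size: 1, Encoding: btf.Bool}, "y")
+//	// buf[0] == 1 on success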
+func PutValue(data []byte, typ btf.Type, value string) error { + typ = btf.UnderlyingType(typ) + + switch value { + case "y", "n", "m": + return putValueTri(data, typ, value) + } + + if strings.HasPrefix(value, `"`) { + return putValueString(data, typ, value) + } + + return putValueNumber(data, typ, value) +} + +// Golang translation of libbpf_tristate enum: +// https://github.com/libbpf/libbpf/blob/fbd60dbff51c870f5e80a17c4f2fd639eb80af90/src/bpf_helpers.h#L169 +type triState int + +const ( + TriNo triState = 0 + TriYes triState = 1 + TriModule triState = 2 +) + +func putValueTri(data []byte, typ btf.Type, value string) error { + switch v := typ.(type) { + case *btf.Int: + if v.Encoding != btf.Bool { + return fmt.Errorf("cannot add tri value, expected btf.Bool, got: %v", v.Encoding) + } + + if v.Size != 1 { + return fmt.Errorf("cannot add tri value, expected size of 1 byte, got: %d", v.Size) + } + + switch value { + case "y": + data[0] = 1 + case "n": + data[0] = 0 + default: + return fmt.Errorf("cannot use %q for btf.Bool", value) + } + case *btf.Enum: + if v.Name != "libbpf_tristate" { + return fmt.Errorf("cannot use enum %q, only libbpf_tristate is supported", v.Name) + } + + if len(data) != 4 { + return fmt.Errorf("expected enum value to occupy 4 bytes in datasec, got: %d", len(data)) + } + + var tri triState + switch value { + case "y": + tri = TriYes + case "m": + tri = TriModule + case "n": + tri = TriNo + default: + return fmt.Errorf("value %q is not supported for libbpf_tristate", value) + } + + internal.NativeEndian.PutUint32(data, uint32(tri)) + default: + return fmt.Errorf("cannot add number value, expected btf.Int or btf.Enum, got: %T", v) + } + + return nil +} + +func putValueString(data []byte, typ btf.Type, value string) error { + array, ok := typ.(*btf.Array) + if !ok { + return fmt.Errorf("cannot add string value, expected btf.Array, got %T", array) + } + + contentType, ok := btf.UnderlyingType(array.Type).(*btf.Int) + if !ok { + return fmt.Errorf("cannot add string value, expected array of btf.Int, got %T", contentType) + } + + // Any Int, which is not bool, of one byte could be used to store char: + // https://github.com/torvalds/linux/blob/1a5304fecee5/tools/lib/bpf/libbpf.c#L3637-L3638 + if contentType.Size != 1 && contentType.Encoding != btf.Bool { + return fmt.Errorf("cannot add string value, expected array of btf.Int of size 1, got array of btf.Int of size: %v", contentType.Size) + } + + if !strings.HasPrefix(value, `"`) || !strings.HasSuffix(value, `"`) { + return fmt.Errorf(`value %q must start and finish with '"'`, value) + } + + str := strings.Trim(value, `"`) + + // We need to trim string if the bpf array is smaller. + if uint32(len(str)) >= array.Nelems { + str = str[:array.Nelems] + } + + // Write the string content to .kconfig. + copy(data, str) + + return nil +} + +func putValueNumber(data []byte, typ btf.Type, value string) error { + integer, ok := typ.(*btf.Int) + if !ok { + return fmt.Errorf("cannot add number value, expected *btf.Int, got: %T", integer) + } + + size := integer.Size + sizeInBits := size * 8 + + var n uint64 + var err error + if integer.Encoding == btf.Signed { + parsed, e := strconv.ParseInt(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } else { + parsed, e := strconv.ParseUint(value, 0, int(sizeInBits)) + + n = uint64(parsed) + err = e + } + + if err != nil { + return fmt.Errorf("cannot parse value: %w", err) + } + + return PutInteger(data, integer, n) +} + +// PutInteger writes n into data. 
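+// For example (illustrative), writing 1 into a four-byte unsigned slot:
+//
+//	data := make([]byte, 4)
+//	err := PutInteger(data, &btf.Int{Size: 4}, 1)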
+// +// integer determines how much is written into data and what the valid values +// are. +func PutInteger(data []byte, integer *btf.Int, n uint64) error { + // This function should match set_kcfg_value_num in libbpf. + if integer.Encoding == btf.Bool && n > 1 { + return fmt.Errorf("invalid boolean value: %d", n) + } + + if len(data) < int(integer.Size) { + return fmt.Errorf("can't fit an integer of size %d into a byte slice of length %d", integer.Size, len(data)) + } + + switch integer.Size { + case 1: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt8 || int64(n) < math.MinInt8) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + data[0] = byte(n) + case 2: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt16 || int64(n) < math.MinInt16) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint16(data, uint16(n)) + case 4: + if integer.Encoding == btf.Signed && (int64(n) > math.MaxInt32 || int64(n) < math.MinInt32) { + return fmt.Errorf("can't represent %d as a signed integer of size %d", int64(n), integer.Size) + } + internal.NativeEndian.PutUint32(data, uint32(n)) + case 8: + internal.NativeEndian.PutUint64(data, uint64(n)) + default: + return fmt.Errorf("size (%d) is not valid, expected: 1, 2, 4 or 8", integer.Size) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/auxv.go b/vendor/github.com/cilium/ebpf/internal/linux/auxv.go new file mode 100644 index 0000000000..98bb5d83ca --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/auxv.go @@ -0,0 +1,60 @@ +package linux + +import ( + "errors" + "io" + _ "unsafe" +) + +type auxvPairReader interface { + Close() error + ReadAuxvPair() (uint64, uint64, error) +} + +// See https://elixir.bootlin.com/linux/v6.5.5/source/include/uapi/linux/auxvec.h +const ( + _AT_NULL = 0 // End of vector + _AT_SYSINFO_EHDR = 33 // Offset to vDSO blob in process image +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +type auxvRuntimeReader struct { + data []uintptr + index int +} + +func (r *auxvRuntimeReader) Close() error { + return nil +} + +func (r *auxvRuntimeReader) ReadAuxvPair() (uint64, uint64, error) { + if r.index >= len(r.data)+2 { + return 0, 0, io.EOF + } + + // we manually add the (_AT_NULL, _AT_NULL) pair at the end + // that is not provided by the go runtime + var tag, value uintptr + if r.index+1 < len(r.data) { + tag, value = r.data[r.index], r.data[r.index+1] + } else { + tag, value = _AT_NULL, _AT_NULL + } + r.index += 2 + return uint64(tag), uint64(value), nil +} + +func newAuxvRuntimeReader() (auxvPairReader, error) { + data := runtime_getAuxv() + + if len(data)%2 != 0 { + return nil, errors.New("malformed auxv passed from runtime") + } + + return &auxvRuntimeReader{ + data: data, + index: 0, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/doc.go b/vendor/github.com/cilium/ebpf/internal/linux/doc.go new file mode 100644 index 0000000000..064e75437d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/doc.go @@ -0,0 +1,2 @@ +// Package linux contains OS specific wrappers around package unix. 
+package linux diff --git a/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go b/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go new file mode 100644 index 0000000000..1488ecb35c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/kconfig.go @@ -0,0 +1,31 @@ +package linux + +import ( + "fmt" + "os" +) + +// FindKConfig searches for a kconfig file on the host. +// +// It first reads from /boot/config- of the current running kernel and tries +// /proc/config.gz if nothing was found in /boot. +// If none of the file provide a kconfig, it returns an error. +func FindKConfig() (*os.File, error) { + kernelRelease, err := KernelRelease() + if err != nil { + return nil, fmt.Errorf("cannot get kernel release: %w", err) + } + + path := "/boot/config-" + kernelRelease + f, err := os.Open(path) + if err == nil { + return f, nil + } + + f, err = os.Open("/proc/config.gz") + if err == nil { + return f, nil + } + + return nil, fmt.Errorf("neither %s nor /proc/config.gz provide a kconfig", path) +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/platform.go b/vendor/github.com/cilium/ebpf/internal/linux/platform.go new file mode 100644 index 0000000000..39bdcc51f9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/platform.go @@ -0,0 +1,43 @@ +package linux + +import ( + "runtime" +) + +// PlatformPrefix returns the platform-dependent syscall wrapper prefix used by +// the linux kernel. +// +// Based on https://github.com/golang/go/blob/master/src/go/build/syslist.go +// and https://github.com/libbpf/libbpf/blob/master/src/libbpf.c#L10047 +func PlatformPrefix() string { + switch runtime.GOARCH { + case "386": + return "__ia32_" + case "amd64", "amd64p32": + return "__x64_" + + case "arm", "armbe": + return "__arm_" + case "arm64", "arm64be": + return "__arm64_" + + case "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le": + return "__mips_" + + case "s390": + return "__s390_" + case "s390x": + return "__s390x_" + + case "riscv", "riscv64": + return "__riscv_" + + case "ppc": + return "__powerpc_" + case "ppc64", "ppc64le": + return "__powerpc64_" + + default: + return "" + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/statfs.go b/vendor/github.com/cilium/ebpf/internal/linux/statfs.go new file mode 100644 index 0000000000..e268c06fab --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/statfs.go @@ -0,0 +1,23 @@ +package linux + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +func FSType(path string) (int64, error) { + var statfs unix.Statfs_t + if err := unix.Statfs(path, &statfs); err != nil { + return 0, err + } + + fsType := int64(statfs.Type) + if unsafe.Sizeof(statfs.Type) == 4 { + // We're on a 32 bit arch, where statfs.Type is int32. bpfFSType is a + // negative number when interpreted as int32 so we need to cast via + // uint32 to avoid sign extension. 
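+		// For example, unix.BPF_FS_MAGIC (0xcafe4a11) would otherwise become a
+		// negative int64 here.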
+ fsType = int64(uint32(statfs.Type)) + } + return fsType, nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/vdso.go b/vendor/github.com/cilium/ebpf/internal/linux/vdso.go new file mode 100644 index 0000000000..1d8d0ef6b1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/vdso.go @@ -0,0 +1,144 @@ +package linux + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "os" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + errAuxvNoVDSO = errors.New("no vdso address found in auxv") +) + +// vdsoVersion returns the LINUX_VERSION_CODE embedded in the vDSO library +// linked into the current process image. +func vdsoVersion() (uint32, error) { + av, err := newAuxvRuntimeReader() + if err != nil { + return 0, err + } + + defer av.Close() + + vdsoAddr, err := vdsoMemoryAddress(av) + if err != nil { + return 0, fmt.Errorf("finding vDSO memory address: %w", err) + } + + // Use /proc/self/mem rather than unsafe.Pointer tricks. + mem, err := os.Open("/proc/self/mem") + if err != nil { + return 0, fmt.Errorf("opening mem: %w", err) + } + defer mem.Close() + + // Open ELF at provided memory address, as offset into /proc/self/mem. + c, err := vdsoLinuxVersionCode(io.NewSectionReader(mem, int64(vdsoAddr), math.MaxInt64)) + if err != nil { + return 0, fmt.Errorf("reading linux version code: %w", err) + } + + return c, nil +} + +// vdsoMemoryAddress returns the memory address of the vDSO library +// linked into the current process image. r is an io.Reader into an auxv blob. +func vdsoMemoryAddress(r auxvPairReader) (uintptr, error) { + // Loop through all tag/value pairs in auxv until we find `AT_SYSINFO_EHDR`, + // the address of a page containing the virtual Dynamic Shared Object (vDSO). + for { + tag, value, err := r.ReadAuxvPair() + if err != nil { + return 0, err + } + + switch tag { + case _AT_SYSINFO_EHDR: + if value != 0 { + return uintptr(value), nil + } + return 0, fmt.Errorf("invalid vDSO address in auxv") + // _AT_NULL is always the last tag/val pair in the aux vector + // and can be treated like EOF. + case _AT_NULL: + return 0, errAuxvNoVDSO + } + } +} + +// format described at https://www.man7.org/linux/man-pages/man5/elf.5.html in section 'Notes (Nhdr)' +type elfNoteHeader struct { + NameSize int32 + DescSize int32 + Type int32 +} + +// vdsoLinuxVersionCode returns the LINUX_VERSION_CODE embedded in +// the ELF notes section of the binary provided by the reader. +func vdsoLinuxVersionCode(r io.ReaderAt) (uint32, error) { + hdr, err := internal.NewSafeELFFile(r) + if err != nil { + return 0, fmt.Errorf("reading vDSO ELF: %w", err) + } + + sections := hdr.SectionsByType(elf.SHT_NOTE) + if len(sections) == 0 { + return 0, fmt.Errorf("no note section found in vDSO ELF") + } + + for _, sec := range sections { + sr := sec.Open() + var n elfNoteHeader + + // Read notes until we find one named 'Linux'. + for { + if err := binary.Read(sr, hdr.ByteOrder, &n); err != nil { + if errors.Is(err, io.EOF) { + // We looked at all the notes in this section + break + } + return 0, fmt.Errorf("reading note header: %w", err) + } + + // If a note name is defined, it follows the note header. + var name string + if n.NameSize > 0 { + // Read the note name, aligned to 4 bytes. + buf := make([]byte, internal.Align(n.NameSize, 4)) + if err := binary.Read(sr, hdr.ByteOrder, &buf); err != nil { + return 0, fmt.Errorf("reading note name: %w", err) + } + + // Read nul-terminated string. 
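+				// NameSize includes the terminating NUL, which
+				// ByteSliceToString strips again.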
+ name = unix.ByteSliceToString(buf[:n.NameSize]) + } + + // If a note descriptor is defined, it follows the name. + // It is possible for a note to have a descriptor but not a name. + if n.DescSize > 0 { + // LINUX_VERSION_CODE is a uint32 value. + if name == "Linux" && n.DescSize == 4 && n.Type == 0 { + var version uint32 + if err := binary.Read(sr, hdr.ByteOrder, &version); err != nil { + return 0, fmt.Errorf("reading note descriptor: %w", err) + } + return version, nil + } + + // Discard the note descriptor if it exists but we're not interested in it. + if _, err := io.CopyN(io.Discard, sr, int64(internal.Align(n.DescSize, 4))); err != nil { + return 0, err + } + } + } + } + + return 0, fmt.Errorf("no Linux note in ELF") +} diff --git a/vendor/github.com/cilium/ebpf/internal/linux/version.go b/vendor/github.com/cilium/ebpf/internal/linux/version.go new file mode 100644 index 0000000000..798dd3fed0 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/linux/version.go @@ -0,0 +1,34 @@ +package linux + +import ( + "fmt" + "sync" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/unix" +) + +// KernelVersion returns the version of the currently running kernel. +var KernelVersion = sync.OnceValues(detectKernelVersion) + +// detectKernelVersion returns the version of the running kernel. +func detectKernelVersion() (internal.Version, error) { + vc, err := vdsoVersion() + if err != nil { + return internal.Version{}, err + } + return internal.NewVersionFromCode(vc), nil +} + +// KernelRelease returns the release string of the running kernel. +// Its format depends on the Linux distribution and corresponds to directory +// names in /lib/modules by convention. Some examples are 5.15.17-1-lts and +// 4.19.0-16-amd64. +func KernelRelease() (string, error) { + var uname unix.Utsname + if err := unix.Uname(&uname); err != nil { + return "", fmt.Errorf("uname failed: %w", err) + } + + return unix.ByteSliceToString(uname.Release[:]), nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/math.go b/vendor/github.com/cilium/ebpf/internal/math.go new file mode 100644 index 0000000000..10cde66860 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/math.go @@ -0,0 +1,33 @@ +package internal + +// Align returns 'n' updated to 'alignment' boundary. +func Align[I Integer](n, alignment I) I { + return (n + alignment - 1) / alignment * alignment +} + +// IsPow returns true if n is a power of two. +func IsPow[I Integer](n I) bool { + return n != 0 && (n&(n-1)) == 0 +} + +// Between returns the value clamped between a and b. +func Between[I Integer](val, a, b I) I { + lower, upper := a, b + if lower > upper { + upper, lower = a, b + } + + val = min(val, upper) + return max(val, lower) +} + +// Integer represents all possible integer types. +// Remove when x/exp/constraints is moved to the standard library. +type Integer interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// List of integer types known by the Go compiler. Used by TestIntegerConstraint +// to warn if a new integer type is introduced. Remove when x/exp/constraints +// is moved to the standard library. 
+var integers = []string{"int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16", "uint32", "uint64", "uintptr"} diff --git a/vendor/github.com/cilium/ebpf/internal/output.go b/vendor/github.com/cilium/ebpf/internal/output.go new file mode 100644 index 0000000000..dd6e6cbafe --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/output.go @@ -0,0 +1,97 @@ +package internal + +import ( + "bytes" + "errors" + "go/format" + "go/scanner" + "io" + "reflect" + "strings" + "unicode" +) + +// Identifier turns a C style type or field name into an exportable Go equivalent. +func Identifier(str string) string { + prev := rune(-1) + return strings.Map(func(r rune) rune { + // See https://golang.org/ref/spec#Identifiers + switch { + case unicode.IsLetter(r): + if prev == -1 { + r = unicode.ToUpper(r) + } + + case r == '_': + switch { + // The previous rune was deleted, or we are at the + // beginning of the string. + case prev == -1: + fallthrough + + // The previous rune is a lower case letter or a digit. + case unicode.IsDigit(prev) || (unicode.IsLetter(prev) && unicode.IsLower(prev)): + // delete the current rune, and force the + // next character to be uppercased. + r = -1 + } + + case unicode.IsDigit(r): + + default: + // Delete the current rune. prev is unchanged. + return -1 + } + + prev = r + return r + }, str) +} + +// WriteFormatted outputs a formatted src into out. +// +// If formatting fails it returns an informative error message. +func WriteFormatted(src []byte, out io.Writer) error { + formatted, err := format.Source(src) + if err == nil { + _, err = out.Write(formatted) + return err + } + + var el scanner.ErrorList + if !errors.As(err, &el) { + return err + } + + var nel scanner.ErrorList + for _, err := range el { + if !err.Pos.IsValid() { + nel = append(nel, err) + continue + } + + buf := src[err.Pos.Offset:] + nl := bytes.IndexRune(buf, '\n') + if nl == -1 { + nel = append(nel, err) + continue + } + + err.Msg += ": " + string(buf[:nl]) + nel = append(nel, err) + } + + return nel +} + +// GoTypeName is like %T, but elides the package name. +// +// Pointers to a type are peeled off. +func GoTypeName(t any) string { + rT := reflect.TypeOf(t) + for rT.Kind() == reflect.Pointer { + rT = rT.Elem() + } + // Doesn't return the correct Name for generic types due to https://github.com/golang/go/issues/55924 + return rT.Name() +} diff --git a/vendor/github.com/cilium/ebpf/internal/prog.go b/vendor/github.com/cilium/ebpf/internal/prog.go new file mode 100644 index 0000000000..d629145b62 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/prog.go @@ -0,0 +1,11 @@ +package internal + +// EmptyBPFContext is the smallest-possible BPF input context to be used for +// invoking `Program.{Run,Benchmark,Test}`. +// +// Programs require a context input buffer of at least 15 bytes. Looking in +// net/bpf/test_run.c, bpf_test_init() requires that the input is at least +// ETH_HLEN (14) bytes. As of Linux commit fd18942 ("bpf: Don't redirect packets +// with invalid pkt_len"), it also requires the skb to be non-empty after +// removing the Layer 2 header. +var EmptyBPFContext = make([]byte, 15) diff --git a/vendor/github.com/cilium/ebpf/internal/sys/doc.go b/vendor/github.com/cilium/ebpf/internal/sys/doc.go new file mode 100644 index 0000000000..dfe174448e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/doc.go @@ -0,0 +1,6 @@ +// Package sys contains bindings for the BPF syscall. +package sys + +// Regenerate types.go by invoking go generate in the current directory. 
+ +//go:generate go run github.com/cilium/ebpf/internal/cmd/gentypes ../../btf/testdata/vmlinux.btf.gz diff --git a/vendor/github.com/cilium/ebpf/internal/sys/fd.go b/vendor/github.com/cilium/ebpf/internal/sys/fd.go new file mode 100644 index 0000000000..e2ba43fd3b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/fd.go @@ -0,0 +1,165 @@ +package sys + +import ( + "fmt" + "math" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/cilium/ebpf/internal/testutils/fdtrace" + "github.com/cilium/ebpf/internal/unix" +) + +var ErrClosedFd = unix.EBADF + +type FD struct { + raw int +} + +func newFD(value int) *FD { + fdtrace.TraceFD(value, 1) + + fd := &FD{value} + runtime.SetFinalizer(fd, (*FD).finalize) + return fd +} + +// finalize is set as the FD's runtime finalizer and +// sends a leak trace before calling FD.Close(). +func (fd *FD) finalize() { + if fd.raw < 0 { + return + } + + fdtrace.LeakFD(fd.raw) + + _ = fd.Close() +} + +// NewFD wraps a raw fd with a finalizer. +// +// You must not use the raw fd after calling this function, since the underlying +// file descriptor number may change. This is because the BPF UAPI assumes that +// zero is not a valid fd value. +func NewFD(value int) (*FD, error) { + if value < 0 { + return nil, fmt.Errorf("invalid fd %d", value) + } + + fd := newFD(value) + if value != 0 { + return fd, nil + } + + dup, err := fd.Dup() + _ = fd.Close() + return dup, err +} + +func (fd *FD) String() string { + return strconv.FormatInt(int64(fd.raw), 10) +} + +func (fd *FD) Int() int { + return fd.raw +} + +func (fd *FD) Uint() uint32 { + if fd.raw < 0 || int64(fd.raw) > math.MaxUint32 { + // Best effort: this is the number most likely to be an invalid file + // descriptor. It is equal to -1 (on two's complement arches). + return math.MaxUint32 + } + return uint32(fd.raw) +} + +func (fd *FD) Close() error { + if fd.raw < 0 { + return nil + } + + return unix.Close(fd.Disown()) +} + +// Disown destroys the FD and returns its raw file descriptor without closing +// it. After this call, the underlying fd is no longer tied to the FD's +// lifecycle. +func (fd *FD) Disown() int { + value := fd.raw + fdtrace.ForgetFD(value) + fd.raw = -1 + + runtime.SetFinalizer(fd, nil) + return value +} + +func (fd *FD) Dup() (*FD, error) { + if fd.raw < 0 { + return nil, ErrClosedFd + } + + // Always require the fd to be larger than zero: the BPF API treats the value + // as "no argument provided". + dup, err := unix.FcntlInt(uintptr(fd.raw), unix.F_DUPFD_CLOEXEC, 1) + if err != nil { + return nil, fmt.Errorf("can't dup fd: %v", err) + } + + return newFD(dup), nil +} + +// File takes ownership of FD and turns it into an [*os.File]. +// +// You must not use the FD after the call returns. +// +// Returns nil if the FD is not valid. +func (fd *FD) File(name string) *os.File { + if fd.raw < 0 { + return nil + } + + return os.NewFile(uintptr(fd.Disown()), name) +} + +// ObjGetTyped wraps [ObjGet] with a readlink call to extract the type of the +// underlying bpf object. +func ObjGetTyped(attr *ObjGetAttr) (*FD, ObjType, error) { + fd, err := ObjGet(attr) + if err != nil { + return nil, 0, err + } + + typ, err := readType(fd) + if err != nil { + _ = fd.Close() + return nil, 0, fmt.Errorf("reading fd type: %w", err) + } + + return fd, typ, nil +} + +// readType returns the bpf object type of the file descriptor by calling +// readlink(3). Returns an error if the file descriptor does not represent a bpf +// object. 
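+//
+// For a map fd, /proc/self/fd/<n> typically resolves to "anon_inode:bpf-map";
+// programs and links resolve to "anon_inode:bpf-prog" and "anon_inode:bpf-link"
+// respectively.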
+func readType(fd *FD) (ObjType, error) { + s, err := os.Readlink(filepath.Join("/proc/self/fd/", fd.String())) + if err != nil { + return 0, fmt.Errorf("readlink fd %d: %w", fd.Int(), err) + } + + s = strings.TrimPrefix(s, "anon_inode:") + + switch s { + case "bpf-map": + return BPF_TYPE_MAP, nil + case "bpf-prog": + return BPF_TYPE_PROG, nil + case "bpf-link": + return BPF_TYPE_LINK, nil + } + + return 0, fmt.Errorf("unknown type %s of fd %d", s, fd.Int()) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/pinning.go b/vendor/github.com/cilium/ebpf/internal/sys/pinning.go new file mode 100644 index 0000000000..9a4c6c7a15 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/pinning.go @@ -0,0 +1,65 @@ +package sys + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/unix" +) + +func Pin(currentPath, newPath string, fd *FD) error { + if newPath == "" { + return errors.New("given pinning path cannot be empty") + } + if currentPath == newPath { + return nil + } + + fsType, err := linux.FSType(filepath.Dir(newPath)) + if err != nil { + return err + } + if fsType != unix.BPF_FS_MAGIC { + return fmt.Errorf("%s is not on a bpf filesystem", newPath) + } + + defer runtime.KeepAlive(fd) + + if currentPath == "" { + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) + } + + // Renameat2 is used instead of os.Rename to disallow the new path replacing + // an existing path. + err = unix.Renameat2(unix.AT_FDCWD, currentPath, unix.AT_FDCWD, newPath, unix.RENAME_NOREPLACE) + if err == nil { + // Object is now moved to the new pinning path. + return nil + } + if !os.IsNotExist(err) { + return fmt.Errorf("unable to move pinned object to new path %v: %w", newPath, err) + } + // Internal state not in sync with the file system so let's fix it. + return ObjPin(&ObjPinAttr{ + Pathname: NewStringPointer(newPath), + BpfFd: fd.Uint(), + }) +} + +func Unpin(pinnedPath string) error { + if pinnedPath == "" { + return nil + } + err := os.Remove(pinnedPath) + if err == nil || os.IsNotExist(err) { + return nil + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go new file mode 100644 index 0000000000..af0c014e3b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr.go @@ -0,0 +1,52 @@ +package sys + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// NewPointer creates a 64-bit pointer from an unsafe Pointer. +func NewPointer(ptr unsafe.Pointer) Pointer { + return Pointer{ptr: ptr} +} + +// NewSlicePointer creates a 64-bit pointer from a slice. +func NewSlicePointer[T comparable](buf []T) Pointer { + if len(buf) == 0 { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(unsafe.SliceData(buf))} +} + +// NewSlicePointerLen creates a 64-bit pointer from a byte slice. +// +// Useful to assign both the pointer and the length in one go. +func NewSlicePointerLen(buf []byte) (Pointer, uint32) { + return NewSlicePointer(buf), uint32(len(buf)) +} + +// NewStringPointer creates a 64-bit pointer from a string. +func NewStringPointer(str string) Pointer { + p, err := unix.BytePtrFromString(str) + if err != nil { + return Pointer{} + } + + return Pointer{ptr: unsafe.Pointer(p)} +} + +// NewStringSlicePointer allocates an array of Pointers to each string in the +// given slice of strings and returns a 64-bit pointer to the start of the +// resulting array. 
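+//
+// A rough sketch of how this is used (field names as in
+// LinkCreateKprobeMultiAttr further down in this package):
+//
+//	syms := []string{"tcp_v4_connect", "tcp_v6_connect"}
+//	attr.Syms = NewStringSlicePointer(syms)
+//	attr.Count = uint32(len(syms))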
+// +// Use this function to pass arrays of strings as syscall arguments. +func NewStringSlicePointer(strings []string) Pointer { + sp := make([]Pointer, 0, len(strings)) + for _, s := range strings { + sp = append(sp, NewStringPointer(s)) + } + + return Pointer{ptr: unsafe.Pointer(&sp[0])} +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go new file mode 100644 index 0000000000..6278c79c9e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_be.go @@ -0,0 +1,14 @@ +//go:build armbe || mips || mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + pad uint32 + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go new file mode 100644 index 0000000000..c27b537e8e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_32_le.go @@ -0,0 +1,14 @@ +//go:build 386 || amd64p32 || arm || mipsle || mips64p32le + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer + pad uint32 +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go new file mode 100644 index 0000000000..2d7828230a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/ptr_64.go @@ -0,0 +1,13 @@ +//go:build !386 && !amd64p32 && !arm && !mipsle && !mips64p32le && !armbe && !mips && !mips64p32 + +package sys + +import ( + "unsafe" +) + +// Pointer wraps an unsafe.Pointer to be 64bit to +// conform to the syscall specification. +type Pointer struct { + ptr unsafe.Pointer +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/signals.go b/vendor/github.com/cilium/ebpf/internal/sys/signals.go new file mode 100644 index 0000000000..e5337191d6 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/signals.go @@ -0,0 +1,83 @@ +package sys + +import ( + "fmt" + "runtime" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// A sigset containing only SIGPROF. +var profSet unix.Sigset_t + +func init() { + // See sigsetAdd for details on the implementation. Open coded here so + // that the compiler will check the constant calculations for us. + profSet.Val[sigprofBit/wordBits] |= 1 << (sigprofBit % wordBits) +} + +// maskProfilerSignal locks the calling goroutine to its underlying OS thread +// and adds SIGPROF to the thread's signal mask. This prevents pprof from +// interrupting expensive syscalls like e.g. BPF_PROG_LOAD. +// +// The caller must defer unmaskProfilerSignal() to reverse the operation. +func maskProfilerSignal() { + runtime.LockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_BLOCK, &profSet, nil); err != nil { + runtime.UnlockOSThread() + panic(fmt.Errorf("masking profiler signal: %w", err)) + } +} + +// unmaskProfilerSignal removes SIGPROF from the underlying thread's signal +// mask, allowing it to be interrupted for profiling once again. +// +// It also unlocks the current goroutine from its underlying OS thread. +func unmaskProfilerSignal() { + defer runtime.UnlockOSThread() + + if err := unix.PthreadSigmask(unix.SIG_UNBLOCK, &profSet, nil); err != nil { + panic(fmt.Errorf("unmasking profiler signal: %w", err)) + } +} + +const ( + // Signal is the nth bit in the bitfield. 
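+ // On most Linux architectures SIGPROF is signal 27, making sigprofBit 26.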
+ sigprofBit = int(unix.SIGPROF - 1) + // The number of bits in one Sigset_t word. + wordBits = int(unsafe.Sizeof(unix.Sigset_t{}.Val[0])) * 8 +) + +// sigsetAdd adds signal to set. +// +// Note: Sigset_t.Val's value type is uint32 or uint64 depending on the arch. +// This function must be able to deal with both and so must avoid any direct +// references to u32 or u64 types. +func sigsetAdd(set *unix.Sigset_t, signal unix.Signal) error { + if signal < 1 { + return fmt.Errorf("signal %d must be larger than 0", signal) + } + + // For amd64, runtime.sigaddset() performs the following operation: + // set[(signal-1)/32] |= 1 << ((uint32(signal) - 1) & 31) + // + // This trick depends on sigset being two u32's, causing a signal in the + // bottom 31 bits to be written to the low word if bit 32 is low, or the high + // word if bit 32 is high. + + // Signal is the nth bit in the bitfield. + bit := int(signal - 1) + // Word within the sigset the bit needs to be written to. + word := bit / wordBits + + if word >= len(set.Val) { + return fmt.Errorf("signal %d does not fit within unix.Sigset_t", signal) + } + + // Write the signal bit into its corresponding word at the corrected offset. + set.Val[word] |= 1 << (bit % wordBits) + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/syscall.go b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go new file mode 100644 index 0000000000..e37f4cf671 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/syscall.go @@ -0,0 +1,216 @@ +package sys + +import ( + "runtime" + "syscall" + "unsafe" + + "github.com/cilium/ebpf/internal/unix" +) + +// ENOTSUPP is a Linux internal error code that has leaked into UAPI. +// +// It is not the same as ENOTSUP or EOPNOTSUPP. +const ENOTSUPP = syscall.Errno(524) + +// BPF wraps SYS_BPF. +// +// Any pointers contained in attr must use the Pointer type from this package. +func BPF(cmd Cmd, attr unsafe.Pointer, size uintptr) (uintptr, error) { + // Prevent the Go profiler from repeatedly interrupting the verifier, + // which could otherwise lead to a livelock due to receiving EAGAIN. + if cmd == BPF_PROG_LOAD || cmd == BPF_PROG_RUN { + maskProfilerSignal() + defer unmaskProfilerSignal() + } + + for { + r1, _, errNo := unix.Syscall(unix.SYS_BPF, uintptr(cmd), uintptr(attr), size) + runtime.KeepAlive(attr) + + // As of ~4.20 the verifier can be interrupted by a signal, + // and returns EAGAIN in that case. + if errNo == unix.EAGAIN && cmd == BPF_PROG_LOAD { + continue + } + + var err error + if errNo != 0 { + err = wrappedErrno{errNo} + } + + return r1, err + } +} + +// Info is implemented by all structs that can be passed to the ObjInfo syscall. 
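+// A typical call looks like this (a minimal sketch; mapFD is assumed to be a
+// valid *FD for a map):
+//
+//	var info MapInfo
+//	err := ObjInfo(mapFD, &info)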
+// +// MapInfo +// ProgInfo +// LinkInfo +// BtfInfo +type Info interface { + info() (unsafe.Pointer, uint32) +} + +var _ Info = (*MapInfo)(nil) + +func (i *MapInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*ProgInfo)(nil) + +func (i *ProgInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*LinkInfo)(nil) + +func (i *LinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TracingLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *CgroupLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetNsLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *XDPLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *TcxLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetfilterLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *NetkitLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeMultiLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *KprobeLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +var _ Info = (*BtfInfo)(nil) + +func (i *BtfInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +func (i *PerfEventLinkInfo) info() (unsafe.Pointer, uint32) { + return unsafe.Pointer(i), uint32(unsafe.Sizeof(*i)) +} + +// ObjInfo retrieves information about a BPF Fd. +// +// info may be one of MapInfo, ProgInfo, LinkInfo and BtfInfo. +func ObjInfo(fd *FD, info Info) error { + ptr, len := info.info() + err := ObjGetInfoByFd(&ObjGetInfoByFdAttr{ + BpfFd: fd.Uint(), + InfoLen: len, + Info: NewPointer(ptr), + }) + runtime.KeepAlive(fd) + return err +} + +// BPFObjName is a null-terminated string made up of +// 'A-Za-z0-9_' characters. +type ObjName [BPF_OBJ_NAME_LEN]byte + +// NewObjName truncates the result if it is too long. +func NewObjName(name string) ObjName { + var result ObjName + copy(result[:BPF_OBJ_NAME_LEN-1], name) + return result +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +type LogLevel uint32 + +const ( + BPF_LOG_LEVEL1 LogLevel = 1 << iota + BPF_LOG_LEVEL2 + BPF_LOG_STATS +) + +// LinkID uniquely identifies a bpf_link. +type LinkID uint32 + +// BTFID uniquely identifies a BTF blob loaded into the kernel. +type BTFID uint32 + +// TypeID identifies a type in a BTF blob. +type TypeID uint32 + +// Flags used by bpf_mprog. +const ( + BPF_F_REPLACE = 1 << (iota + 2) + BPF_F_BEFORE + BPF_F_AFTER + BPF_F_ID + BPF_F_LINK_MPROG = 1 << 13 // aka BPF_F_LINK +) + +// Flags used by BPF_PROG_LOAD. +const ( + BPF_F_SLEEPABLE = 1 << 4 + BPF_F_XDP_HAS_FRAGS = 1 << 5 + BPF_F_XDP_DEV_BOUND_ONLY = 1 << 6 +) + +const BPF_TAG_SIZE = 8 +const BPF_OBJ_NAME_LEN = 16 + +// wrappedErrno wraps syscall.Errno to prevent direct comparisons with +// syscall.E* or unix.E* constants. +// +// You should never export an error of this type. 
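+//
+// Callers are expected to go through errors.Is instead, e.g. (illustrative):
+//
+//	_, err := BPF(cmd, attr, size)
+//	errors.Is(err, unix.ENOENT) // true when the syscall failed with ENOENT
+//	err == unix.ENOENT          // always false, the errno is wrapped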
+type wrappedErrno struct { + syscall.Errno +} + +func (we wrappedErrno) Unwrap() error { + return we.Errno +} + +func (we wrappedErrno) Error() string { + if we.Errno == ENOTSUPP { + return "operation not supported" + } + return we.Errno.Error() +} + +type syscallError struct { + error + errno syscall.Errno +} + +func Error(err error, errno syscall.Errno) error { + return &syscallError{err, errno} +} + +func (se *syscallError) Is(target error) bool { + return target == se.error +} + +func (se *syscallError) Unwrap() error { + return se.errno +} diff --git a/vendor/github.com/cilium/ebpf/internal/sys/types.go b/vendor/github.com/cilium/ebpf/internal/sys/types.go new file mode 100644 index 0000000000..88001c319e --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sys/types.go @@ -0,0 +1,1556 @@ +// Code generated by internal/cmd/gentypes; DO NOT EDIT. + +package sys + +import ( + "unsafe" +) + +const ( + BPF_ADJ_ROOM_ENCAP_L2_MASK = 255 + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56 + BPF_ANY = 0 + BPF_CSUM_LEVEL_DEC = 2 + BPF_CSUM_LEVEL_INC = 1 + BPF_CSUM_LEVEL_QUERY = 0 + BPF_CSUM_LEVEL_RESET = 3 + BPF_EXIST = 2 + BPF_FIB_LKUP_RET_BLACKHOLE = 1 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 8 + BPF_FIB_LKUP_RET_FWD_DISABLED = 5 + BPF_FIB_LKUP_RET_NOT_FWDED = 4 + BPF_FIB_LKUP_RET_NO_NEIGH = 7 + BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9 + BPF_FIB_LKUP_RET_PROHIBIT = 3 + BPF_FIB_LKUP_RET_SUCCESS = 0 + BPF_FIB_LKUP_RET_UNREACHABLE = 2 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 6 + BPF_FIB_LOOKUP_DIRECT = 1 + BPF_FIB_LOOKUP_OUTPUT = 2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 4 + BPF_FIB_LOOKUP_SRC = 16 + BPF_FIB_LOOKUP_TBID = 8 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 128 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 256 + BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 64 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 16 + BPF_F_ADJ_ROOM_FIXED_GSO = 1 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 32 + BPF_F_BPRM_SECUREEXEC = 1 + BPF_F_BROADCAST = 8 + BPF_F_CLONE = 512 + BPF_F_CTXLEN_MASK = 4503595332403200 + BPF_F_CURRENT_CPU = 4294967295 + BPF_F_CURRENT_NETNS = 18446744073709551615 + BPF_F_DONT_FRAGMENT = 4 + BPF_F_EXCLUDE_INGRESS = 16 + BPF_F_FAST_STACK_CMP = 512 + BPF_F_GET_BRANCH_RECORDS_SIZE = 1 + BPF_F_HDR_FIELD_MASK = 15 + BPF_F_INDEX_MASK = 4294967295 + BPF_F_INGRESS = 1 + BPF_F_INNER_MAP = 4096 + BPF_F_INVALIDATE_HASH = 2 + BPF_F_KPROBE_MULTI_RETURN = 1 + BPF_F_LINK = 8192 + BPF_F_LOCK = 4 + BPF_F_MARK_ENFORCE = 64 + BPF_F_MARK_MANGLED_0 = 32 + BPF_F_MMAPABLE = 1024 + BPF_F_NEIGH = 2 + BPF_F_NEXTHOP = 8 + BPF_F_NO_COMMON_LRU = 2 + BPF_F_NO_PREALLOC = 1 + BPF_F_NO_TUNNEL_KEY = 16 + BPF_F_NUMA_NODE = 4 + BPF_F_PATH_FD = 16384 + BPF_F_PEER = 4 + BPF_F_PRESERVE_ELEMS = 2048 + BPF_F_PSEUDO_HDR = 16 + BPF_F_RDONLY = 8 + BPF_F_RDONLY_PROG = 128 + BPF_F_RECOMPUTE_CSUM = 1 + BPF_F_REUSE_STACKID = 1024 + BPF_F_SEQ_NUMBER = 8 + BPF_F_SKIP_FIELD_MASK = 255 + BPF_F_STACK_BUILD_ID = 32 + BPF_F_SYSCTL_BASE_NAME = 1 + BPF_F_TIMER_ABS = 1 + BPF_F_TIMER_CPU_PIN = 2 + BPF_F_TUNINFO_FLAGS = 16 + BPF_F_TUNINFO_IPV6 = 1 + BPF_F_UPROBE_MULTI_RETURN = 1 + BPF_F_USER_BUILD_ID = 2048 + BPF_F_USER_STACK = 256 + BPF_F_WRONLY = 16 + BPF_F_WRONLY_PROG = 256 + BPF_F_ZERO_CSUM_TX = 2 + BPF_F_ZERO_SEED = 64 + BPF_LOAD_HDR_OPT_TCP_SYN = 1 + BPF_LOCAL_STORAGE_GET_F_CREATE = 1 + BPF_MAX_LOOPS = 8388608 + BPF_MAX_TRAMP_LINKS = 38 + BPF_NOEXIST = 1 + BPF_RB_AVAIL_DATA = 0 + BPF_RB_CONS_POS = 2 + 
BPF_RB_FORCE_WAKEUP = 2 + BPF_RB_NO_WAKEUP = 1 + BPF_RB_PROD_POS = 3 + BPF_RB_RING_SIZE = 1 + BPF_REG_0 = 0 + BPF_REG_1 = 1 + BPF_REG_10 = 10 + BPF_REG_2 = 2 + BPF_REG_3 = 3 + BPF_REG_4 = 4 + BPF_REG_5 = 5 + BPF_REG_6 = 6 + BPF_REG_7 = 7 + BPF_REG_8 = 8 + BPF_REG_9 = 9 + BPF_RINGBUF_BUSY_BIT = 2147483648 + BPF_RINGBUF_DISCARD_BIT = 1073741824 + BPF_RINGBUF_HDR_SZ = 8 + BPF_SKB_TSTAMP_DELIVERY_MONO = 1 + BPF_SKB_TSTAMP_UNSPEC = 0 + BPF_SK_LOOKUP_F_NO_REUSEPORT = 2 + BPF_SK_LOOKUP_F_REPLACE = 1 + BPF_SK_STORAGE_GET_F_CREATE = 1 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4 + BPF_SOCK_OPS_ALL_CB_FLAGS = 127 + BPF_SOCK_OPS_BASE_RTT = 7 + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14 + BPF_SOCK_OPS_NEEDS_ECN = 6 + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16 + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13 + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5 + BPF_SOCK_OPS_RETRANS_CB = 9 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 2 + BPF_SOCK_OPS_RTO_CB = 8 + BPF_SOCK_OPS_RTO_CB_FLAG = 1 + BPF_SOCK_OPS_RTT_CB = 12 + BPF_SOCK_OPS_RTT_CB_FLAG = 8 + BPF_SOCK_OPS_RWND_INIT = 2 + BPF_SOCK_OPS_STATE_CB = 10 + BPF_SOCK_OPS_STATE_CB_FLAG = 4 + BPF_SOCK_OPS_TCP_CONNECT_CB = 3 + BPF_SOCK_OPS_TCP_LISTEN_CB = 11 + BPF_SOCK_OPS_TIMEOUT_INIT = 1 + BPF_SOCK_OPS_VOID = 0 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64 + BPF_STRUCT_OPS_TYPE_bpf_dummy_ops = 0 + BPF_STRUCT_OPS_TYPE_tcp_congestion_ops = 1 + BPF_TASK_ITER_ALL_PROCS = 0 + BPF_TASK_ITER_ALL_THREADS = 1 + BPF_TASK_ITER_PROC_THREADS = 2 + BPF_TCP_BOUND_INACTIVE = 13 + BPF_TCP_CLOSE = 7 + BPF_TCP_CLOSE_WAIT = 8 + BPF_TCP_CLOSING = 11 + BPF_TCP_ESTABLISHED = 1 + BPF_TCP_FIN_WAIT1 = 4 + BPF_TCP_FIN_WAIT2 = 5 + BPF_TCP_LAST_ACK = 9 + BPF_TCP_LISTEN = 10 + BPF_TCP_MAX_STATES = 14 + BPF_TCP_NEW_SYN_RECV = 12 + BPF_TCP_SYN_RECV = 3 + BPF_TCP_SYN_SENT = 2 + BPF_TCP_TIME_WAIT = 6 + BPF_WRITE_HDR_TCP_CURRENT_MSS = 1 + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2 + BPF_XFRM_STATE_OPTS_SZ = 36 +) + +type AdjRoomMode uint32 + +const ( + BPF_ADJ_ROOM_NET AdjRoomMode = 0 + BPF_ADJ_ROOM_MAC AdjRoomMode = 1 +) + +type AttachType uint32 + +const ( + BPF_CGROUP_INET_INGRESS AttachType = 0 + BPF_CGROUP_INET_EGRESS AttachType = 1 + BPF_CGROUP_INET_SOCK_CREATE AttachType = 2 + BPF_CGROUP_SOCK_OPS AttachType = 3 + BPF_SK_SKB_STREAM_PARSER AttachType = 4 + BPF_SK_SKB_STREAM_VERDICT AttachType = 5 + BPF_CGROUP_DEVICE AttachType = 6 + BPF_SK_MSG_VERDICT AttachType = 7 + BPF_CGROUP_INET4_BIND AttachType = 8 + BPF_CGROUP_INET6_BIND AttachType = 9 + BPF_CGROUP_INET4_CONNECT AttachType = 10 + BPF_CGROUP_INET6_CONNECT AttachType = 11 + BPF_CGROUP_INET4_POST_BIND AttachType = 12 + BPF_CGROUP_INET6_POST_BIND AttachType = 13 + BPF_CGROUP_UDP4_SENDMSG AttachType = 14 + BPF_CGROUP_UDP6_SENDMSG AttachType = 15 + BPF_LIRC_MODE2 AttachType = 16 + BPF_FLOW_DISSECTOR AttachType = 17 + BPF_CGROUP_SYSCTL AttachType = 18 + BPF_CGROUP_UDP4_RECVMSG AttachType = 19 + BPF_CGROUP_UDP6_RECVMSG AttachType = 20 + BPF_CGROUP_GETSOCKOPT AttachType = 21 + BPF_CGROUP_SETSOCKOPT AttachType = 22 + BPF_TRACE_RAW_TP AttachType = 23 + BPF_TRACE_FENTRY AttachType = 24 + BPF_TRACE_FEXIT AttachType = 25 + BPF_MODIFY_RETURN AttachType = 26 + BPF_LSM_MAC AttachType = 27 + BPF_TRACE_ITER AttachType = 28 + BPF_CGROUP_INET4_GETPEERNAME AttachType = 29 + BPF_CGROUP_INET6_GETPEERNAME AttachType = 30 + BPF_CGROUP_INET4_GETSOCKNAME AttachType = 31 + BPF_CGROUP_INET6_GETSOCKNAME AttachType = 32 + BPF_XDP_DEVMAP AttachType = 33 + BPF_CGROUP_INET_SOCK_RELEASE AttachType = 34 + BPF_XDP_CPUMAP AttachType = 35 
+ BPF_SK_LOOKUP AttachType = 36 + BPF_XDP AttachType = 37 + BPF_SK_SKB_VERDICT AttachType = 38 + BPF_SK_REUSEPORT_SELECT AttachType = 39 + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE AttachType = 40 + BPF_PERF_EVENT AttachType = 41 + BPF_TRACE_KPROBE_MULTI AttachType = 42 + BPF_LSM_CGROUP AttachType = 43 + BPF_STRUCT_OPS AttachType = 44 + BPF_NETFILTER AttachType = 45 + BPF_TCX_INGRESS AttachType = 46 + BPF_TCX_EGRESS AttachType = 47 + BPF_TRACE_UPROBE_MULTI AttachType = 48 + BPF_CGROUP_UNIX_CONNECT AttachType = 49 + BPF_CGROUP_UNIX_SENDMSG AttachType = 50 + BPF_CGROUP_UNIX_RECVMSG AttachType = 51 + BPF_CGROUP_UNIX_GETPEERNAME AttachType = 52 + BPF_CGROUP_UNIX_GETSOCKNAME AttachType = 53 + BPF_NETKIT_PRIMARY AttachType = 54 + BPF_NETKIT_PEER AttachType = 55 + __MAX_BPF_ATTACH_TYPE AttachType = 56 +) + +type Cmd uint32 + +const ( + BPF_MAP_CREATE Cmd = 0 + BPF_MAP_LOOKUP_ELEM Cmd = 1 + BPF_MAP_UPDATE_ELEM Cmd = 2 + BPF_MAP_DELETE_ELEM Cmd = 3 + BPF_MAP_GET_NEXT_KEY Cmd = 4 + BPF_PROG_LOAD Cmd = 5 + BPF_OBJ_PIN Cmd = 6 + BPF_OBJ_GET Cmd = 7 + BPF_PROG_ATTACH Cmd = 8 + BPF_PROG_DETACH Cmd = 9 + BPF_PROG_TEST_RUN Cmd = 10 + BPF_PROG_RUN Cmd = 10 + BPF_PROG_GET_NEXT_ID Cmd = 11 + BPF_MAP_GET_NEXT_ID Cmd = 12 + BPF_PROG_GET_FD_BY_ID Cmd = 13 + BPF_MAP_GET_FD_BY_ID Cmd = 14 + BPF_OBJ_GET_INFO_BY_FD Cmd = 15 + BPF_PROG_QUERY Cmd = 16 + BPF_RAW_TRACEPOINT_OPEN Cmd = 17 + BPF_BTF_LOAD Cmd = 18 + BPF_BTF_GET_FD_BY_ID Cmd = 19 + BPF_TASK_FD_QUERY Cmd = 20 + BPF_MAP_LOOKUP_AND_DELETE_ELEM Cmd = 21 + BPF_MAP_FREEZE Cmd = 22 + BPF_BTF_GET_NEXT_ID Cmd = 23 + BPF_MAP_LOOKUP_BATCH Cmd = 24 + BPF_MAP_LOOKUP_AND_DELETE_BATCH Cmd = 25 + BPF_MAP_UPDATE_BATCH Cmd = 26 + BPF_MAP_DELETE_BATCH Cmd = 27 + BPF_LINK_CREATE Cmd = 28 + BPF_LINK_UPDATE Cmd = 29 + BPF_LINK_GET_FD_BY_ID Cmd = 30 + BPF_LINK_GET_NEXT_ID Cmd = 31 + BPF_ENABLE_STATS Cmd = 32 + BPF_ITER_CREATE Cmd = 33 + BPF_LINK_DETACH Cmd = 34 + BPF_PROG_BIND_MAP Cmd = 35 +) + +type FunctionId uint32 + +const ( + BPF_FUNC_unspec FunctionId = 0 + BPF_FUNC_map_lookup_elem FunctionId = 1 + BPF_FUNC_map_update_elem FunctionId = 2 + BPF_FUNC_map_delete_elem FunctionId = 3 + BPF_FUNC_probe_read FunctionId = 4 + BPF_FUNC_ktime_get_ns FunctionId = 5 + BPF_FUNC_trace_printk FunctionId = 6 + BPF_FUNC_get_prandom_u32 FunctionId = 7 + BPF_FUNC_get_smp_processor_id FunctionId = 8 + BPF_FUNC_skb_store_bytes FunctionId = 9 + BPF_FUNC_l3_csum_replace FunctionId = 10 + BPF_FUNC_l4_csum_replace FunctionId = 11 + BPF_FUNC_tail_call FunctionId = 12 + BPF_FUNC_clone_redirect FunctionId = 13 + BPF_FUNC_get_current_pid_tgid FunctionId = 14 + BPF_FUNC_get_current_uid_gid FunctionId = 15 + BPF_FUNC_get_current_comm FunctionId = 16 + BPF_FUNC_get_cgroup_classid FunctionId = 17 + BPF_FUNC_skb_vlan_push FunctionId = 18 + BPF_FUNC_skb_vlan_pop FunctionId = 19 + BPF_FUNC_skb_get_tunnel_key FunctionId = 20 + BPF_FUNC_skb_set_tunnel_key FunctionId = 21 + BPF_FUNC_perf_event_read FunctionId = 22 + BPF_FUNC_redirect FunctionId = 23 + BPF_FUNC_get_route_realm FunctionId = 24 + BPF_FUNC_perf_event_output FunctionId = 25 + BPF_FUNC_skb_load_bytes FunctionId = 26 + BPF_FUNC_get_stackid FunctionId = 27 + BPF_FUNC_csum_diff FunctionId = 28 + BPF_FUNC_skb_get_tunnel_opt FunctionId = 29 + BPF_FUNC_skb_set_tunnel_opt FunctionId = 30 + BPF_FUNC_skb_change_proto FunctionId = 31 + BPF_FUNC_skb_change_type FunctionId = 32 + BPF_FUNC_skb_under_cgroup FunctionId = 33 + BPF_FUNC_get_hash_recalc FunctionId = 34 + BPF_FUNC_get_current_task FunctionId = 35 + BPF_FUNC_probe_write_user FunctionId = 36 + 
BPF_FUNC_current_task_under_cgroup FunctionId = 37 + BPF_FUNC_skb_change_tail FunctionId = 38 + BPF_FUNC_skb_pull_data FunctionId = 39 + BPF_FUNC_csum_update FunctionId = 40 + BPF_FUNC_set_hash_invalid FunctionId = 41 + BPF_FUNC_get_numa_node_id FunctionId = 42 + BPF_FUNC_skb_change_head FunctionId = 43 + BPF_FUNC_xdp_adjust_head FunctionId = 44 + BPF_FUNC_probe_read_str FunctionId = 45 + BPF_FUNC_get_socket_cookie FunctionId = 46 + BPF_FUNC_get_socket_uid FunctionId = 47 + BPF_FUNC_set_hash FunctionId = 48 + BPF_FUNC_setsockopt FunctionId = 49 + BPF_FUNC_skb_adjust_room FunctionId = 50 + BPF_FUNC_redirect_map FunctionId = 51 + BPF_FUNC_sk_redirect_map FunctionId = 52 + BPF_FUNC_sock_map_update FunctionId = 53 + BPF_FUNC_xdp_adjust_meta FunctionId = 54 + BPF_FUNC_perf_event_read_value FunctionId = 55 + BPF_FUNC_perf_prog_read_value FunctionId = 56 + BPF_FUNC_getsockopt FunctionId = 57 + BPF_FUNC_override_return FunctionId = 58 + BPF_FUNC_sock_ops_cb_flags_set FunctionId = 59 + BPF_FUNC_msg_redirect_map FunctionId = 60 + BPF_FUNC_msg_apply_bytes FunctionId = 61 + BPF_FUNC_msg_cork_bytes FunctionId = 62 + BPF_FUNC_msg_pull_data FunctionId = 63 + BPF_FUNC_bind FunctionId = 64 + BPF_FUNC_xdp_adjust_tail FunctionId = 65 + BPF_FUNC_skb_get_xfrm_state FunctionId = 66 + BPF_FUNC_get_stack FunctionId = 67 + BPF_FUNC_skb_load_bytes_relative FunctionId = 68 + BPF_FUNC_fib_lookup FunctionId = 69 + BPF_FUNC_sock_hash_update FunctionId = 70 + BPF_FUNC_msg_redirect_hash FunctionId = 71 + BPF_FUNC_sk_redirect_hash FunctionId = 72 + BPF_FUNC_lwt_push_encap FunctionId = 73 + BPF_FUNC_lwt_seg6_store_bytes FunctionId = 74 + BPF_FUNC_lwt_seg6_adjust_srh FunctionId = 75 + BPF_FUNC_lwt_seg6_action FunctionId = 76 + BPF_FUNC_rc_repeat FunctionId = 77 + BPF_FUNC_rc_keydown FunctionId = 78 + BPF_FUNC_skb_cgroup_id FunctionId = 79 + BPF_FUNC_get_current_cgroup_id FunctionId = 80 + BPF_FUNC_get_local_storage FunctionId = 81 + BPF_FUNC_sk_select_reuseport FunctionId = 82 + BPF_FUNC_skb_ancestor_cgroup_id FunctionId = 83 + BPF_FUNC_sk_lookup_tcp FunctionId = 84 + BPF_FUNC_sk_lookup_udp FunctionId = 85 + BPF_FUNC_sk_release FunctionId = 86 + BPF_FUNC_map_push_elem FunctionId = 87 + BPF_FUNC_map_pop_elem FunctionId = 88 + BPF_FUNC_map_peek_elem FunctionId = 89 + BPF_FUNC_msg_push_data FunctionId = 90 + BPF_FUNC_msg_pop_data FunctionId = 91 + BPF_FUNC_rc_pointer_rel FunctionId = 92 + BPF_FUNC_spin_lock FunctionId = 93 + BPF_FUNC_spin_unlock FunctionId = 94 + BPF_FUNC_sk_fullsock FunctionId = 95 + BPF_FUNC_tcp_sock FunctionId = 96 + BPF_FUNC_skb_ecn_set_ce FunctionId = 97 + BPF_FUNC_get_listener_sock FunctionId = 98 + BPF_FUNC_skc_lookup_tcp FunctionId = 99 + BPF_FUNC_tcp_check_syncookie FunctionId = 100 + BPF_FUNC_sysctl_get_name FunctionId = 101 + BPF_FUNC_sysctl_get_current_value FunctionId = 102 + BPF_FUNC_sysctl_get_new_value FunctionId = 103 + BPF_FUNC_sysctl_set_new_value FunctionId = 104 + BPF_FUNC_strtol FunctionId = 105 + BPF_FUNC_strtoul FunctionId = 106 + BPF_FUNC_sk_storage_get FunctionId = 107 + BPF_FUNC_sk_storage_delete FunctionId = 108 + BPF_FUNC_send_signal FunctionId = 109 + BPF_FUNC_tcp_gen_syncookie FunctionId = 110 + BPF_FUNC_skb_output FunctionId = 111 + BPF_FUNC_probe_read_user FunctionId = 112 + BPF_FUNC_probe_read_kernel FunctionId = 113 + BPF_FUNC_probe_read_user_str FunctionId = 114 + BPF_FUNC_probe_read_kernel_str FunctionId = 115 + BPF_FUNC_tcp_send_ack FunctionId = 116 + BPF_FUNC_send_signal_thread FunctionId = 117 + BPF_FUNC_jiffies64 FunctionId = 118 + BPF_FUNC_read_branch_records 
FunctionId = 119 + BPF_FUNC_get_ns_current_pid_tgid FunctionId = 120 + BPF_FUNC_xdp_output FunctionId = 121 + BPF_FUNC_get_netns_cookie FunctionId = 122 + BPF_FUNC_get_current_ancestor_cgroup_id FunctionId = 123 + BPF_FUNC_sk_assign FunctionId = 124 + BPF_FUNC_ktime_get_boot_ns FunctionId = 125 + BPF_FUNC_seq_printf FunctionId = 126 + BPF_FUNC_seq_write FunctionId = 127 + BPF_FUNC_sk_cgroup_id FunctionId = 128 + BPF_FUNC_sk_ancestor_cgroup_id FunctionId = 129 + BPF_FUNC_ringbuf_output FunctionId = 130 + BPF_FUNC_ringbuf_reserve FunctionId = 131 + BPF_FUNC_ringbuf_submit FunctionId = 132 + BPF_FUNC_ringbuf_discard FunctionId = 133 + BPF_FUNC_ringbuf_query FunctionId = 134 + BPF_FUNC_csum_level FunctionId = 135 + BPF_FUNC_skc_to_tcp6_sock FunctionId = 136 + BPF_FUNC_skc_to_tcp_sock FunctionId = 137 + BPF_FUNC_skc_to_tcp_timewait_sock FunctionId = 138 + BPF_FUNC_skc_to_tcp_request_sock FunctionId = 139 + BPF_FUNC_skc_to_udp6_sock FunctionId = 140 + BPF_FUNC_get_task_stack FunctionId = 141 + BPF_FUNC_load_hdr_opt FunctionId = 142 + BPF_FUNC_store_hdr_opt FunctionId = 143 + BPF_FUNC_reserve_hdr_opt FunctionId = 144 + BPF_FUNC_inode_storage_get FunctionId = 145 + BPF_FUNC_inode_storage_delete FunctionId = 146 + BPF_FUNC_d_path FunctionId = 147 + BPF_FUNC_copy_from_user FunctionId = 148 + BPF_FUNC_snprintf_btf FunctionId = 149 + BPF_FUNC_seq_printf_btf FunctionId = 150 + BPF_FUNC_skb_cgroup_classid FunctionId = 151 + BPF_FUNC_redirect_neigh FunctionId = 152 + BPF_FUNC_per_cpu_ptr FunctionId = 153 + BPF_FUNC_this_cpu_ptr FunctionId = 154 + BPF_FUNC_redirect_peer FunctionId = 155 + BPF_FUNC_task_storage_get FunctionId = 156 + BPF_FUNC_task_storage_delete FunctionId = 157 + BPF_FUNC_get_current_task_btf FunctionId = 158 + BPF_FUNC_bprm_opts_set FunctionId = 159 + BPF_FUNC_ktime_get_coarse_ns FunctionId = 160 + BPF_FUNC_ima_inode_hash FunctionId = 161 + BPF_FUNC_sock_from_file FunctionId = 162 + BPF_FUNC_check_mtu FunctionId = 163 + BPF_FUNC_for_each_map_elem FunctionId = 164 + BPF_FUNC_snprintf FunctionId = 165 + BPF_FUNC_sys_bpf FunctionId = 166 + BPF_FUNC_btf_find_by_name_kind FunctionId = 167 + BPF_FUNC_sys_close FunctionId = 168 + BPF_FUNC_timer_init FunctionId = 169 + BPF_FUNC_timer_set_callback FunctionId = 170 + BPF_FUNC_timer_start FunctionId = 171 + BPF_FUNC_timer_cancel FunctionId = 172 + BPF_FUNC_get_func_ip FunctionId = 173 + BPF_FUNC_get_attach_cookie FunctionId = 174 + BPF_FUNC_task_pt_regs FunctionId = 175 + BPF_FUNC_get_branch_snapshot FunctionId = 176 + BPF_FUNC_trace_vprintk FunctionId = 177 + BPF_FUNC_skc_to_unix_sock FunctionId = 178 + BPF_FUNC_kallsyms_lookup_name FunctionId = 179 + BPF_FUNC_find_vma FunctionId = 180 + BPF_FUNC_loop FunctionId = 181 + BPF_FUNC_strncmp FunctionId = 182 + BPF_FUNC_get_func_arg FunctionId = 183 + BPF_FUNC_get_func_ret FunctionId = 184 + BPF_FUNC_get_func_arg_cnt FunctionId = 185 + BPF_FUNC_get_retval FunctionId = 186 + BPF_FUNC_set_retval FunctionId = 187 + BPF_FUNC_xdp_get_buff_len FunctionId = 188 + BPF_FUNC_xdp_load_bytes FunctionId = 189 + BPF_FUNC_xdp_store_bytes FunctionId = 190 + BPF_FUNC_copy_from_user_task FunctionId = 191 + BPF_FUNC_skb_set_tstamp FunctionId = 192 + BPF_FUNC_ima_file_hash FunctionId = 193 + BPF_FUNC_kptr_xchg FunctionId = 194 + BPF_FUNC_map_lookup_percpu_elem FunctionId = 195 + BPF_FUNC_skc_to_mptcp_sock FunctionId = 196 + BPF_FUNC_dynptr_from_mem FunctionId = 197 + BPF_FUNC_ringbuf_reserve_dynptr FunctionId = 198 + BPF_FUNC_ringbuf_submit_dynptr FunctionId = 199 + BPF_FUNC_ringbuf_discard_dynptr FunctionId = 200 + 
BPF_FUNC_dynptr_read FunctionId = 201 + BPF_FUNC_dynptr_write FunctionId = 202 + BPF_FUNC_dynptr_data FunctionId = 203 + BPF_FUNC_tcp_raw_gen_syncookie_ipv4 FunctionId = 204 + BPF_FUNC_tcp_raw_gen_syncookie_ipv6 FunctionId = 205 + BPF_FUNC_tcp_raw_check_syncookie_ipv4 FunctionId = 206 + BPF_FUNC_tcp_raw_check_syncookie_ipv6 FunctionId = 207 + BPF_FUNC_ktime_get_tai_ns FunctionId = 208 + BPF_FUNC_user_ringbuf_drain FunctionId = 209 + BPF_FUNC_cgrp_storage_get FunctionId = 210 + BPF_FUNC_cgrp_storage_delete FunctionId = 211 + __BPF_FUNC_MAX_ID FunctionId = 212 +) + +type HdrStartOff uint32 + +const ( + BPF_HDR_START_MAC HdrStartOff = 0 + BPF_HDR_START_NET HdrStartOff = 1 +) + +type LinkType uint32 + +const ( + BPF_LINK_TYPE_UNSPEC LinkType = 0 + BPF_LINK_TYPE_RAW_TRACEPOINT LinkType = 1 + BPF_LINK_TYPE_TRACING LinkType = 2 + BPF_LINK_TYPE_CGROUP LinkType = 3 + BPF_LINK_TYPE_ITER LinkType = 4 + BPF_LINK_TYPE_NETNS LinkType = 5 + BPF_LINK_TYPE_XDP LinkType = 6 + BPF_LINK_TYPE_PERF_EVENT LinkType = 7 + BPF_LINK_TYPE_KPROBE_MULTI LinkType = 8 + BPF_LINK_TYPE_STRUCT_OPS LinkType = 9 + BPF_LINK_TYPE_NETFILTER LinkType = 10 + BPF_LINK_TYPE_TCX LinkType = 11 + BPF_LINK_TYPE_UPROBE_MULTI LinkType = 12 + BPF_LINK_TYPE_NETKIT LinkType = 13 + __MAX_BPF_LINK_TYPE LinkType = 14 +) + +type MapType uint32 + +const ( + BPF_MAP_TYPE_UNSPEC MapType = 0 + BPF_MAP_TYPE_HASH MapType = 1 + BPF_MAP_TYPE_ARRAY MapType = 2 + BPF_MAP_TYPE_PROG_ARRAY MapType = 3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY MapType = 4 + BPF_MAP_TYPE_PERCPU_HASH MapType = 5 + BPF_MAP_TYPE_PERCPU_ARRAY MapType = 6 + BPF_MAP_TYPE_STACK_TRACE MapType = 7 + BPF_MAP_TYPE_CGROUP_ARRAY MapType = 8 + BPF_MAP_TYPE_LRU_HASH MapType = 9 + BPF_MAP_TYPE_LRU_PERCPU_HASH MapType = 10 + BPF_MAP_TYPE_LPM_TRIE MapType = 11 + BPF_MAP_TYPE_ARRAY_OF_MAPS MapType = 12 + BPF_MAP_TYPE_HASH_OF_MAPS MapType = 13 + BPF_MAP_TYPE_DEVMAP MapType = 14 + BPF_MAP_TYPE_SOCKMAP MapType = 15 + BPF_MAP_TYPE_CPUMAP MapType = 16 + BPF_MAP_TYPE_XSKMAP MapType = 17 + BPF_MAP_TYPE_SOCKHASH MapType = 18 + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED MapType = 19 + BPF_MAP_TYPE_CGROUP_STORAGE MapType = 19 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY MapType = 20 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED MapType = 21 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE MapType = 21 + BPF_MAP_TYPE_QUEUE MapType = 22 + BPF_MAP_TYPE_STACK MapType = 23 + BPF_MAP_TYPE_SK_STORAGE MapType = 24 + BPF_MAP_TYPE_DEVMAP_HASH MapType = 25 + BPF_MAP_TYPE_STRUCT_OPS MapType = 26 + BPF_MAP_TYPE_RINGBUF MapType = 27 + BPF_MAP_TYPE_INODE_STORAGE MapType = 28 + BPF_MAP_TYPE_TASK_STORAGE MapType = 29 + BPF_MAP_TYPE_BLOOM_FILTER MapType = 30 + BPF_MAP_TYPE_USER_RINGBUF MapType = 31 + BPF_MAP_TYPE_CGRP_STORAGE MapType = 32 +) + +type ObjType uint32 + +const ( + BPF_TYPE_UNSPEC ObjType = 0 + BPF_TYPE_PROG ObjType = 1 + BPF_TYPE_MAP ObjType = 2 + BPF_TYPE_LINK ObjType = 3 +) + +type PerfEventType uint32 + +const ( + BPF_PERF_EVENT_UNSPEC PerfEventType = 0 + BPF_PERF_EVENT_UPROBE PerfEventType = 1 + BPF_PERF_EVENT_URETPROBE PerfEventType = 2 + BPF_PERF_EVENT_KPROBE PerfEventType = 3 + BPF_PERF_EVENT_KRETPROBE PerfEventType = 4 + BPF_PERF_EVENT_TRACEPOINT PerfEventType = 5 + BPF_PERF_EVENT_EVENT PerfEventType = 6 +) + +type ProgType uint32 + +const ( + BPF_PROG_TYPE_UNSPEC ProgType = 0 + BPF_PROG_TYPE_SOCKET_FILTER ProgType = 1 + BPF_PROG_TYPE_KPROBE ProgType = 2 + BPF_PROG_TYPE_SCHED_CLS ProgType = 3 + BPF_PROG_TYPE_SCHED_ACT ProgType = 4 + BPF_PROG_TYPE_TRACEPOINT ProgType = 5 + BPF_PROG_TYPE_XDP ProgType = 6 + BPF_PROG_TYPE_PERF_EVENT 
ProgType = 7 + BPF_PROG_TYPE_CGROUP_SKB ProgType = 8 + BPF_PROG_TYPE_CGROUP_SOCK ProgType = 9 + BPF_PROG_TYPE_LWT_IN ProgType = 10 + BPF_PROG_TYPE_LWT_OUT ProgType = 11 + BPF_PROG_TYPE_LWT_XMIT ProgType = 12 + BPF_PROG_TYPE_SOCK_OPS ProgType = 13 + BPF_PROG_TYPE_SK_SKB ProgType = 14 + BPF_PROG_TYPE_CGROUP_DEVICE ProgType = 15 + BPF_PROG_TYPE_SK_MSG ProgType = 16 + BPF_PROG_TYPE_RAW_TRACEPOINT ProgType = 17 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR ProgType = 18 + BPF_PROG_TYPE_LWT_SEG6LOCAL ProgType = 19 + BPF_PROG_TYPE_LIRC_MODE2 ProgType = 20 + BPF_PROG_TYPE_SK_REUSEPORT ProgType = 21 + BPF_PROG_TYPE_FLOW_DISSECTOR ProgType = 22 + BPF_PROG_TYPE_CGROUP_SYSCTL ProgType = 23 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE ProgType = 24 + BPF_PROG_TYPE_CGROUP_SOCKOPT ProgType = 25 + BPF_PROG_TYPE_TRACING ProgType = 26 + BPF_PROG_TYPE_STRUCT_OPS ProgType = 27 + BPF_PROG_TYPE_EXT ProgType = 28 + BPF_PROG_TYPE_LSM ProgType = 29 + BPF_PROG_TYPE_SK_LOOKUP ProgType = 30 + BPF_PROG_TYPE_SYSCALL ProgType = 31 + BPF_PROG_TYPE_NETFILTER ProgType = 32 +) + +type RetCode uint32 + +const ( + BPF_OK RetCode = 0 + BPF_DROP RetCode = 2 + BPF_REDIRECT RetCode = 7 + BPF_LWT_REROUTE RetCode = 128 + BPF_FLOW_DISSECTOR_CONTINUE RetCode = 129 +) + +type SkAction uint32 + +const ( + SK_DROP SkAction = 0 + SK_PASS SkAction = 1 +) + +type StackBuildIdStatus uint32 + +const ( + BPF_STACK_BUILD_ID_EMPTY StackBuildIdStatus = 0 + BPF_STACK_BUILD_ID_VALID StackBuildIdStatus = 1 + BPF_STACK_BUILD_ID_IP StackBuildIdStatus = 2 +) + +type StatsType uint32 + +const ( + BPF_STATS_RUN_TIME StatsType = 0 +) + +type TcxActionBase int32 + +const ( + TCX_NEXT TcxActionBase = -1 + TCX_PASS TcxActionBase = 0 + TCX_DROP TcxActionBase = 2 + TCX_REDIRECT TcxActionBase = 7 +) + +type XdpAction uint32 + +const ( + XDP_ABORTED XdpAction = 0 + XDP_DROP XdpAction = 1 + XDP_PASS XdpAction = 2 + XDP_TX XdpAction = 3 + XDP_REDIRECT XdpAction = 4 +) + +type BtfInfo struct { + Btf Pointer + BtfSize uint32 + Id BTFID + Name Pointer + NameLen uint32 + KernelBtf uint32 +} + +type FuncInfo struct { + InsnOff uint32 + TypeId uint32 +} + +type LineInfo struct { + InsnOff uint32 + FileNameOff uint32 + LineOff uint32 + LineCol uint32 +} + +type LinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Extra [48]uint8 +} + +type MapInfo struct { + Type uint32 + Id uint32 + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + Name ObjName + Ifindex uint32 + BtfVmlinuxValueTypeId TypeID + NetnsDev uint64 + NetnsIno uint64 + BtfId uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + _ [4]byte + MapExtra uint64 +} + +type ProgInfo struct { + Type uint32 + Id uint32 + Tag [8]uint8 + JitedProgLen uint32 + XlatedProgLen uint32 + JitedProgInsns Pointer + XlatedProgInsns Pointer + LoadTime uint64 + CreatedByUid uint32 + NrMapIds uint32 + MapIds Pointer + Name ObjName + Ifindex uint32 + _ [4]byte /* unsupported bitfield */ + NetnsDev uint64 + NetnsIno uint64 + NrJitedKsyms uint32 + NrJitedFuncLens uint32 + JitedKsyms Pointer + JitedFuncLens Pointer + BtfId BTFID + FuncInfoRecSize uint32 + FuncInfo Pointer + NrFuncInfo uint32 + NrLineInfo uint32 + LineInfo Pointer + JitedLineInfo Pointer + NrJitedLineInfo uint32 + LineInfoRecSize uint32 + JitedLineInfoRecSize uint32 + NrProgTags uint32 + ProgTags uint64 + RunTimeNs uint64 + RunCnt uint64 + RecursionMisses uint64 + VerifiedInsns uint32 + AttachBtfObjId BTFID + AttachBtfId TypeID + _ [4]byte +} + +type SkLookup struct { + Cookie uint64 + Family uint32 + Protocol uint32 + RemoteIp4 [4]uint8 + 
RemoteIp6 [16]uint8 + RemotePort uint16 + _ [2]byte + LocalIp4 [4]uint8 + LocalIp6 [16]uint8 + LocalPort uint32 + IngressIfindex uint32 + _ [4]byte +} + +type XdpMd struct { + Data uint32 + DataEnd uint32 + DataMeta uint32 + IngressIfindex uint32 + RxQueueIndex uint32 + EgressIfindex uint32 +} + +type BtfGetFdByIdAttr struct{ Id uint32 } + +func BtfGetFdById(attr *BtfGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type BtfGetNextIdAttr struct { + Id BTFID + NextId BTFID +} + +func BtfGetNextId(attr *BtfGetNextIdAttr) error { + _, err := BPF(BPF_BTF_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type BtfLoadAttr struct { + Btf Pointer + BtfLogBuf Pointer + BtfSize uint32 + BtfLogSize uint32 + BtfLogLevel uint32 + BtfLogTrueSize uint32 +} + +func BtfLoad(attr *BtfLoadAttr) (*FD, error) { + fd, err := BPF(BPF_BTF_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type EnableStatsAttr struct{ Type uint32 } + +func EnableStats(attr *EnableStatsAttr) (*FD, error) { + fd, err := BPF(BPF_ENABLE_STATS, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type IterCreateAttr struct { + LinkFd uint32 + Flags uint32 +} + +func IterCreate(attr *IterCreateAttr) (*FD, error) { + fd, err := BPF(BPF_ITER_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId TypeID + _ [44]byte +} + +func LinkCreate(attr *LinkCreateAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateIterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + IterInfo Pointer + IterInfoLen uint32 + _ [36]byte +} + +func LinkCreateIter(attr *LinkCreateIterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateKprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + KprobeMultiFlags uint32 + Count uint32 + Syms Pointer + Addrs Pointer + Cookies Pointer + _ [16]byte +} + +func LinkCreateKprobeMulti(attr *LinkCreateKprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetfilterAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Pf uint32 + Hooknum uint32 + Priority int32 + NetfilterFlags uint32 + _ [32]byte +} + +func LinkCreateNetfilter(attr *LinkCreateNetfilterAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateNetkitAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateNetkit(attr *LinkCreateNetkitAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != 
nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreatePerfEventAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + BpfCookie uint64 + _ [40]byte +} + +func LinkCreatePerfEvent(attr *LinkCreatePerfEventAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTcxAttr struct { + ProgFd uint32 + TargetIfindex uint32 + AttachType AttachType + Flags uint32 + RelativeFdOrId uint32 + _ [4]byte + ExpectedRevision uint64 + _ [32]byte +} + +func LinkCreateTcx(attr *LinkCreateTcxAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateTracingAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + TargetBtfId BTFID + _ [4]byte + Cookie uint64 + _ [32]byte +} + +func LinkCreateTracing(attr *LinkCreateTracingAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkCreateUprobeMultiAttr struct { + ProgFd uint32 + TargetFd uint32 + AttachType AttachType + Flags uint32 + Path Pointer + Offsets Pointer + RefCtrOffsets Pointer + Cookies Pointer + Count uint32 + UprobeMultiFlags uint32 + Pid uint32 + _ [4]byte +} + +func LinkCreateUprobeMulti(attr *LinkCreateUprobeMultiAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetFdByIdAttr struct{ Id LinkID } + +func LinkGetFdById(attr *LinkGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_LINK_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type LinkGetNextIdAttr struct { + Id LinkID + NextId LinkID +} + +func LinkGetNextId(attr *LinkGetNextIdAttr) error { + _, err := BPF(BPF_LINK_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type LinkUpdateAttr struct { + LinkFd uint32 + NewProgFd uint32 + Flags uint32 + OldProgFd uint32 +} + +func LinkUpdate(attr *LinkUpdateAttr) error { + _, err := BPF(BPF_LINK_UPDATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapCreateAttr struct { + MapType MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + MapFlags uint32 + InnerMapFd uint32 + NumaNode uint32 + MapName ObjName + MapIfindex uint32 + BtfFd uint32 + BtfKeyTypeId TypeID + BtfValueTypeId TypeID + BtfVmlinuxValueTypeId TypeID + MapExtra uint64 +} + +func MapCreate(attr *MapCreateAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_CREATE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapDeleteBatch(attr *MapDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapDeleteElem(attr *MapDeleteElemAttr) error { + _, err := BPF(BPF_MAP_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapFreezeAttr struct{ MapFd 
uint32 } + +func MapFreeze(attr *MapFreezeAttr) error { + _, err := BPF(BPF_MAP_FREEZE, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetFdByIdAttr struct{ Id uint32 } + +func MapGetFdById(attr *MapGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_MAP_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type MapGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func MapGetNextId(attr *MapGetNextIdAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapGetNextKeyAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + NextKey Pointer +} + +func MapGetNextKey(attr *MapGetNextKeyAttr) error { + _, err := BPF(BPF_MAP_GET_NEXT_KEY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupAndDeleteBatch(attr *MapLookupAndDeleteBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupAndDeleteElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupAndDeleteElem(attr *MapLookupAndDeleteElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_AND_DELETE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapLookupBatch(attr *MapLookupBatchAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapLookupElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapLookupElem(attr *MapLookupElemAttr) error { + _, err := BPF(BPF_MAP_LOOKUP_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateBatchAttr struct { + InBatch Pointer + OutBatch Pointer + Keys Pointer + Values Pointer + Count uint32 + MapFd uint32 + ElemFlags uint64 + Flags uint64 +} + +func MapUpdateBatch(attr *MapUpdateBatchAttr) error { + _, err := BPF(BPF_MAP_UPDATE_BATCH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type MapUpdateElemAttr struct { + MapFd uint32 + _ [4]byte + Key Pointer + Value Pointer + Flags uint64 +} + +func MapUpdateElem(attr *MapUpdateElemAttr) error { + _, err := BPF(BPF_MAP_UPDATE_ELEM, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjGetAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjGet(attr *ObjGetAttr) (*FD, error) { + fd, err := BPF(BPF_OBJ_GET, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ObjGetInfoByFdAttr struct { + BpfFd uint32 + InfoLen uint32 + Info Pointer +} + +func ObjGetInfoByFd(attr *ObjGetInfoByFdAttr) error { + _, err := BPF(BPF_OBJ_GET_INFO_BY_FD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ObjPinAttr struct { + Pathname Pointer + BpfFd uint32 + FileFlags uint32 + PathFd int32 + _ [4]byte +} + +func ObjPin(attr *ObjPinAttr) error { + _, err := BPF(BPF_OBJ_PIN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgAttachAttr struct { + TargetFdOrIfindex uint32 
+ AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + ReplaceBpfFd uint32 + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgAttach(attr *ProgAttachAttr) error { + _, err := BPF(BPF_PROG_ATTACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgBindMapAttr struct { + ProgFd uint32 + MapFd uint32 + Flags uint32 +} + +func ProgBindMap(attr *ProgBindMapAttr) error { + _, err := BPF(BPF_PROG_BIND_MAP, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgDetachAttr struct { + TargetFdOrIfindex uint32 + AttachBpfFd uint32 + AttachType uint32 + AttachFlags uint32 + _ [4]byte + RelativeFdOrId uint32 + ExpectedRevision uint64 +} + +func ProgDetach(attr *ProgDetachAttr) error { + _, err := BPF(BPF_PROG_DETACH, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgGetFdByIdAttr struct{ Id uint32 } + +func ProgGetFdById(attr *ProgGetFdByIdAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_GET_FD_BY_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgGetNextIdAttr struct { + Id uint32 + NextId uint32 +} + +func ProgGetNextId(attr *ProgGetNextIdAttr) error { + _, err := BPF(BPF_PROG_GET_NEXT_ID, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgLoadAttr struct { + ProgType ProgType + InsnCnt uint32 + Insns Pointer + License Pointer + LogLevel LogLevel + LogSize uint32 + LogBuf Pointer + KernVersion uint32 + ProgFlags uint32 + ProgName ObjName + ProgIfindex uint32 + ExpectedAttachType AttachType + ProgBtfFd uint32 + FuncInfoRecSize uint32 + FuncInfo Pointer + FuncInfoCnt uint32 + LineInfoRecSize uint32 + LineInfo Pointer + LineInfoCnt uint32 + AttachBtfId TypeID + AttachBtfObjFd uint32 + CoreReloCnt uint32 + FdArray Pointer + CoreRelos Pointer + CoreReloRecSize uint32 + LogTrueSize uint32 +} + +func ProgLoad(attr *ProgLoadAttr) (*FD, error) { + fd, err := BPF(BPF_PROG_LOAD, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type ProgQueryAttr struct { + TargetFdOrIfindex uint32 + AttachType AttachType + QueryFlags uint32 + AttachFlags uint32 + ProgIds Pointer + Count uint32 + _ [4]byte + ProgAttachFlags Pointer + LinkIds Pointer + LinkAttachFlags Pointer + Revision uint64 +} + +func ProgQuery(attr *ProgQueryAttr) error { + _, err := BPF(BPF_PROG_QUERY, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type ProgRunAttr struct { + ProgFd uint32 + Retval uint32 + DataSizeIn uint32 + DataSizeOut uint32 + DataIn Pointer + DataOut Pointer + Repeat uint32 + Duration uint32 + CtxSizeIn uint32 + CtxSizeOut uint32 + CtxIn Pointer + CtxOut Pointer + Flags uint32 + Cpu uint32 + BatchSize uint32 + _ [4]byte +} + +func ProgRun(attr *ProgRunAttr) error { + _, err := BPF(BPF_PROG_TEST_RUN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + return err +} + +type RawTracepointOpenAttr struct { + Name Pointer + ProgFd uint32 + _ [4]byte +} + +func RawTracepointOpen(attr *RawTracepointOpenAttr) (*FD, error) { + fd, err := BPF(BPF_RAW_TRACEPOINT_OPEN, unsafe.Pointer(attr), unsafe.Sizeof(*attr)) + if err != nil { + return nil, err + } + return NewFD(int(fd)) +} + +type CgroupLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + CgroupId uint64 + AttachType AttachType + _ [36]byte +} + +type IterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TargetName Pointer + TargetNameLen uint32 +} + +type KprobeLinkInfo struct { + Type LinkType + 
Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType + _ [4]byte + FuncName Pointer + NameLen uint32 + Offset uint32 + Addr uint64 + Missed uint64 + _ [8]byte +} + +type KprobeMultiLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Addrs Pointer + Count uint32 + Flags uint32 + Missed uint64 + _ [24]byte +} + +type NetNsLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + NetnsIno uint32 + AttachType AttachType + _ [40]byte +} + +type NetfilterLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 + _ [32]byte +} + +type NetkitLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type PerfEventLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + PerfEventType PerfEventType +} + +type RawTracepointLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + TpName Pointer + TpNameLen uint32 + _ [36]byte +} + +type TcxLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + AttachType AttachType + _ [40]byte +} + +type TracingLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + AttachType AttachType + TargetObjId uint32 + TargetBtfId TypeID + _ [36]byte +} + +type XDPLinkInfo struct { + Type LinkType + Id LinkID + ProgId uint32 + _ [4]byte + Ifindex uint32 + _ [44]byte +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go new file mode 100644 index 0000000000..1b4f052ee2 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/buffer.go @@ -0,0 +1,85 @@ +package sysenc + +import ( + "unsafe" + + "github.com/cilium/ebpf/internal/sys" +) + +type Buffer struct { + ptr unsafe.Pointer + // Size of the buffer. syscallPointerOnly if created from UnsafeBuffer or when using + // zero-copy unmarshaling. + size int +} + +const syscallPointerOnly = -1 + +func newBuffer(buf []byte) Buffer { + if len(buf) == 0 { + return Buffer{} + } + return Buffer{unsafe.Pointer(&buf[0]), len(buf)} +} + +// UnsafeBuffer constructs a Buffer for zero-copy unmarshaling. +// +// [Pointer] is the only valid method to call on such a Buffer. +// Use [SyscallBuffer] instead if possible. +func UnsafeBuffer(ptr unsafe.Pointer) Buffer { + return Buffer{ptr, syscallPointerOnly} +} + +// SyscallOutput prepares a Buffer for a syscall to write into. +// +// size is the length of the desired buffer in bytes. +// The buffer may point at the underlying memory of dst, in which case [Unmarshal] +// becomes a no-op. +// +// The contents of the buffer are undefined and may be non-zero. +func SyscallOutput(dst any, size int) Buffer { + if dstBuf := unsafeBackingMemory(dst); len(dstBuf) == size { + buf := newBuffer(dstBuf) + buf.size = syscallPointerOnly + return buf + } + + return newBuffer(make([]byte, size)) +} + +// CopyTo copies the buffer into dst. +// +// Returns the number of copied bytes. +func (b Buffer) CopyTo(dst []byte) int { + return copy(dst, b.Bytes()) +} + +// AppendTo appends the buffer onto dst. +func (b Buffer) AppendTo(dst []byte) []byte { + return append(dst, b.Bytes()...) +} + +// Pointer returns the location where a syscall should write. +func (b Buffer) Pointer() sys.Pointer { + // NB: This deliberately ignores b.length to support zero-copy + // marshaling / unmarshaling using unsafe.Pointer. 
+ return sys.NewPointer(b.ptr) +} + +// Unmarshal the buffer into the provided value. +func (b Buffer) Unmarshal(data any) error { + if b.size == syscallPointerOnly { + return nil + } + + return Unmarshal(data, b.Bytes()) +} + +// Bytes returns the buffer as a byte slice. Returns nil if the Buffer was +// created using UnsafeBuffer or by zero-copy unmarshaling. +func (b Buffer) Bytes() []byte { + if b.size == syscallPointerOnly { + return nil + } + return unsafe.Slice((*byte)(b.ptr), b.size) +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go new file mode 100644 index 0000000000..676ad98ba1 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/doc.go @@ -0,0 +1,3 @@ +// Package sysenc provides efficient conversion of Go values to system +// call interfaces. +package sysenc diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go new file mode 100644 index 0000000000..52d111e7af --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/layout.go @@ -0,0 +1,41 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://go.dev/LICENSE. + +package sysenc + +import ( + "reflect" + "sync" +) + +var hasUnexportedFieldsCache sync.Map // map[reflect.Type]bool + +func hasUnexportedFields(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Slice, reflect.Array, reflect.Pointer: + return hasUnexportedFields(typ.Elem()) + + case reflect.Struct: + if unexported, ok := hasUnexportedFieldsCache.Load(typ); ok { + return unexported.(bool) + } + + unexported := false + for i, n := 0, typ.NumField(); i < n; i++ { + field := typ.Field(i) + // Package binary allows _ fields but always writes zeroes into them. + if (!field.IsExported() && field.Name != "_") || hasUnexportedFields(field.Type) { + unexported = true + break + } + } + + hasUnexportedFieldsCache.Store(typ, unexported) + return unexported + + default: + // NB: It's not clear what this means for Chan and so on. + return false + } +} diff --git a/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go new file mode 100644 index 0000000000..0026af8f24 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/sysenc/marshal.go @@ -0,0 +1,177 @@ +package sysenc + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "reflect" + "slices" + "sync" + "unsafe" + + "github.com/cilium/ebpf/internal" +) + +// Marshal turns data into a byte slice using the system's native endianness. +// +// If possible, avoids allocations by directly using the backing memory +// of data. This means that the variable must not be modified for the lifetime +// of the returned [Buffer]. +// +// Returns an error if the data can't be turned into a byte slice according to +// the behaviour of [binary.Write]. 
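// Illustrative sketch (not part of the vendored file). It mimics the fast path
// that Marshal and unsafeBackingMemory implement in this package: when a value
// has no padding and no unexported fields, its backing memory can be reused
// directly instead of going through binary.Write. The demoKey type below is a
// hypothetical example, not an ebpf map key format.
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

type demoKey struct {
	Prefix uint32
	Addr   [4]byte
}

func main() {
	k := demoKey{Prefix: 32, Addr: [4]byte{10, 0, 0, 1}}

	// binary.Size matching unsafe.Sizeof means the struct has no implicit
	// padding, so its raw bytes are exactly what binary.Write would emit in
	// native endianness and can be handed to the kernel without copying.
	if binary.Size(k) == int(unsafe.Sizeof(k)) {
		raw := unsafe.Slice((*byte)(unsafe.Pointer(&k)), int(unsafe.Sizeof(k)))
		fmt.Printf("zero-copy view: %d bytes: % x\n", len(raw), raw)
	}
}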
+func Marshal(data any, size int) (Buffer, error) { + if data == nil { + return Buffer{}, errors.New("can't marshal a nil value") + } + + var buf []byte + var err error + switch value := data.(type) { + case encoding.BinaryMarshaler: + buf, err = value.MarshalBinary() + case string: + buf = unsafe.Slice(unsafe.StringData(value), len(value)) + case []byte: + buf = value + case int16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), uint16(value)) + case uint16: + buf = internal.NativeEndian.AppendUint16(make([]byte, 0, 2), value) + case int32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), uint32(value)) + case uint32: + buf = internal.NativeEndian.AppendUint32(make([]byte, 0, 4), value) + case int64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), uint64(value)) + case uint64: + buf = internal.NativeEndian.AppendUint64(make([]byte, 0, 8), value) + default: + if buf := unsafeBackingMemory(data); len(buf) == size { + return newBuffer(buf), nil + } + + wr := internal.NewBuffer(make([]byte, 0, size)) + defer internal.PutBuffer(wr) + + err = binary.Write(wr, internal.NativeEndian, value) + buf = wr.Bytes() + } + if err != nil { + return Buffer{}, err + } + + if len(buf) != size { + return Buffer{}, fmt.Errorf("%T doesn't marshal to %d bytes", data, size) + } + + return newBuffer(buf), nil +} + +var bytesReaderPool = sync.Pool{ + New: func() interface{} { + return new(bytes.Reader) + }, +} + +// Unmarshal a byte slice in the system's native endianness into data. +// +// Returns an error if buf can't be unmarshalled according to the behaviour +// of [binary.Read]. +func Unmarshal(data interface{}, buf []byte) error { + switch value := data.(type) { + case encoding.BinaryUnmarshaler: + return value.UnmarshalBinary(buf) + + case *string: + *value = string(buf) + return nil + + case *[]byte: + // Backwards compat: unmarshaling into a slice replaces the whole slice. + *value = slices.Clone(buf) + return nil + + default: + if dataBuf := unsafeBackingMemory(data); len(dataBuf) == len(buf) { + copy(dataBuf, buf) + return nil + } + + rd := bytesReaderPool.Get().(*bytes.Reader) + defer bytesReaderPool.Put(rd) + + rd.Reset(buf) + + if err := binary.Read(rd, internal.NativeEndian, value); err != nil { + return err + } + + if rd.Len() != 0 { + return fmt.Errorf("unmarshaling %T doesn't consume all data", data) + } + + return nil + } +} + +// unsafeBackingMemory returns the backing memory of data if it can be used +// instead of calling into package binary. +// +// Returns nil if the value is not a pointer or a slice, or if it contains +// padding or unexported fields. +func unsafeBackingMemory(data any) []byte { + if data == nil { + return nil + } + + value := reflect.ValueOf(data) + var valueSize int + switch value.Kind() { + case reflect.Pointer: + if value.IsNil() { + return nil + } + + if elemType := value.Type().Elem(); elemType.Kind() != reflect.Slice { + valueSize = int(elemType.Size()) + break + } + + // We're dealing with a pointer to a slice. Dereference and + // handle it like a regular slice. + value = value.Elem() + fallthrough + + case reflect.Slice: + valueSize = int(value.Type().Elem().Size()) * value.Len() + + default: + // Prevent Value.UnsafePointer from panicking. + return nil + } + + // Some nil pointer types currently crash binary.Size. Call it after our own + // code so that the panic isn't reachable. 
+ // See https://github.com/golang/go/issues/60892 + if size := binary.Size(data); size == -1 || size != valueSize { + // The type contains padding or unsupported types. + return nil + } + + if hasUnexportedFields(reflect.TypeOf(data)) { + return nil + } + + // Reinterpret the pointer as a byte slice. This violates the unsafe.Pointer + // rules because it's very unlikely that the source data has "an equivalent + // memory layout". However, we can make it safe-ish because of the + // following reasons: + // - There is no alignment mismatch since we cast to a type with an + // alignment of 1. + // - There are no pointers in the source type so we don't upset the GC. + // - The length is verified at runtime. + return unsafe.Slice((*byte)(value.UnsafePointer()), valueSize) +} diff --git a/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/fd_trace.go b/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/fd_trace.go new file mode 100644 index 0000000000..562df2cc0c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/fd_trace.go @@ -0,0 +1,103 @@ +package fdtrace + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sync" + "sync/atomic" +) + +// foundLeak is atomic since the GC may collect objects in parallel. +var foundLeak atomic.Bool + +func onLeakFD(fs *runtime.Frames) { + foundLeak.Store(true) + fmt.Fprintln(os.Stderr, "leaked fd created at:") + fmt.Fprintln(os.Stderr, formatFrames(fs)) +} + +// fds is a registry of all file descriptors wrapped into sys.fds that were +// created while an fd tracer was active. +var fds *sync.Map // map[int]*runtime.Frames + +// TraceFD associates raw with the current execution stack. +// +// skip controls how many entries of the stack the function should skip. +func TraceFD(raw int, skip int) { + if fds == nil { + return + } + + // Attempt to store the caller's stack for the given fd value. + // Panic if fds contains an existing stack for the fd. + old, exist := fds.LoadOrStore(raw, callersFrames(skip)) + if exist { + f := old.(*runtime.Frames) + panic(fmt.Sprintf("found existing stack for fd %d:\n%s", raw, formatFrames(f))) + } +} + +// ForgetFD removes any existing association for raw. +func ForgetFD(raw int) { + if fds != nil { + fds.Delete(raw) + } +} + +// LeakFD indicates that raw was leaked. +// +// Calling the function with a value that was not passed to [TraceFD] before +// is undefined. +func LeakFD(raw int) { + if fds == nil { + return + } + + // Invoke the fd leak callback. Calls LoadAndDelete to guarantee the callback + // is invoked at most once for one sys.FD allocation, runtime.Frames can only + // be unwound once. + f, ok := fds.LoadAndDelete(raw) + if ok { + onLeakFD(f.(*runtime.Frames)) + } +} + +// flushFrames removes all elements from fds and returns them as a slice. This +// deals with the fact that a runtime.Frames can only be unwound once using +// Next(). +func flushFrames() []*runtime.Frames { + var frames []*runtime.Frames + fds.Range(func(key, value any) bool { + frames = append(frames, value.(*runtime.Frames)) + fds.Delete(key) + return true + }) + return frames +} + +func callersFrames(skip int) *runtime.Frames { + c := make([]uintptr, 32) + + // Skip runtime.Callers and this function. + i := runtime.Callers(skip+2, c) + if i == 0 { + return nil + } + + return runtime.CallersFrames(c) +} + +// formatFrames formats a runtime.Frames as a human-readable string. 
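// Illustrative sketch (not part of the vendored file). It shows the
// runtime.Callers / runtime.CallersFrames pattern that TraceFD, callersFrames
// and formatFrames rely on: capture the caller's stack once, then unwind it
// later, keeping in mind a runtime.Frames can only be walked a single time.
package main

import (
	"fmt"
	"runtime"
)

func capture(skip int) *runtime.Frames {
	pcs := make([]uintptr, 32)
	// Skip runtime.Callers and capture itself so the trace starts at the caller.
	n := runtime.Callers(skip+2, pcs)
	if n == 0 {
		return nil
	}
	return runtime.CallersFrames(pcs[:n])
}

func main() {
	fs := capture(0)
	if fs == nil {
		return
	}
	for {
		f, more := fs.Next()
		fmt.Printf("\t%s\n\t\t%s:%d\n", f.Function, f.File, f.Line)
		if !more {
			break
		}
	}
}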
+func formatFrames(fs *runtime.Frames) string { + var b bytes.Buffer + for { + f, more := fs.Next() + b.WriteString(fmt.Sprintf("\t%s+%#x\n\t\t%s:%d\n", f.Function, f.PC-f.Entry, f.File, f.Line)) + if !more { + break + } + } + return b.String() +} diff --git a/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/main.go b/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/main.go new file mode 100644 index 0000000000..c1f7b42d91 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/testutils/fdtrace/main.go @@ -0,0 +1,31 @@ +package fdtrace + +import ( + "os" + "sync" +) + +type testingM interface { + Run() int +} + +// TestMain runs m with fd tracing enabled. +// +// The function calls [os.Exit] and does not return. +func TestMain(m testingM) { + fds = new(sync.Map) + + ret := m.Run() + + if fs := flushFrames(); len(fs) != 0 { + for _, f := range fs { + onLeakFD(f) + } + } + + if foundLeak.Load() { + ret = 99 + } + + os.Exit(ret) +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go new file mode 100644 index 0000000000..062bef9ec3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/kprobe.go @@ -0,0 +1,364 @@ +package tracefs + +import ( + "crypto/rand" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "syscall" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + ErrInvalidInput = errors.New("invalid input") + + ErrInvalidMaxActive = errors.New("can only set maxactive on kretprobes") +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type=ProbeType -linecomment + +type ProbeType uint8 + +const ( + Kprobe ProbeType = iota // kprobe + Uprobe // uprobe +) + +func (pt ProbeType) eventsFile() (*os.File, error) { + path, err := sanitizeTracefsPath(fmt.Sprintf("%s_events", pt.String())) + if err != nil { + return nil, err + } + + return os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0666) +} + +type ProbeArgs struct { + Type ProbeType + Symbol, Group, Path string + Offset, RefCtrOffset, Cookie uint64 + Pid, RetprobeMaxActive int + Ret bool +} + +// RandomGroup generates a pseudorandom string for use as a tracefs group name. +// Returns an error when the output string would exceed 63 characters (kernel +// limitation), when rand.Read() fails or when prefix contains characters not +// allowed by IsValidTraceID. +func RandomGroup(prefix string) (string, error) { + if !validIdentifier(prefix) { + return "", fmt.Errorf("prefix '%s' must be alphanumeric or underscore: %w", prefix, ErrInvalidInput) + } + + b := make([]byte, 8) + if _, err := rand.Read(b); err != nil { + return "", fmt.Errorf("reading random bytes: %w", err) + } + + group := fmt.Sprintf("%s_%x", prefix, b) + if len(group) > 63 { + return "", fmt.Errorf("group name '%s' cannot be longer than 63 characters: %w", group, ErrInvalidInput) + } + + return group, nil +} + +// validIdentifier implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_]*$". +// +// Trace event groups, names and kernel symbols must adhere to this set +// of characters. Non-empty, first character must not be a number, all +// characters must be alphanumeric or underscore. 
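// Illustrative sketch (not part of the vendored file). It spells out the
// "^[a-zA-Z_][0-9a-zA-Z_]*$" rule that validIdentifier implements below, using
// the regexp package for comparison, and builds a RandomGroup-style name from
// the hypothetical prefix "ebpf".
package main

import (
	"crypto/rand"
	"fmt"
	"regexp"
)

var identRe = regexp.MustCompile(`^[a-zA-Z_][0-9a-zA-Z_]*$`)

func main() {
	for _, s := range []string{"ebpf", "_tmp", "9bad", "no-dash", ""} {
		fmt.Printf("%-10q valid=%v\n", s, identRe.MatchString(s))
	}

	// A RandomGroup("ebpf")-style name: the prefix plus eight random bytes in
	// hex, which stays well under the kernel's 63 character limit.
	b := make([]byte, 8)
	if _, err := rand.Read(b); err == nil {
		group := fmt.Sprintf("ebpf_%x", b)
		fmt.Printf("group: %s (len %d, limit 63)\n", group, len(group))
	}
}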
+func validIdentifier(s string) bool { + if len(s) < 1 { + return false + } + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + default: + return false + } + } + + return true +} + +func sanitizeTracefsPath(path ...string) (string, error) { + base, err := getTracefsPath() + if err != nil { + return "", err + } + l := filepath.Join(path...) + p := filepath.Join(base, l) + if !strings.HasPrefix(p, base) { + return "", fmt.Errorf("path '%s' attempts to escape base path '%s': %w", l, base, ErrInvalidInput) + } + return p, nil +} + +// getTracefsPath will return a correct path to the tracefs mount point. +// Since kernel 4.1 tracefs should be mounted by default at /sys/kernel/tracing, +// but may be also be available at /sys/kernel/debug/tracing if debugfs is mounted. +// The available tracefs paths will depends on distribution choices. +var getTracefsPath = sync.OnceValues(func() (string, error) { + for _, p := range []struct { + path string + fsType int64 + }{ + {"/sys/kernel/tracing", unix.TRACEFS_MAGIC}, + {"/sys/kernel/debug/tracing", unix.TRACEFS_MAGIC}, + // RHEL/CentOS + {"/sys/kernel/debug/tracing", unix.DEBUGFS_MAGIC}, + } { + if fsType, err := linux.FSType(p.path); err == nil && fsType == p.fsType { + return p.path, nil + } + } + + return "", errors.New("neither debugfs nor tracefs are mounted") +}) + +// sanitizeIdentifier replaces every invalid character for the tracefs api with an underscore. +// +// It is equivalent to calling regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAllString("_"). +func sanitizeIdentifier(s string) string { + var skip bool + return strings.Map(func(c rune) rune { + switch { + case c >= 'a' && c <= 'z', + c >= 'A' && c <= 'Z', + c >= '0' && c <= '9': + skip = false + return c + + case skip: + return -1 + + default: + skip = true + return '_' + } + }, s) +} + +// EventID reads a trace event's ID from tracefs given its group and name. +// The kernel requires group and name to be alphanumeric or underscore. +func EventID(group, name string) (uint64, error) { + if !validIdentifier(group) { + return 0, fmt.Errorf("invalid tracefs group: %q", group) + } + + if !validIdentifier(name) { + return 0, fmt.Errorf("invalid tracefs name: %q", name) + } + + path, err := sanitizeTracefsPath("events", group, name, "id") + if err != nil { + return 0, err + } + tid, err := internal.ReadUint64FromFile("%d\n", path) + if errors.Is(err, os.ErrNotExist) { + return 0, err + } + if err != nil { + return 0, fmt.Errorf("reading trace event ID of %s/%s: %w", group, name, err) + } + + return tid, nil +} + +func probePrefix(ret bool, maxActive int) string { + if ret { + if maxActive > 0 { + return fmt.Sprintf("r%d", maxActive) + } + return "r" + } + return "p" +} + +// Event represents an entry in a tracefs probe events file. +type Event struct { + typ ProbeType + group, name string + // event id allocated by the kernel. 0 if the event has already been removed. + id uint64 +} + +// NewEvent creates a new ephemeral trace event. +// +// Returns os.ErrNotExist if symbol is not a valid +// kernel symbol, or if it is not traceable with kprobes. Returns os.ErrExist +// if a probe with the same group and symbol already exists. Returns an error if +// args.RetprobeMaxActive is used on non kprobe types. Returns ErrNotSupported if +// the kernel is too old to support kretprobe maxactive. 
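// Illustrative sketch (not part of the vendored file). It prints the
// [k,u]probe_events lines that NewEvent below writes into tracefs, following
// the syntax from Documentation/trace/kprobetrace.txt. The group and symbol
// are placeholders.
package main

import "fmt"

func main() {
	group, sym := "ebpf_1234", "nf_conntrack_destroy"

	// Entry kprobe: p[:[GRP/]EVENT] SYM[+offs]
	fmt.Printf("p:%s/p_%s %s\n", group, sym, sym)
	// Return kprobe with MAXACTIVE=100: r[MAXACTIVE][:[GRP/]EVENT] SYM
	fmt.Printf("r100:%s/r_%s %s\n", group, sym, sym)
	// Removal, as written by removeEvent: -:[GRP/]EVENT
	fmt.Printf("-:%s/r_%s\n", group, sym)
}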
+func NewEvent(args ProbeArgs) (*Event, error) { + // Before attempting to create a trace event through tracefs, + // check if an event with the same group and name already exists. + // Kernels 4.x and earlier don't return os.ErrExist on writing a duplicate + // entry, so we need to rely on reads for detecting uniqueness. + eventName := sanitizeIdentifier(args.Symbol) + _, err := EventID(args.Group, eventName) + if err == nil { + return nil, fmt.Errorf("trace event %s/%s: %w", args.Group, eventName, os.ErrExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("trace event %s/%s: %w (unknown symbol?)", args.Group, eventName, err) + } + if !errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("checking trace event %s/%s: %w", args.Group, eventName, err) + } + + // Open the kprobe_events file in tracefs. + f, err := args.Type.eventsFile() + if err != nil { + return nil, err + } + defer f.Close() + + var pe, token string + switch args.Type { + case Kprobe: + // The kprobe_events syntax is as follows (see Documentation/trace/kprobetrace.txt): + // p[:[GRP/]EVENT] [MOD:]SYM[+offs]|MEMADDR [FETCHARGS] : Set a probe + // r[MAXACTIVE][:[GRP/]EVENT] [MOD:]SYM[+0] [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/r_my_kretprobe nf_conntrack_destroy + // p:ebpf_5678/p_my_kprobe __x64_sys_execve + // + // Leaving the kretprobe's MAXACTIVE set to 0 (or absent) will make the + // kernel default to NR_CPUS. This is desired in most eBPF cases since + // subsampling or rate limiting logic can be more accurately implemented in + // the eBPF program itself. + // See Documentation/kprobes.txt for more details. + if args.RetprobeMaxActive != 0 && !args.Ret { + return nil, ErrInvalidMaxActive + } + token = KprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, args.RetprobeMaxActive), args.Group, eventName, token) + case Uprobe: + // The uprobe_events syntax is as follows: + // p[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a probe + // r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] : Set a return probe + // -:[GRP/]EVENT : Clear a probe + // + // Some examples: + // r:ebpf_1234/readline /bin/bash:0x12345 + // p:ebpf_5678/main_mySymbol /bin/mybin:0x12345(0x123) + // + // See Documentation/trace/uprobetracer.txt for more details. + if args.RetprobeMaxActive != 0 { + return nil, ErrInvalidMaxActive + } + token = UprobeToken(args) + pe = fmt.Sprintf("%s:%s/%s %s", probePrefix(args.Ret, 0), args.Group, eventName, token) + } + _, err = f.WriteString(pe) + + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. + if errors.Is(err, syscall.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // ERANGE is returned when the `SYM[+offs]` token is too big and cannot + // be resolved. + if errors.Is(err, syscall.ERANGE) { + return nil, fmt.Errorf("token %s: offset too big: %w", token, os.ErrNotExist) + } + + if err != nil { + return nil, fmt.Errorf("token %s: writing '%s': %w", token, pe, err) + } + + // Get the newly-created trace event's id. 
+ tid, err := EventID(args.Group, eventName) + if args.RetprobeMaxActive != 0 && errors.Is(err, os.ErrNotExist) { + // Kernels < 4.12 don't support maxactive and therefore auto generate + // group and event names from the symbol and offset. The symbol is used + // without any sanitization. + // See https://elixir.bootlin.com/linux/v4.10/source/kernel/trace/trace_kprobe.c#L712 + event := fmt.Sprintf("kprobes/r_%s_%d", args.Symbol, args.Offset) + if err := removeEvent(args.Type, event); err != nil { + return nil, fmt.Errorf("failed to remove spurious maxactive event: %s", err) + } + return nil, fmt.Errorf("create trace event with non-default maxactive: %w", internal.ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("get trace event id: %w", err) + } + + evt := &Event{args.Type, args.Group, eventName, tid} + runtime.SetFinalizer(evt, (*Event).Close) + return evt, nil +} + +// Close removes the event from tracefs. +// +// Returns os.ErrClosed if the event has already been closed before. +func (evt *Event) Close() error { + if evt.id == 0 { + return os.ErrClosed + } + + evt.id = 0 + runtime.SetFinalizer(evt, nil) + pe := fmt.Sprintf("%s/%s", evt.group, evt.name) + return removeEvent(evt.typ, pe) +} + +func removeEvent(typ ProbeType, pe string) error { + f, err := typ.eventsFile() + if err != nil { + return err + } + defer f.Close() + + // See [k,u]probe_events syntax above. The probe type does not need to be specified + // for removals. + if _, err = f.WriteString("-:" + pe); err != nil { + return fmt.Errorf("remove event %q from %s: %w", pe, f.Name(), err) + } + + return nil +} + +// ID returns the tracefs ID associated with the event. +func (evt *Event) ID() uint64 { + return evt.id +} + +// Group returns the tracefs group used by the event. +func (evt *Event) Group() string { + return evt.group +} + +// KprobeToken creates the SYM[+offs] token for the tracefs api. +func KprobeToken(args ProbeArgs) string { + po := args.Symbol + + if args.Offset != 0 { + po += fmt.Sprintf("+%#x", args.Offset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go new file mode 100644 index 0000000000..87cb0a059b --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/probetype_string.go @@ -0,0 +1,24 @@ +// Code generated by "stringer -type=ProbeType -linecomment"; DO NOT EDIT. + +package tracefs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Kprobe-0] + _ = x[Uprobe-1] +} + +const _ProbeType_name = "kprobeuprobe" + +var _ProbeType_index = [...]uint8{0, 6, 12} + +func (i ProbeType) String() string { + if i >= ProbeType(len(_ProbeType_index)-1) { + return "ProbeType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProbeType_name[_ProbeType_index[i]:_ProbeType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go new file mode 100644 index 0000000000..994f31260d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/tracefs/uprobe.go @@ -0,0 +1,16 @@ +package tracefs + +import "fmt" + +// UprobeToken creates the PATH:OFFSET(REF_CTR_OFFSET) token for the tracefs api. 
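// Illustrative sketch (not part of the vendored file). It assembles the
// PATH:OFFSET(REF_CTR_OFFSET) token described above, matching what UprobeToken
// below produces. The path and offsets are placeholders.
package main

import "fmt"

func main() {
	path, offset, refCtr := "/bin/bash", uint64(0x12345), uint64(0x123)

	token := fmt.Sprintf("%s:%#x", path, offset)
	if refCtr != 0 {
		// Optional reference counter offset, used e.g. for USDT semaphores.
		token += fmt.Sprintf("(%#x)", refCtr)
	}
	fmt.Println(token) // /bin/bash:0x12345(0x123)
}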
+func UprobeToken(args ProbeArgs) string { + po := fmt.Sprintf("%s:%#x", args.Path, args.Offset) + + if args.RefCtrOffset != 0 { + // This is not documented in Documentation/trace/uprobetracer.txt. + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/trace/trace.c#L5564 + po += fmt.Sprintf("(%#x)", args.RefCtrOffset) + } + + return po +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/doc.go b/vendor/github.com/cilium/ebpf/internal/unix/doc.go new file mode 100644 index 0000000000..d168d36f18 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/doc.go @@ -0,0 +1,11 @@ +// Package unix re-exports Linux specific parts of golang.org/x/sys/unix. +// +// It avoids breaking compilation on other OS by providing stubs as follows: +// - Invoking a function always returns an error. +// - Errnos have distinct, non-zero values. +// - Constants have distinct but meaningless values. +// - Types use the same names for members, but may or may not follow the +// Linux layout. +package unix + +// Note: please don't add any custom API to this package. Use internal/sys instead. diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go new file mode 100644 index 0000000000..144e608d1c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_linux.go @@ -0,0 +1,217 @@ +//go:build linux + +package unix + +import ( + "syscall" + + linux "golang.org/x/sys/unix" +) + +const ( + ENOENT = linux.ENOENT + EEXIST = linux.EEXIST + EAGAIN = linux.EAGAIN + ENOSPC = linux.ENOSPC + EINVAL = linux.EINVAL + EPOLLIN = linux.EPOLLIN + EINTR = linux.EINTR + EPERM = linux.EPERM + ESRCH = linux.ESRCH + ENODEV = linux.ENODEV + EBADF = linux.EBADF + E2BIG = linux.E2BIG + EFAULT = linux.EFAULT + EACCES = linux.EACCES + EILSEQ = linux.EILSEQ + EOPNOTSUPP = linux.EOPNOTSUPP + ESTALE = linux.ESTALE +) + +const ( + BPF_F_NO_PREALLOC = linux.BPF_F_NO_PREALLOC + BPF_F_NUMA_NODE = linux.BPF_F_NUMA_NODE + BPF_F_RDONLY = linux.BPF_F_RDONLY + BPF_F_WRONLY = linux.BPF_F_WRONLY + BPF_F_RDONLY_PROG = linux.BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG = linux.BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE = linux.BPF_F_SLEEPABLE + BPF_F_XDP_HAS_FRAGS = linux.BPF_F_XDP_HAS_FRAGS + BPF_F_MMAPABLE = linux.BPF_F_MMAPABLE + BPF_F_INNER_MAP = linux.BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN = linux.BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN = linux.BPF_F_UPROBE_MULTI_RETURN + BPF_F_LOCK = linux.BPF_F_LOCK + BPF_OBJ_NAME_LEN = linux.BPF_OBJ_NAME_LEN + BPF_TAG_SIZE = linux.BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT = linux.BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT = linux.BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ = linux.BPF_RINGBUF_HDR_SZ + SYS_BPF = linux.SYS_BPF + F_DUPFD_CLOEXEC = linux.F_DUPFD_CLOEXEC + EPOLL_CTL_ADD = linux.EPOLL_CTL_ADD + EPOLL_CLOEXEC = linux.EPOLL_CLOEXEC + O_CLOEXEC = linux.O_CLOEXEC + O_NONBLOCK = linux.O_NONBLOCK + PROT_NONE = linux.PROT_NONE + PROT_READ = linux.PROT_READ + PROT_WRITE = linux.PROT_WRITE + MAP_ANON = linux.MAP_ANON + MAP_SHARED = linux.MAP_SHARED + MAP_PRIVATE = linux.MAP_PRIVATE + PERF_ATTR_SIZE_VER1 = linux.PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE = linux.PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT = linux.PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT = linux.PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE = linux.PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE = linux.PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF = linux.PERF_EVENT_IOC_SET_BPF + PerfBitWatermark = linux.PerfBitWatermark + 
PerfBitWriteBackward = linux.PerfBitWriteBackward + PERF_SAMPLE_RAW = linux.PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC = linux.PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY = linux.RLIM_INFINITY + RLIMIT_MEMLOCK = linux.RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME = linux.BPF_STATS_RUN_TIME + PERF_RECORD_LOST = linux.PERF_RECORD_LOST + PERF_RECORD_SAMPLE = linux.PERF_RECORD_SAMPLE + AT_FDCWD = linux.AT_FDCWD + RENAME_NOREPLACE = linux.RENAME_NOREPLACE + SO_ATTACH_BPF = linux.SO_ATTACH_BPF + SO_DETACH_BPF = linux.SO_DETACH_BPF + SOL_SOCKET = linux.SOL_SOCKET + SIGPROF = linux.SIGPROF + SIGUSR1 = linux.SIGUSR1 + SIG_BLOCK = linux.SIG_BLOCK + SIG_UNBLOCK = linux.SIG_UNBLOCK + BPF_FS_MAGIC = linux.BPF_FS_MAGIC + TRACEFS_MAGIC = linux.TRACEFS_MAGIC + DEBUGFS_MAGIC = linux.DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP = linux.BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP = linux.BPF_RB_FORCE_WAKEUP + AF_UNSPEC = linux.AF_UNSPEC + IFF_UP = linux.IFF_UP +) + +type Statfs_t = linux.Statfs_t +type Stat_t = linux.Stat_t +type Rlimit = linux.Rlimit +type Signal = linux.Signal +type Sigset_t = linux.Sigset_t +type PerfEventMmapPage = linux.PerfEventMmapPage +type EpollEvent = linux.EpollEvent +type PerfEventAttr = linux.PerfEventAttr +type Utsname = linux.Utsname +type CPUSet = linux.CPUSet + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return linux.Syscall(trap, a1, a2, a3) +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return linux.PthreadSigmask(how, set, oldset) +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return linux.FcntlInt(fd, cmd, arg) +} + +func IoctlSetInt(fd int, req uint, value int) error { + return linux.IoctlSetInt(fd, req, value) +} + +func Statfs(path string, buf *Statfs_t) (err error) { + return linux.Statfs(path, buf) +} + +func Close(fd int) (err error) { + return linux.Close(fd) +} + +func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { + return linux.EpollWait(epfd, events, msec) +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return linux.EpollCtl(epfd, op, fd, event) +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return linux.Eventfd(initval, flags) +} + +func Write(fd int, p []byte) (n int, err error) { + return linux.Write(fd, p) +} + +func EpollCreate1(flag int) (fd int, err error) { + return linux.EpollCreate1(flag) +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return linux.SetNonblock(fd, nonblocking) +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return linux.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return linux.Munmap(b) +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return linux.PerfEventOpen(attr, pid, cpu, groupFd, flags) +} + +func Uname(buf *Utsname) (err error) { + return linux.Uname(buf) +} + +func Getpid() int { + return linux.Getpid() +} + +func Gettid() int { + return linux.Gettid() +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return linux.Tgkill(tgid, tid, sig) +} + +func BytePtrFromString(s string) (*byte, error) { + return linux.BytePtrFromString(s) +} + +func ByteSliceToString(s []byte) string { + return linux.ByteSliceToString(s) +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return linux.Renameat2(olddirfd, oldpath, newdirfd, newpath, flags) +} + +func Prlimit(pid, resource int, new, old 
*Rlimit) error { + return linux.Prlimit(pid, resource, new, old) +} + +func Open(path string, mode int, perm uint32) (int, error) { + return linux.Open(path, mode, perm) +} + +func Fstat(fd int, stat *Stat_t) error { + return linux.Fstat(fd, stat) +} + +func SetsockoptInt(fd, level, opt, value int) error { + return linux.SetsockoptInt(fd, level, opt, value) +} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return linux.SchedSetaffinity(pid, set) +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return linux.SchedGetaffinity(pid, set) +} diff --git a/vendor/github.com/cilium/ebpf/internal/unix/types_other.go b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go new file mode 100644 index 0000000000..06cc3a0966 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/unix/types_other.go @@ -0,0 +1,312 @@ +//go:build !linux + +package unix + +import ( + "fmt" + "runtime" + "syscall" +) + +var errNonLinux = fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) + +// Errnos are distinct and non-zero. +const ( + ENOENT syscall.Errno = iota + 1 + EEXIST + EAGAIN + ENOSPC + EINVAL + EINTR + EPERM + ESRCH + ENODEV + EBADF + E2BIG + EFAULT + EACCES + EILSEQ + EOPNOTSUPP + ESTALE +) + +// Constants are distinct to avoid breaking switch statements. +const ( + BPF_F_NO_PREALLOC = iota + BPF_F_NUMA_NODE + BPF_F_RDONLY + BPF_F_WRONLY + BPF_F_RDONLY_PROG + BPF_F_WRONLY_PROG + BPF_F_SLEEPABLE + BPF_F_MMAPABLE + BPF_F_INNER_MAP + BPF_F_KPROBE_MULTI_RETURN + BPF_F_UPROBE_MULTI_RETURN + BPF_F_XDP_HAS_FRAGS + BPF_OBJ_NAME_LEN + BPF_TAG_SIZE + BPF_RINGBUF_BUSY_BIT + BPF_RINGBUF_DISCARD_BIT + BPF_RINGBUF_HDR_SZ + SYS_BPF + F_DUPFD_CLOEXEC + EPOLLIN + EPOLL_CTL_ADD + EPOLL_CLOEXEC + O_CLOEXEC + O_NONBLOCK + PROT_NONE + PROT_READ + PROT_WRITE + MAP_ANON + MAP_SHARED + MAP_PRIVATE + PERF_ATTR_SIZE_VER1 + PERF_TYPE_SOFTWARE + PERF_TYPE_TRACEPOINT + PERF_COUNT_SW_BPF_OUTPUT + PERF_EVENT_IOC_DISABLE + PERF_EVENT_IOC_ENABLE + PERF_EVENT_IOC_SET_BPF + PerfBitWatermark + PerfBitWriteBackward + PERF_SAMPLE_RAW + PERF_FLAG_FD_CLOEXEC + RLIM_INFINITY + RLIMIT_MEMLOCK + BPF_STATS_RUN_TIME + PERF_RECORD_LOST + PERF_RECORD_SAMPLE + AT_FDCWD + RENAME_NOREPLACE + SO_ATTACH_BPF + SO_DETACH_BPF + SOL_SOCKET + SIGPROF + SIGUSR1 + SIG_BLOCK + SIG_UNBLOCK + BPF_FS_MAGIC + TRACEFS_MAGIC + DEBUGFS_MAGIC + BPF_RB_NO_WAKEUP + BPF_RB_FORCE_WAKEUP + BPF_F_LOCK + AF_UNSPEC + IFF_UP +) + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid [2]int32 + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +type Stat_t struct { + Dev uint64 + Ino uint64 + Nlink uint64 + Mode uint32 + Uid uint32 + Gid uint32 + _ int32 + Rdev uint64 + Size int64 + Blksize int64 + Blocks int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type Signal int + +type Sigset_t struct { + Val [4]uint64 +} + +func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { + return 0, 0, syscall.ENOTSUP +} + +func PthreadSigmask(how int, set, oldset *Sigset_t) error { + return errNonLinux +} + +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return -1, errNonLinux +} + +func IoctlSetInt(fd int, req uint, value int) error { + return errNonLinux +} + +func Statfs(path string, buf *Statfs_t) error { + return errNonLinux +} + +func Close(fd int) (err error) { + return errNonLinux +} + +type EpollEvent struct { + Events uint32 + Fd int32 + Pad int32 +} + +func EpollWait(epfd int, events []EpollEvent, msec 
int) (n int, err error) { + return 0, errNonLinux +} + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + return errNonLinux +} + +func Eventfd(initval uint, flags int) (fd int, err error) { + return 0, errNonLinux +} + +func Write(fd int, p []byte) (n int, err error) { + return 0, errNonLinux +} + +func EpollCreate1(flag int) (fd int, err error) { + return 0, errNonLinux +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +func SetNonblock(fd int, nonblocking bool) (err error) { + return errNonLinux +} + +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return []byte{}, errNonLinux +} + +func Munmap(b []byte) (err error) { + return errNonLinux +} + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 +} + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + return 0, errNonLinux +} + +type Utsname struct { + Release [65]byte + Version [65]byte +} + +func Uname(buf *Utsname) (err error) { + return errNonLinux +} + +func Getpid() int { + return -1 +} + +func Gettid() int { + return -1 +} + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + return errNonLinux +} + +func BytePtrFromString(s string) (*byte, error) { + return nil, errNonLinux +} + +func ByteSliceToString(s []byte) string { + return "" +} + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) error { + return errNonLinux +} + +func Prlimit(pid, resource int, new, old *Rlimit) error { + return errNonLinux +} + +func Open(path string, mode int, perm uint32) (int, error) { + return -1, errNonLinux +} + +func Fstat(fd int, stat *Stat_t) error { + return errNonLinux +} + +func SetsockoptInt(fd, level, opt, value int) error { + return errNonLinux +} + +type CPUSet struct{} + +func (*CPUSet) Set(int) {} + +func SchedSetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} + +func SchedGetaffinity(pid int, set *CPUSet) error { + return errNonLinux +} diff --git a/vendor/github.com/cilium/ebpf/internal/version.go b/vendor/github.com/cilium/ebpf/internal/version.go new file mode 100644 index 0000000000..a230830b01 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/internal/version.go @@ -0,0 +1,77 @@ +package internal + +import ( + "fmt" +) + +const ( + // Version constant used in ELF binaries indicating that the loader needs to + // substitute the eBPF program's version with the value of the kernel's + // KERNEL_VERSION compile-time macro. Used for compatibility with BCC, gobpf + // and RedSift. + MagicKernelVersion = 0xFFFFFFFE +) + +// A Version in the form Major.Minor.Patch. +type Version [3]uint16 + +// NewVersion creates a version from a string like "Major.Minor.Patch". +// +// Patch is optional. 
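// Illustrative sketch (not part of the vendored file). It parses a
// "Major.Minor.Patch" string the same way NewVersion below does and packs it
// using the KERNEL_VERSION(a,b,c) = a<<16 | b<<8 | c layout that Version.Kernel
// implements.
package main

import "fmt"

func main() {
	var major, minor, patch uint16
	if n, _ := fmt.Sscanf("5.15.2", "%d.%d.%d", &major, &minor, &patch); n < 2 {
		panic("invalid version")
	}

	// Clamp the patch level to 255 so it cannot spill into the minor field,
	// mirroring the kernel's own SUBLEVEL clamping.
	if patch > 255 {
		patch = 255
	}
	code := uint32(uint8(major))<<16 | uint32(uint8(minor))<<8 | uint32(uint8(patch))
	fmt.Printf("v%d.%d.%d -> LINUX_VERSION_CODE %#x\n", major, minor, patch, code)
}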
+func NewVersion(ver string) (Version, error) { + var major, minor, patch uint16 + n, _ := fmt.Sscanf(ver, "%d.%d.%d", &major, &minor, &patch) + if n < 2 { + return Version{}, fmt.Errorf("invalid version: %s", ver) + } + return Version{major, minor, patch}, nil +} + +// NewVersionFromCode creates a version from a LINUX_VERSION_CODE. +func NewVersionFromCode(code uint32) Version { + return Version{ + uint16(uint8(code >> 16)), + uint16(uint8(code >> 8)), + uint16(uint8(code)), + } +} + +func (v Version) String() string { + if v[2] == 0 { + return fmt.Sprintf("v%d.%d", v[0], v[1]) + } + return fmt.Sprintf("v%d.%d.%d", v[0], v[1], v[2]) +} + +// Less returns true if the version is less than another version. +func (v Version) Less(other Version) bool { + for i, a := range v { + if a == other[i] { + continue + } + return a < other[i] + } + return false +} + +// Unspecified returns true if the version is all zero. +func (v Version) Unspecified() bool { + return v[0] == 0 && v[1] == 0 && v[2] == 0 +} + +// Kernel implements the kernel's KERNEL_VERSION macro from linux/version.h. +// It represents the kernel version and patch level as a single value. +func (v Version) Kernel() uint32 { + + // Kernels 4.4 and 4.9 have their SUBLEVEL clamped to 255 to avoid + // overflowing into PATCHLEVEL. + // See kernel commit 9b82f13e7ef3 ("kbuild: clamp SUBLEVEL to 255"). + s := v[2] + if s > 255 { + s = 255 + } + + // Truncate members to uint8 to prevent them from spilling over into + // each other when overflowing 8 bits. + return uint32(uint8(v[0]))<<16 | uint32(uint8(v[1]))<<8 | uint32(uint8(s)) +} diff --git a/vendor/github.com/cilium/ebpf/link/anchor.go b/vendor/github.com/cilium/ebpf/link/anchor.go new file mode 100644 index 0000000000..1a3b5f7681 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/anchor.go @@ -0,0 +1,137 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const anchorFlags = sys.BPF_F_REPLACE | + sys.BPF_F_BEFORE | + sys.BPF_F_AFTER | + sys.BPF_F_ID | + sys.BPF_F_LINK_MPROG + +// Anchor is a reference to a link or program. +// +// It is used to describe where an attachment or detachment should take place +// for link types which support multiple attachment. +type Anchor interface { + // anchor returns an fd or ID and a set of flags. + // + // By default fdOrID is taken to reference a program, but BPF_F_LINK_MPROG + // changes this to refer to a link instead. + // + // BPF_F_BEFORE, BPF_F_AFTER, BPF_F_REPLACE modify where a link or program + // is attached. The default behaviour if none of these flags is specified + // matches BPF_F_AFTER. + anchor() (fdOrID, flags uint32, _ error) +} + +type firstAnchor struct{} + +func (firstAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_BEFORE, nil +} + +// Head is the position before all other programs or links. +func Head() Anchor { + return firstAnchor{} +} + +type lastAnchor struct{} + +func (lastAnchor) anchor() (fdOrID, flags uint32, _ error) { + return 0, sys.BPF_F_AFTER, nil +} + +// Tail is the position after all other programs or links. +func Tail() Anchor { + return lastAnchor{} +} + +// Before is the position just in front of target. +func BeforeLink(target Link) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLink(target Link) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. 
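// Usage sketch (not part of the vendored file). It shows how the Anchor helpers
// defined above (Head, Tail, BeforeLink, ...) are meant to be combined with a
// multi-attach API such as link.AttachTCX. The TCXOptions field names and the
// AttachTCXIngress constant are assumed from the public ebpf API and are not
// part of this diff; the interface index is a placeholder.
package main

import (
	"log"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/link"
)

func attachFirst(prog *ebpf.Program, ifindex int) (link.Link, error) {
	// Head() requests the position before every other TCX program on the
	// interface; Tail(), BeforeProgram() and friends select other positions.
	return link.AttachTCX(link.TCXOptions{
		Interface: ifindex,
		Program:   prog,
		Attach:    ebpf.AttachTCXIngress,
		Anchor:    link.Head(),
	})
}

func main() {
	var prog *ebpf.Program // in real code, load it first (e.g. ebpf.NewCollection)
	if prog == nil {
		log.Println("no program loaded; skipping attach in this sketch")
		return
	}
	l, err := attachFirst(prog, 1)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
}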
+func BeforeLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterLinkByID(target ID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Before is the position just in front of target. +func BeforeProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. +func ReplaceProgram(target *ebpf.Program) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +// Before is the position just in front of target. +func BeforeProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_BEFORE} +} + +// After is the position just after target. +func AfterProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_AFTER} +} + +// Replace the target itself. +func ReplaceProgramByID(target ebpf.ProgramID) Anchor { + return anchor{target, sys.BPF_F_REPLACE} +} + +type anchor struct { + target any + position uint32 +} + +func (ap anchor) anchor() (fdOrID, flags uint32, _ error) { + var typeFlag uint32 + switch target := ap.target.(type) { + case *ebpf.Program: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = 0 + case ebpf.ProgramID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_ID + case interface{ FD() int }: + fd := target.FD() + if fd < 0 { + return 0, 0, sys.ErrClosedFd + } + fdOrID = uint32(fd) + typeFlag = sys.BPF_F_LINK_MPROG + case ID: + fdOrID = uint32(target) + typeFlag = sys.BPF_F_LINK_MPROG | sys.BPF_F_ID + default: + return 0, 0, fmt.Errorf("invalid target %T", ap.target) + } + + return fdOrID, ap.position | typeFlag, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/cgroup.go b/vendor/github.com/cilium/ebpf/link/cgroup.go new file mode 100644 index 0000000000..f17d34f03c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/cgroup.go @@ -0,0 +1,208 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type cgroupAttachFlags uint32 + +const ( + // Allow programs attached to sub-cgroups to override the verdict of this + // program. + flagAllowOverride cgroupAttachFlags = 1 << iota + // Allow attaching multiple programs to the cgroup. Only works if the cgroup + // has zero or more programs attached using the Multi flag. Implies override. + flagAllowMulti + // Set automatically by progAttachCgroup.Update(). Used for updating a + // specific given program attached in multi-mode. + flagReplace +) + +type CgroupOptions struct { + // Path to a cgroupv2 folder. + Path string + // One of the AttachCgroup* constants + Attach ebpf.AttachType + // Program must be of type CGroup*, and the attach type must match Attach. + Program *ebpf.Program +} + +// AttachCgroup links a BPF program to a cgroup. +// +// If the running kernel doesn't support bpf_link, attempts to emulate its +// semantics using the legacy PROG_ATTACH mechanism. If bpf_link is not +// available, the returned [Link] will not support pinning to bpffs. +// +// If you need more control over attachment flags or the attachment mechanism +// used, look at [RawAttachProgram] and [AttachRawLink] instead. 
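+//
+// A minimal usage sketch; the cgroup path and prog are assumptions:
+//
+//	cg, err := AttachCgroup(CgroupOptions{
+//		Path:    "/sys/fs/cgroup/my-service",
+//		Attach:  ebpf.AttachCGroupInetIngress,
+//		Program: prog, // a loaded *ebpf.Program of type CGroupSKB
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cg.Close()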
+func AttachCgroup(opts CgroupOptions) (cg Link, err error) { + cgroup, err := os.Open(opts.Path) + if err != nil { + return nil, fmt.Errorf("can't open cgroup: %s", err) + } + defer func() { + if _, ok := cg.(*progAttachCgroup); ok { + // Skip closing the cgroup handle if we return a valid progAttachCgroup, + // where the handle is retained to implement Update(). + return + } + cgroup.Close() + }() + + cg, err = newLinkCgroup(cgroup, opts.Attach, opts.Program) + if err == nil { + return cg, nil + } + + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowMulti) + } + if errors.Is(err, ErrNotSupported) { + cg, err = newProgAttachCgroup(cgroup, opts.Attach, opts.Program, flagAllowOverride) + } + if err != nil { + return nil, err + } + + return cg, nil +} + +type progAttachCgroup struct { + cgroup *os.File + current *ebpf.Program + attachType ebpf.AttachType + flags cgroupAttachFlags +} + +var _ Link = (*progAttachCgroup)(nil) + +func (cg *progAttachCgroup) isLink() {} + +// newProgAttachCgroup attaches prog to cgroup using BPF_PROG_ATTACH. +// cgroup and prog are retained by [progAttachCgroup]. +func newProgAttachCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program, flags cgroupAttachFlags) (*progAttachCgroup, error) { + if flags&flagAllowMulti > 0 { + if err := haveProgAttachReplace(); err != nil { + return nil, fmt.Errorf("can't support multiple programs: %w", err) + } + } + + // Use a program handle that cannot be closed by the caller. + clone, err := prog.Clone() + if err != nil { + return nil, err + } + + err = RawAttachProgram(RawAttachProgramOptions{ + Target: int(cgroup.Fd()), + Program: clone, + Flags: uint32(flags), + Attach: attach, + }) + if err != nil { + clone.Close() + return nil, fmt.Errorf("cgroup: %w", err) + } + + return &progAttachCgroup{cgroup, clone, attach, flags}, nil +} + +func (cg *progAttachCgroup) Close() error { + defer cg.cgroup.Close() + defer cg.current.Close() + + err := RawDetachProgram(RawDetachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: cg.current, + Attach: cg.attachType, + }) + if err != nil { + return fmt.Errorf("close cgroup: %s", err) + } + return nil +} + +func (cg *progAttachCgroup) Update(prog *ebpf.Program) error { + new, err := prog.Clone() + if err != nil { + return err + } + + args := RawAttachProgramOptions{ + Target: int(cg.cgroup.Fd()), + Program: prog, + Attach: cg.attachType, + Flags: uint32(cg.flags), + } + + if cg.flags&flagAllowMulti > 0 { + // Atomically replacing multiple programs requires at least + // 5.5 (commit 7dd68b3279f17921 "bpf: Support replacing cgroup-bpf + // program in MULTI mode") + args.Anchor = ReplaceProgram(cg.current) + } + + if err := RawAttachProgram(args); err != nil { + new.Close() + return fmt.Errorf("can't update cgroup: %s", err) + } + + cg.current.Close() + cg.current = new + return nil +} + +func (cg *progAttachCgroup) Pin(string) error { + return fmt.Errorf("can't pin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Unpin() error { + return fmt.Errorf("can't unpin cgroup: %w", ErrNotSupported) +} + +func (cg *progAttachCgroup) Info() (*Info, error) { + return nil, fmt.Errorf("can't get cgroup info: %w", ErrNotSupported) +} + +type linkCgroup struct { + RawLink +} + +var _ Link = (*linkCgroup)(nil) + +// newLinkCgroup attaches prog to cgroup using BPF_LINK_CREATE. 
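+// Requires kernel support for cgroup bpf_link (Linux 5.7 or newer);
+// AttachCgroup falls back to the legacy PROG_ATTACH path when this
+// returns an error wrapping ErrNotSupported.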
+func newLinkCgroup(cgroup *os.File, attach ebpf.AttachType, prog *ebpf.Program) (*linkCgroup, error) { + link, err := AttachRawLink(RawLinkOptions{ + Target: int(cgroup.Fd()), + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &linkCgroup{*link}, err +} + +func (cg *linkCgroup) Info() (*Info, error) { + var info sys.CgroupLinkInfo + if err := sys.ObjInfo(cg.fd, &info); err != nil { + return nil, fmt.Errorf("cgroup link info: %s", err) + } + extra := &CgroupInfo{ + CgroupId: info.CgroupId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/doc.go b/vendor/github.com/cilium/ebpf/link/doc.go new file mode 100644 index 0000000000..2bde35ed7a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/doc.go @@ -0,0 +1,2 @@ +// Package link allows attaching eBPF programs to various kernel hooks. +package link diff --git a/vendor/github.com/cilium/ebpf/link/iter.go b/vendor/github.com/cilium/ebpf/link/iter.go new file mode 100644 index 0000000000..0a39faef88 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/iter.go @@ -0,0 +1,84 @@ +package link + +import ( + "fmt" + "io" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type IterOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceIter. The kind of iterator to attach to is + // determined at load time via the AttachTo field. + // + // AttachTo requires the kernel to include BTF of itself, + // and it to be compiled with a recent pahole (>= 1.16). + Program *ebpf.Program + + // Map specifies the target map for bpf_map_elem and sockmap iterators. + // It may be nil. + Map *ebpf.Map +} + +// AttachIter attaches a BPF seq_file iterator. +func AttachIter(opts IterOptions) (*Iter, error) { + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + var info bpfIterLinkInfoMap + if opts.Map != nil { + mapFd := opts.Map.FD() + if mapFd < 0 { + return nil, fmt.Errorf("invalid map: %w", sys.ErrClosedFd) + } + info.map_fd = uint32(mapFd) + } + + attr := sys.LinkCreateIterAttr{ + ProgFd: uint32(progFd), + AttachType: sys.AttachType(ebpf.AttachTraceIter), + IterInfo: sys.NewPointer(unsafe.Pointer(&info)), + IterInfoLen: uint32(unsafe.Sizeof(info)), + } + + fd, err := sys.LinkCreateIter(&attr) + if err != nil { + if haveFeatErr := haveBPFLink(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("can't link iterator: %w", err) + } + + return &Iter{RawLink{fd, ""}}, err +} + +// Iter represents an attached bpf_iter. +type Iter struct { + RawLink +} + +// Open creates a new instance of the iterator. +// +// Reading from the returned reader triggers the BPF program. 
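+//
+// A sketch of draining the iterator output; it is an attached *Iter and
+// error handling is elided:
+//
+//	r, _ := it.Open()
+//	defer r.Close()
+//	_, _ = io.Copy(os.Stdout, r) // each Read invokes the program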
+func (it *Iter) Open() (io.ReadCloser, error) { + attr := &sys.IterCreateAttr{ + LinkFd: it.fd.Uint(), + } + + fd, err := sys.IterCreate(attr) + if err != nil { + return nil, fmt.Errorf("can't create iterator: %w", err) + } + + return fd.File("bpf_iter"), nil +} + +// union bpf_iter_link_info.map +type bpfIterLinkInfoMap struct { + map_fd uint32 +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe.go b/vendor/github.com/cilium/ebpf/link/kprobe.go new file mode 100644 index 0000000000..6f93a27a25 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe.go @@ -0,0 +1,369 @@ +package link + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeOptions defines additional parameters that will be used +// when loading Kprobes. +type KprobeOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Offset of the kprobe relative to the traced symbol. + // Can be used to insert kprobes at arbitrary offsets in kernel functions, + // e.g. in places where functions have been inlined. + Offset uint64 + // Increase the maximum number of concurrent invocations of a kretprobe. + // Required when tracing some long running functions in the kernel. + // + // Deprecated: this setting forces the use of an outdated kernel API and is not portable + // across kernel versions. + RetprobeMaxActive int + // Prefix used for the event name if the kprobe must be attached using tracefs. + // The group name will be formatted as `_`. + // The default empty string is equivalent to "ebpf" as the prefix. + TraceFSPrefix string +} + +func (ko *KprobeOptions) cookie() uint64 { + if ko == nil { + return 0 + } + return ko.Cookie +} + +// Kprobe attaches the given eBPF program to a perf event that fires when the +// given kernel symbol starts executing. See /proc/kallsyms for available +// symbols. For example, printk(): +// +// kp, err := Kprobe("printk", prog, nil) +// +// Losing the reference to the resulting Link (kp) will close the Kprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// If attaching to symbol fails, automatically retries with the running +// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls +// in a portable fashion. +// +// On kernels 6.11 and later, setting a kprobe on a nonexistent symbol using +// tracefs incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. +// +// The returned Link may implement [PerfEvent]. +func Kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, false) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(k, prog, opts.cookie()) + if err != nil { + k.Close() + return nil, err + } + + return lnk, nil +} + +// Kretprobe attaches the given eBPF program to a perf event that fires right +// before the given kernel symbol exits, with the function stack left intact. +// See /proc/kallsyms for available symbols. For example, printk(): +// +// kp, err := Kretprobe("printk", prog, nil) +// +// Losing the reference to the resulting Link (kp) will close the Kretprobe +// and prevent further execution of prog. 
The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// If attaching to symbol fails, automatically retries with the running +// platform's syscall prefix (e.g. __x64_) to support attaching to syscalls +// in a portable fashion. +// +// On kernels 5.10 and earlier, setting a kretprobe on a nonexistent symbol +// incorrectly returns [unix.EINVAL] instead of [os.ErrNotExist]. +// +// The returned Link may implement [PerfEvent]. +func Kretprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions) (Link, error) { + k, err := kprobe(symbol, prog, opts, true) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(k, prog, opts.cookie()) + if err != nil { + k.Close() + return nil, err + } + + return lnk, nil +} + +// isValidKprobeSymbol implements the equivalent of a regex match +// against "^[a-zA-Z_][0-9a-zA-Z_.]*$". +func isValidKprobeSymbol(s string) bool { + if len(s) < 1 { + return false + } + + for i, c := range []byte(s) { + switch { + case c >= 'a' && c <= 'z': + case c >= 'A' && c <= 'Z': + case c == '_': + case i > 0 && c >= '0' && c <= '9': + + // Allow `.` in symbol name. GCC-compiled kernel may change symbol name + // to have a `.isra.$n` suffix, like `udp_send_skb.isra.52`. + // See: https://gcc.gnu.org/gcc-10/changes.html + case i > 0 && c == '.': + + default: + return false + } + } + + return true +} + +// kprobe opens a perf event on the given symbol and attaches prog to it. +// If ret is true, create a kretprobe. +func kprobe(symbol string, prog *ebpf.Program, opts *KprobeOptions, ret bool) (*perfEvent, error) { + if symbol == "" { + return nil, fmt.Errorf("symbol name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if !isValidKprobeSymbol(symbol) { + return nil, fmt.Errorf("symbol '%s' must be a valid symbol in /proc/kallsyms: %w", symbol, errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not a Kprobe: %w", prog.Type(), errInvalidInput) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Pid: perfAllThreads, + Symbol: symbol, + Ret: ret, + } + + if opts != nil { + args.RetprobeMaxActive = opts.RetprobeMaxActive + args.Cookie = opts.Cookie + args.Offset = opts.Offset + args.Group = opts.TraceFSPrefix + } + + // Use kprobe PMU if the kernel has it available. + tp, err := pmuProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := linux.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = pmuProbe(args) + } + } + if err == nil { + return tp, nil + } + if !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_kprobe PMU (arch-specific fallback for %q): %w", symbol, err) + } + + // Use tracefs if kprobe PMU is missing. + args.Symbol = symbol + tp, err = tracefsProbe(args) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL) { + if prefix := linux.PlatformPrefix(); prefix != "" { + args.Symbol = prefix + symbol + tp, err = tracefsProbe(args) + } + } + if err != nil { + return nil, fmt.Errorf("creating tracefs event (arch-specific fallback for %q): %w", symbol, err) + } + + return tp, nil +} + +// pmuProbe opens a perf event based on a Performance Monitoring Unit. +// +// Requires at least a 4.17 kernel. 
+// e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU" +// 33ea4b24277b "perf/core: Implement the 'perf_uprobe' PMU" +// +// Returns ErrNotSupported if the kernel doesn't support perf_[k,u]probe PMU +func pmuProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + // Getting the PMU type will fail if the kernel doesn't support + // the perf_[k,u]probe PMU. + eventType, err := internal.ReadUint64FromFileOnce("%d\n", "/sys/bus/event_source/devices", args.Type.String(), "type") + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("%s: %w", args.Type, ErrNotSupported) + } + if err != nil { + return nil, err + } + + // Use tracefs if we want to set kretprobe's retprobeMaxActive. + if args.RetprobeMaxActive != 0 { + return nil, fmt.Errorf("pmu probe: non-zero retprobeMaxActive: %w", ErrNotSupported) + } + + var config uint64 + if args.Ret { + bit, err := internal.ReadUint64FromFileOnce("config:%d\n", "/sys/bus/event_source/devices", args.Type.String(), "/format/retprobe") + if err != nil { + return nil, err + } + config |= 1 << bit + } + + var ( + attr unix.PerfEventAttr + sp unsafe.Pointer + token string + ) + switch args.Type { + case tracefs.Kprobe: + // Create a pointer to a NUL-terminated string for the kernel. + sp, err = unsafeStringPtr(args.Symbol) + if err != nil { + return nil, err + } + + token = tracefs.KprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU kprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. Use Ext2 as probe_offset. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Kernel symbol to trace + Ext2: args.Offset, // Kernel symbol offset + Config: config, // Retprobe flag + } + case tracefs.Uprobe: + sp, err = unsafeStringPtr(args.Path) + if err != nil { + return nil, err + } + + if args.RefCtrOffset != 0 { + config |= args.RefCtrOffset << uprobeRefCtrOffsetShift + } + + token = tracefs.UprobeToken(args) + + attr = unix.PerfEventAttr{ + // The minimum size required for PMU uprobes is PERF_ATTR_SIZE_VER1, + // since it added the config2 (Ext2) field. The Size field controls the + // size of the internal buffer the kernel allocates for reading the + // perf_event_attr argument from userspace. + Size: unix.PERF_ATTR_SIZE_VER1, + Type: uint32(eventType), // PMU event type read from sysfs + Ext1: uint64(uintptr(sp)), // Uprobe path + Ext2: args.Offset, // Uprobe offset + Config: config, // RefCtrOffset, Retprobe flag + } + } + + cpu := 0 + if args.Pid != perfAllThreads { + cpu = -1 + } + rawFd, err := unix.PerfEventOpen(&attr, args.Pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + + // On some old kernels, kprobe PMU doesn't allow `.` in symbol names and + // return -EINVAL. Return ErrNotSupported to allow falling back to tracefs. + // https://github.com/torvalds/linux/blob/94710cac0ef4/kernel/trace/trace_kprobe.c#L340-L343 + if errors.Is(err, unix.EINVAL) && strings.Contains(args.Symbol, ".") { + return nil, fmt.Errorf("token %s: older kernels don't accept dots: %w", token, ErrNotSupported) + } + // Since commit 97c753e62e6c, ENOENT is correctly returned instead of EINVAL + // when trying to create a retprobe for a missing symbol. + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("token %s: not found: %w", token, err) + } + // Since commit ab105a4fb894, EILSEQ is returned when a kprobe sym+offset is resolved + // to an invalid insn boundary. The exact conditions that trigger this error are + // arch specific however. 
+ if errors.Is(err, unix.EILSEQ) { + return nil, fmt.Errorf("token %s: bad insn boundary: %w", token, os.ErrNotExist) + } + // Since at least commit cb9a19fe4aa51, ENOTSUPP is returned + // when attempting to set a uprobe on a trap instruction. + if errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("token %s: failed setting uprobe on offset %#x (possible trap insn): %w", token, args.Offset, err) + } + + if err != nil { + return nil, fmt.Errorf("token %s: opening perf event: %w", token, err) + } + + // Ensure the string pointer is not collected before PerfEventOpen returns. + runtime.KeepAlive(sp) + + fd, err := sys.NewFD(rawFd) + if err != nil { + return nil, err + } + + // Kernel has perf_[k,u]probe PMU available, initialize perf event. + return newPerfEvent(fd, nil), nil +} + +// tracefsProbe creates a trace event by writing an entry to /[k,u]probe_events. +// A new trace event group name is generated on every call to support creating +// multiple trace events for the same kernel or userspace symbol. +// Path and offset are only set in the case of uprobe(s) and are used to set +// the executable/library path on the filesystem and the offset where the probe is inserted. +// A perf event is then opened on the newly-created trace event and returned to the caller. +func tracefsProbe(args tracefs.ProbeArgs) (*perfEvent, error) { + groupPrefix := "ebpf" + if args.Group != "" { + groupPrefix = args.Group + } + + // Generate a random string for each trace event we attempt to create. + // This value is used as the 'group' token in tracefs to allow creating + // multiple kprobe trace events with the same name. + group, err := tracefs.RandomGroup(groupPrefix) + if err != nil { + return nil, fmt.Errorf("randomizing group name: %w", err) + } + args.Group = group + + // Create the [k,u]probe trace event using tracefs. + evt, err := tracefs.NewEvent(args) + if err != nil { + return nil, fmt.Errorf("creating probe entry on tracefs: %w", err) + } + + // Kprobes are ephemeral tracepoints and share the same perf event type. + fd, err := openTracepointPerfEvent(evt.ID(), args.Pid) + if err != nil { + // Make sure we clean up the created tracefs event when we return error. + // If a livepatch handler is already active on the symbol, the write to + // tracefs will succeed, a trace event will show up, but creating the + // perf event will fail with EBUSY. + _ = evt.Close() + return nil, err + } + + return newPerfEvent(fd, evt), nil +} diff --git a/vendor/github.com/cilium/ebpf/link/kprobe_multi.go b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go new file mode 100644 index 0000000000..094cb0538c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/kprobe_multi.go @@ -0,0 +1,191 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// KprobeMultiOptions defines additional parameters that will be used +// when opening a KprobeMulti Link. +type KprobeMultiOptions struct { + // Symbols takes a list of kernel symbol names to attach an ebpf program to. + // + // Mutually exclusive with Addresses. + Symbols []string + + // Addresses takes a list of kernel symbol addresses in case they can not + // be referred to by name. + // + // Note that only start addresses can be specified, since the fprobe API + // limits the attach point to the function entry or return. + // + // Mutually exclusive with Symbols. 
+ Addresses []uintptr + + // Cookies specifies arbitrary values that can be fetched from an eBPF + // program via `bpf_get_attach_cookie()`. + // + // If set, its length should be equal to the length of Symbols or Addresses. + // Each Cookie is assigned to the Symbol or Address specified at the + // corresponding slice index. + Cookies []uint64 +} + +// KprobeMulti attaches the given eBPF program to the entry point of a given set +// of kernel symbols. +// +// The difference with Kprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. +func KprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, 0) +} + +// KretprobeMulti attaches the given eBPF program to the return point of a given +// set of kernel symbols. +// +// The difference with Kretprobe() is that multi-kprobe accomplishes this in a +// single system call, making it significantly faster than attaching many +// probes one at a time. +// +// Requires at least Linux 5.18. +func KretprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions) (Link, error) { + return kprobeMulti(prog, opts, sys.BPF_F_KPROBE_MULTI_RETURN) +} + +func kprobeMulti(prog *ebpf.Program, opts KprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + syms := uint32(len(opts.Symbols)) + addrs := uint32(len(opts.Addresses)) + cookies := uint32(len(opts.Cookies)) + + if syms == 0 && addrs == 0 { + return nil, fmt.Errorf("one of Symbols or Addresses is required: %w", errInvalidInput) + } + if syms != 0 && addrs != 0 { + return nil, fmt.Errorf("Symbols and Addresses are mutually exclusive: %w", errInvalidInput) + } + if cookies > 0 && cookies != syms && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Symbols or Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + KprobeMultiFlags: flags, + } + + switch { + case syms != 0: + attr.Count = syms + attr.Syms = sys.NewStringSlicePointer(opts.Symbols) + + case addrs != 0: + attr.Count = addrs + attr.Addrs = sys.NewPointer(unsafe.Pointer(&opts.Addresses[0])) + } + + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateKprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("couldn't find one or more symbols: %w", os.ErrNotExist) + } + if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (missing kernel symbol or prog's AttachType not AttachTraceKprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkKprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &kprobeMultiLink{RawLink{fd, ""}}, nil +} + +type kprobeMultiLink struct { + RawLink +} + +var _ Link = (*kprobeMultiLink)(nil) + +func (kml *kprobeMultiLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("update kprobe_multi: %w", ErrNotSupported) +} + +func (kml *kprobeMultiLink) Info() (*Info, error) { + var info sys.KprobeMultiLinkInfo + if err := sys.ObjInfo(kml.fd, &info); err != nil { + return nil, fmt.Errorf("kprobe multi link info: %s", err) + } + extra := &KprobeMultiInfo{ + count: info.Count, + flags: info.Flags, + missed: info.Missed, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, 
+ }, nil +} + +var haveBPFLinkKprobeMulti = internal.NewFeatureTest("bpf_link_kprobe_multi", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_kpm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceKprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + fd, err := sys.LinkCreateKprobeMulti(&sys.LinkCreateKprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_KPROBE_MULTI, + Count: 1, + Syms: sys.NewStringSlicePointer([]string{"vprintk"}), + }) + switch { + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + // If CONFIG_FPROBE isn't set. + case errors.Is(err, unix.EOPNOTSUPP): + return internal.ErrNotSupported + case err != nil: + return err + } + + fd.Close() + + return nil +}, "5.18") diff --git a/vendor/github.com/cilium/ebpf/link/link.go b/vendor/github.com/cilium/ebpf/link/link.go new file mode 100644 index 0000000000..796769f8ea --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/link.go @@ -0,0 +1,537 @@ +package link + +import ( + "errors" + "fmt" + "os" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" +) + +var ErrNotSupported = internal.ErrNotSupported + +// Link represents a Program attached to a BPF hook. +type Link interface { + // Replace the current program with a new program. + // + // Passing a nil program is an error. May return an error wrapping ErrNotSupported. + Update(*ebpf.Program) error + + // Persist a link by pinning it into a bpffs. + // + // May return an error wrapping ErrNotSupported. + Pin(string) error + + // Undo a previous call to Pin. + // + // May return an error wrapping ErrNotSupported. + Unpin() error + + // Close frees resources. + // + // The link will be broken unless it has been successfully pinned. + // A link may continue past the lifetime of the process if Close is + // not called. + Close() error + + // Info returns metadata on a link. + // + // May return an error wrapping ErrNotSupported. + Info() (*Info, error) + + // Prevent external users from implementing this interface. + isLink() +} + +// NewLinkFromFD creates a link from a raw fd. +// +// Deprecated: use [NewFromFD] instead. +func NewLinkFromFD(fd int) (Link, error) { + return NewFromFD(fd) +} + +// NewFromFD creates a link from a raw fd. +// +// You should not use fd after calling this function. +func NewFromFD(fd int) (Link, error) { + sysFD, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return wrapRawLink(&RawLink{fd: sysFD}) +} + +// NewFromID returns the link associated with the given id. +// +// Returns ErrNotExist if there is no link with the given id. +func NewFromID(id ID) (Link, error) { + getFdAttr := &sys.LinkGetFdByIdAttr{Id: id} + fd, err := sys.LinkGetFdById(getFdAttr) + if err != nil { + return nil, fmt.Errorf("get link fd from ID %d: %w", id, err) + } + + return wrapRawLink(&RawLink{fd, ""}) +} + +// LoadPinnedLink loads a Link from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 5.7. 
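+//
+// A minimal sketch; the bpffs pin path is an assumption:
+//
+//	l, err := LoadPinnedLink("/sys/fs/bpf/my_link", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer l.Close()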
+func LoadPinnedLink(fileName string, opts *ebpf.LoadPinOptions) (Link, error) { + raw, err := loadPinnedRawLink(fileName, opts) + if err != nil { + return nil, err + } + + return wrapRawLink(raw) +} + +// wrap a RawLink in a more specific type if possible. +// +// The function takes ownership of raw and closes it on error. +func wrapRawLink(raw *RawLink) (_ Link, err error) { + defer func() { + if err != nil { + raw.Close() + } + }() + + info, err := raw.Info() + if err != nil { + return nil, err + } + + switch info.Type { + case RawTracepointType: + return &rawTracepoint{*raw}, nil + case TracingType: + return &tracing{*raw}, nil + case CgroupType: + return &linkCgroup{*raw}, nil + case IterType: + return &Iter{*raw}, nil + case NetNsType: + return &NetNsLink{*raw}, nil + case KprobeMultiType: + return &kprobeMultiLink{*raw}, nil + case UprobeMultiType: + return &uprobeMultiLink{*raw}, nil + case PerfEventType: + return &perfEventLink{*raw, nil}, nil + case TCXType: + return &tcxLink{*raw}, nil + case NetfilterType: + return &netfilterLink{*raw}, nil + case NetkitType: + return &netkitLink{*raw}, nil + case XDPType: + return &xdpLink{*raw}, nil + default: + return raw, nil + } +} + +// ID uniquely identifies a BPF link. +type ID = sys.LinkID + +// RawLinkOptions control the creation of a raw link. +type RawLinkOptions struct { + // File descriptor to attach to. This differs for each attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // BTF is the BTF of the attachment target. + BTF btf.TypeID + // Flags control the attach behaviour. + Flags uint32 +} + +// Info contains metadata on a link. +type Info struct { + Type Type + ID ID + Program ebpf.ProgramID + extra interface{} +} + +type TracingInfo struct { + AttachType sys.AttachType + TargetObjId uint32 + TargetBtfId sys.TypeID +} + +type CgroupInfo struct { + CgroupId uint64 + AttachType sys.AttachType + _ [4]byte +} + +type NetNsInfo struct { + NetnsIno uint32 + AttachType sys.AttachType +} + +type TCXInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type XDPInfo struct { + Ifindex uint32 +} + +type NetfilterInfo struct { + Pf uint32 + Hooknum uint32 + Priority int32 + Flags uint32 +} + +type NetkitInfo struct { + Ifindex uint32 + AttachType sys.AttachType +} + +type KprobeMultiInfo struct { + count uint32 + flags uint32 + missed uint64 +} + +// AddressCount is the number of addresses hooked by the kprobe. +func (kpm *KprobeMultiInfo) AddressCount() (uint32, bool) { + return kpm.count, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Flags() (uint32, bool) { + return kpm.flags, kpm.count > 0 +} + +func (kpm *KprobeMultiInfo) Missed() (uint64, bool) { + return kpm.missed, kpm.count > 0 +} + +type PerfEventInfo struct { + Type sys.PerfEventType + extra interface{} +} + +func (r *PerfEventInfo) Kprobe() *KprobeInfo { + e, _ := r.extra.(*KprobeInfo) + return e +} + +type KprobeInfo struct { + address uint64 + missed uint64 +} + +func (kp *KprobeInfo) Address() (uint64, bool) { + return kp.address, kp.address > 0 +} + +func (kp *KprobeInfo) Missed() (uint64, bool) { + return kp.missed, kp.address > 0 +} + +// Tracing returns tracing type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Tracing() *TracingInfo { + e, _ := r.extra.(*TracingInfo) + return e +} + +// Cgroup returns cgroup type-specific link info. +// +// Returns nil if the type-specific link info isn't available. 
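+//
+// For example, with an attached cgroup Link l (illustrative only):
+//
+//	info, _ := l.Info()
+//	if cg := info.Cgroup(); cg != nil {
+//		fmt.Println(cg.CgroupId, cg.AttachType)
+//	}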
+func (r Info) Cgroup() *CgroupInfo { + e, _ := r.extra.(*CgroupInfo) + return e +} + +// NetNs returns netns type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) NetNs() *NetNsInfo { + e, _ := r.extra.(*NetNsInfo) + return e +} + +// XDP returns XDP type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) XDP() *XDPInfo { + e, _ := r.extra.(*XDPInfo) + return e +} + +// TCX returns TCX type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) TCX() *TCXInfo { + e, _ := r.extra.(*TCXInfo) + return e +} + +// Netfilter returns netfilter type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netfilter() *NetfilterInfo { + e, _ := r.extra.(*NetfilterInfo) + return e +} + +// Netkit returns netkit type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) Netkit() *NetkitInfo { + e, _ := r.extra.(*NetkitInfo) + return e +} + +// KprobeMulti returns kprobe-multi type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) KprobeMulti() *KprobeMultiInfo { + e, _ := r.extra.(*KprobeMultiInfo) + return e +} + +// PerfEvent returns perf-event type-specific link info. +// +// Returns nil if the type-specific link info isn't available. +func (r Info) PerfEvent() *PerfEventInfo { + e, _ := r.extra.(*PerfEventInfo) + return e +} + +// RawLink is the low-level API to bpf_link. +// +// You should consider using the higher level interfaces in this +// package instead. +type RawLink struct { + fd *sys.FD + pinnedPath string +} + +// AttachRawLink creates a raw link. +func AttachRawLink(opts RawLinkOptions) (*RawLink, error) { + if err := haveBPFLink(); err != nil { + return nil, err + } + + if opts.Target < 0 { + return nil, fmt.Errorf("invalid target: %s", sys.ErrClosedFd) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateAttr{ + TargetFd: uint32(opts.Target), + ProgFd: uint32(progFd), + AttachType: sys.AttachType(opts.Attach), + TargetBtfId: opts.BTF, + Flags: opts.Flags, + } + fd, err := sys.LinkCreate(&attr) + if err != nil { + return nil, fmt.Errorf("create link: %w", err) + } + + return &RawLink{fd, ""}, nil +} + +func loadPinnedRawLink(fileName string, opts *ebpf.LoadPinOptions) (*RawLink, error) { + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, fmt.Errorf("load pinned link: %w", err) + } + + if typ != sys.BPF_TYPE_LINK { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Link", fileName) + } + + return &RawLink{fd, fileName}, nil +} + +func (l *RawLink) isLink() {} + +// FD returns the raw file descriptor. +func (l *RawLink) FD() int { + return l.fd.Int() +} + +// Close breaks the link. +// +// Use Pin if you want to make the link persistent. +func (l *RawLink) Close() error { + return l.fd.Close() +} + +// Pin persists a link past the lifetime of the process. +// +// Calling Close on a pinned Link will not break the link +// until the pin is removed. +func (l *RawLink) Pin(fileName string) error { + if err := sys.Pin(l.pinnedPath, fileName, l.fd); err != nil { + return err + } + l.pinnedPath = fileName + return nil +} + +// Unpin implements the Link interface. 
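+//
+// A pin/unpin round-trip sketch; the bpffs path is an assumption:
+//
+//	_ = l.Pin("/sys/fs/bpf/my_link") // link survives l.Close()
+//	// ... later ...
+//	_ = l.Unpin() // pin removed, Close breaks the link again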
+func (l *RawLink) Unpin() error { + if err := sys.Unpin(l.pinnedPath); err != nil { + return err + } + l.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Link has a non-empty pinned path. +func (l *RawLink) IsPinned() bool { + return l.pinnedPath != "" +} + +// Update implements the Link interface. +func (l *RawLink) Update(new *ebpf.Program) error { + return l.UpdateArgs(RawLinkUpdateOptions{ + New: new, + }) +} + +// RawLinkUpdateOptions control the behaviour of RawLink.UpdateArgs. +type RawLinkUpdateOptions struct { + New *ebpf.Program + Old *ebpf.Program + Flags uint32 +} + +// UpdateArgs updates a link based on args. +func (l *RawLink) UpdateArgs(opts RawLinkUpdateOptions) error { + newFd := opts.New.FD() + if newFd < 0 { + return fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + var oldFd int + if opts.Old != nil { + oldFd = opts.Old.FD() + if oldFd < 0 { + return fmt.Errorf("invalid replacement program: %s", sys.ErrClosedFd) + } + } + + attr := sys.LinkUpdateAttr{ + LinkFd: l.fd.Uint(), + NewProgFd: uint32(newFd), + OldProgFd: uint32(oldFd), + Flags: opts.Flags, + } + return sys.LinkUpdate(&attr) +} + +// Info returns metadata about the link. +// +// Linktype specific metadata is not included and can be retrieved +// via the linktype specific Info() method. +func (l *RawLink) Info() (*Info, error) { + var info sys.LinkInfo + + if err := sys.ObjInfo(l.fd, &info); err != nil { + return nil, fmt.Errorf("link info: %s", err) + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + nil, + }, nil +} + +// Iterator allows iterating over links attached into the kernel. +type Iterator struct { + // The ID of the current link. Only valid after a call to Next + ID ID + // The current link. Only valid until a call to Next. + // See Take if you want to retain the link. + Link Link + err error +} + +// Next retrieves the next link. +// +// Returns true if another link was found. Call [Iterator.Err] after the function returns false. +func (it *Iterator) Next() bool { + id := it.ID + for { + getIdAttr := &sys.LinkGetNextIdAttr{Id: id} + err := sys.LinkGetNextId(getIdAttr) + if errors.Is(err, os.ErrNotExist) { + // There are no more links. + break + } else if err != nil { + it.err = fmt.Errorf("get next link ID: %w", err) + break + } + + id = getIdAttr.NextId + l, err := NewFromID(id) + if errors.Is(err, os.ErrNotExist) { + // Couldn't load the link fast enough. Try next ID. + continue + } else if err != nil { + it.err = fmt.Errorf("get link for ID %d: %w", id, err) + break + } + + if it.Link != nil { + it.Link.Close() + } + it.ID, it.Link = id, l + return true + } + + // No more links or we encountered an error. + if it.Link != nil { + it.Link.Close() + } + it.Link = nil + return false +} + +// Take the ownership of the current link. +// +// It's the callers responsibility to close the link. +func (it *Iterator) Take() Link { + l := it.Link + it.Link = nil + return l +} + +// Err returns an error if iteration failed for some reason. 
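+//
+// A typical loop over all links on the system (illustrative):
+//
+//	var it Iterator
+//	defer it.Close()
+//	for it.Next() {
+//		info, _ := it.Link.Info()
+//		fmt.Println(it.ID, info.Type)
+//	}
+//	if err := it.Err(); err != nil {
+//		// handle error
+//	}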
+func (it *Iterator) Err() error { + return it.err +} + +func (it *Iterator) Close() { + if it.Link != nil { + it.Link.Close() + } +} diff --git a/vendor/github.com/cilium/ebpf/link/netfilter.go b/vendor/github.com/cilium/ebpf/link/netfilter.go new file mode 100644 index 0000000000..9436d11df9 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netfilter.go @@ -0,0 +1,90 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +const NetfilterIPDefrag NetfilterAttachFlags = 0 // Enable IP packet defragmentation + +type NetfilterAttachFlags uint32 + +type NetfilterOptions struct { + // Program must be a netfilter BPF program. + Program *ebpf.Program + // The protocol family. + ProtocolFamily uint32 + // The number of the hook you are interested in. + HookNumber uint32 + // Priority within hook + Priority int32 + // Extra link flags + Flags uint32 + // Netfilter flags + NetfilterFlags NetfilterAttachFlags +} + +type netfilterLink struct { + RawLink +} + +// AttachNetfilter links a netfilter BPF program to a netfilter hook. +func AttachNetfilter(opts NetfilterOptions) (Link, error) { + if opts.Program == nil { + return nil, fmt.Errorf("netfilter program is nil") + } + + if t := opts.Program.Type(); t != ebpf.Netfilter { + return nil, fmt.Errorf("invalid program type %s, expected netfilter", t) + } + + progFd := opts.Program.FD() + if progFd < 0 { + return nil, fmt.Errorf("invalid program: %s", sys.ErrClosedFd) + } + + attr := sys.LinkCreateNetfilterAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.BPF_NETFILTER, + Flags: opts.Flags, + Pf: uint32(opts.ProtocolFamily), + Hooknum: uint32(opts.HookNumber), + Priority: opts.Priority, + NetfilterFlags: uint32(opts.NetfilterFlags), + } + + fd, err := sys.LinkCreateNetfilter(&attr) + if err != nil { + return nil, fmt.Errorf("attach netfilter link: %w", err) + } + + return &netfilterLink{RawLink{fd, ""}}, nil +} + +func (*netfilterLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("netfilter update: %w", ErrNotSupported) +} + +func (nf *netfilterLink) Info() (*Info, error) { + var info sys.NetfilterLinkInfo + if err := sys.ObjInfo(nf.fd, &info); err != nil { + return nil, fmt.Errorf("netfilter link info: %s", err) + } + extra := &NetfilterInfo{ + Pf: info.Pf, + Hooknum: info.Hooknum, + Priority: info.Priority, + Flags: info.Flags, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +var _ Link = (*netfilterLink)(nil) diff --git a/vendor/github.com/cilium/ebpf/link/netkit.go b/vendor/github.com/cilium/ebpf/link/netkit.go new file mode 100644 index 0000000000..5eee3b023a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netkit.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type NetkitOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachNetkit* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachNetkit(opts NetkitOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateNetkitAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateNetkit(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveNetkit(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach netkit link: %w", err) + } + + return &netkitLink{RawLink{fd, ""}}, nil +} + +type netkitLink struct { + RawLink +} + +var _ Link = (*netkitLink)(nil) + +func (netkit *netkitLink) Info() (*Info, error) { + var info sys.NetkitLinkInfo + if err := sys.ObjInfo(netkit.fd, &info); err != nil { + return nil, fmt.Errorf("netkit link info: %s", err) + } + extra := &NetkitInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/netns.go b/vendor/github.com/cilium/ebpf/link/netns.go new file mode 100644 index 0000000000..b1edd340a3 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/netns.go @@ -0,0 +1,55 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// NetNsLink is a program attached to a network namespace. +type NetNsLink struct { + RawLink +} + +// AttachNetNs attaches a program to a network namespace. +func AttachNetNs(ns int, prog *ebpf.Program) (*NetNsLink, error) { + var attach ebpf.AttachType + switch t := prog.Type(); t { + case ebpf.FlowDissector: + attach = ebpf.AttachFlowDissector + case ebpf.SkLookup: + attach = ebpf.AttachSkLookup + default: + return nil, fmt.Errorf("can't attach %v to network namespace", t) + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: ns, + Program: prog, + Attach: attach, + }) + if err != nil { + return nil, err + } + + return &NetNsLink{*link}, nil +} + +func (ns *NetNsLink) Info() (*Info, error) { + var info sys.NetNsLinkInfo + if err := sys.ObjInfo(ns.fd, &info); err != nil { + return nil, fmt.Errorf("netns link info: %s", err) + } + extra := &NetNsInfo{ + NetnsIno: info.NetnsIno, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/perf_event.go b/vendor/github.com/cilium/ebpf/link/perf_event.go new file mode 100644 index 0000000000..7440e8b292 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/perf_event.go @@ -0,0 +1,332 @@ +package link + +import ( + "errors" + "fmt" + "os" + "runtime" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +// Getting the terminology right is usually the hardest part. 
For posterity and +// for staying sane during implementation: +// +// - trace event: Representation of a kernel runtime hook. Filesystem entries +// under /events. Can be tracepoints (static), kprobes or uprobes. +// Can be instantiated into perf events (see below). +// - tracepoint: A predetermined hook point in the kernel. Exposed as trace +// events in (sub)directories under /events. Cannot be closed or +// removed, they are static. +// - k(ret)probe: Ephemeral trace events based on entry or exit points of +// exported kernel symbols. kprobe-based (tracefs) trace events can be +// created system-wide by writing to the /kprobe_events file, or +// they can be scoped to the current process by creating PMU perf events. +// - u(ret)probe: Ephemeral trace events based on user provides ELF binaries +// and offsets. uprobe-based (tracefs) trace events can be +// created system-wide by writing to the /uprobe_events file, or +// they can be scoped to the current process by creating PMU perf events. +// - perf event: An object instantiated based on an existing trace event or +// kernel symbol. Referred to by fd in userspace. +// Exactly one eBPF program can be attached to a perf event. Multiple perf +// events can be created from a single trace event. Closing a perf event +// stops any further invocations of the attached eBPF program. + +var ( + errInvalidInput = tracefs.ErrInvalidInput +) + +const ( + perfAllThreads = -1 +) + +// A perfEvent represents a perf event kernel object. Exactly one eBPF program +// can be attached to it. It is created based on a tracefs trace event or a +// Performance Monitoring Unit (PMU). +type perfEvent struct { + // Trace event backing this perfEvent. May be nil. + tracefsEvent *tracefs.Event + + // This is the perf event FD. + fd *sys.FD +} + +func newPerfEvent(fd *sys.FD, event *tracefs.Event) *perfEvent { + pe := &perfEvent{event, fd} + // Both event and fd have their own finalizer, but we want to + // guarantee that they are closed in a certain order. + runtime.SetFinalizer(pe, (*perfEvent).Close) + return pe +} + +func (pe *perfEvent) Close() error { + runtime.SetFinalizer(pe, nil) + + if err := pe.fd.Close(); err != nil { + return fmt.Errorf("closing perf event fd: %w", err) + } + + if pe.tracefsEvent != nil { + return pe.tracefsEvent.Close() + } + + return nil +} + +// PerfEvent is implemented by some Link types which use a perf event under +// the hood. +type PerfEvent interface { + // PerfEvent returns a file for the underlying perf event. + // + // It is the callers responsibility to close the returned file. + // + // Making changes to the associated perf event lead to + // undefined behaviour. + PerfEvent() (*os.File, error) +} + +// perfEventLink represents a bpf perf link. 
+type perfEventLink struct { + RawLink + pe *perfEvent +} + +func (pl *perfEventLink) isLink() {} + +func (pl *perfEventLink) Close() error { + if err := pl.fd.Close(); err != nil { + return fmt.Errorf("perf link close: %w", err) + } + + // when created from pinned link + if pl.pe == nil { + return nil + } + + if err := pl.pe.Close(); err != nil { + return fmt.Errorf("perf event close: %w", err) + } + return nil +} + +func (pl *perfEventLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("perf event link update: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventLink)(nil) + +func (pl *perfEventLink) PerfEvent() (*os.File, error) { + // when created from pinned link + if pl.pe == nil { + return nil, ErrNotSupported + } + + fd, err := pl.pe.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +func (pl *perfEventLink) Info() (*Info, error) { + var info sys.PerfEventLinkInfo + if err := sys.ObjInfo(pl.fd, &info); err != nil { + return nil, fmt.Errorf("perf event link info: %s", err) + } + + var extra2 interface{} + switch info.PerfEventType { + case sys.BPF_PERF_EVENT_KPROBE, sys.BPF_PERF_EVENT_KRETPROBE: + var kprobeInfo sys.KprobeLinkInfo + if err := sys.ObjInfo(pl.fd, &kprobeInfo); err != nil { + return nil, fmt.Errorf("kprobe link info: %s", err) + } + extra2 = &KprobeInfo{ + address: kprobeInfo.Addr, + missed: kprobeInfo.Missed, + } + } + + extra := &PerfEventInfo{ + Type: info.PerfEventType, + extra: extra2, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// perfEventIoctl implements Link and handles the perf event lifecycle +// via ioctl(). +type perfEventIoctl struct { + *perfEvent +} + +func (pi *perfEventIoctl) isLink() {} + +// Since 4.15 (e87c6bc3852b "bpf: permit multiple bpf attachments for a single perf event"), +// calling PERF_EVENT_IOC_SET_BPF appends the given program to a prog_array +// owned by the perf event, which means multiple programs can be attached +// simultaneously. +// +// Before 4.15, calling PERF_EVENT_IOC_SET_BPF more than once on a perf event +// returns EEXIST. +// +// Detaching a program from a perf event is currently not possible, so a +// program replacement mechanism cannot be implemented for perf events. +func (pi *perfEventIoctl) Update(_ *ebpf.Program) error { + return fmt.Errorf("perf event ioctl update: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Pin(string) error { + return fmt.Errorf("perf event ioctl pin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Unpin() error { + return fmt.Errorf("perf event ioctl unpin: %w", ErrNotSupported) +} + +func (pi *perfEventIoctl) Info() (*Info, error) { + return nil, fmt.Errorf("perf event ioctl info: %w", ErrNotSupported) +} + +var _ PerfEvent = (*perfEventIoctl)(nil) + +func (pi *perfEventIoctl) PerfEvent() (*os.File, error) { + fd, err := pi.fd.Dup() + if err != nil { + return nil, err + } + + return fd.File("perf-event"), nil +} + +// attach the given eBPF prog to the perf event stored in pe. +// pe must contain a valid perf event fd. +// prog's type must match the program type stored in pe. 
+func attachPerfEvent(pe *perfEvent, prog *ebpf.Program, cookie uint64) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + if prog.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + if err := haveBPFLinkPerfEvent(); err == nil { + return attachPerfEventLink(pe, prog, cookie) + } + + if cookie != 0 { + return nil, fmt.Errorf("cookies are not supported: %w", ErrNotSupported) + } + + return attachPerfEventIoctl(pe, prog) +} + +func attachPerfEventIoctl(pe *perfEvent, prog *ebpf.Program) (*perfEventIoctl, error) { + // Assign the eBPF program to the perf event. + err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_SET_BPF, prog.FD()) + if err != nil { + return nil, fmt.Errorf("setting perf event bpf program: %w", err) + } + + // PERF_EVENT_IOC_ENABLE and _DISABLE ignore their given values. + if err := unix.IoctlSetInt(pe.fd.Int(), unix.PERF_EVENT_IOC_ENABLE, 0); err != nil { + return nil, fmt.Errorf("enable perf event: %s", err) + } + + return &perfEventIoctl{pe}, nil +} + +// Use the bpf api to attach the perf event (BPF_LINK_TYPE_PERF_EVENT, 5.15+). +// +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +func attachPerfEventLink(pe *perfEvent, prog *ebpf.Program, cookie uint64) (*perfEventLink, error) { + fd, err := sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + TargetFd: pe.fd.Uint(), + AttachType: sys.BPF_PERF_EVENT, + BpfCookie: cookie, + }) + if err != nil { + return nil, fmt.Errorf("cannot create bpf perf link: %v", err) + } + + return &perfEventLink{RawLink{fd: fd}, pe}, nil +} + +// unsafeStringPtr returns an unsafe.Pointer to a NUL-terminated copy of str. +func unsafeStringPtr(str string) (unsafe.Pointer, error) { + p, err := unix.BytePtrFromString(str) + if err != nil { + return nil, err + } + return unsafe.Pointer(p), nil +} + +// openTracepointPerfEvent opens a tracepoint-type perf event. System-wide +// [k,u]probes created by writing to /[k,u]probe_events are tracepoints +// behind the scenes, and can be attached to using these perf events. +func openTracepointPerfEvent(tid uint64, pid int) (*sys.FD, error) { + attr := unix.PerfEventAttr{ + Type: unix.PERF_TYPE_TRACEPOINT, + Config: tid, + Sample_type: unix.PERF_SAMPLE_RAW, + Sample: 1, + Wakeup: 1, + } + + cpu := 0 + if pid != perfAllThreads { + cpu = -1 + } + fd, err := unix.PerfEventOpen(&attr, pid, cpu, -1, unix.PERF_FLAG_FD_CLOEXEC) + if err != nil { + return nil, fmt.Errorf("opening tracepoint perf event: %w", err) + } + + return sys.NewFD(fd) +} + +// Probe BPF perf link. 
+// +// https://elixir.bootlin.com/linux/v5.16.8/source/kernel/bpf/syscall.c#L4307 +// https://github.com/torvalds/linux/commit/b89fbfbb854c9afc3047e8273cc3a694650b802e +var haveBPFLinkPerfEvent = internal.NewFeatureTest("bpf_link_perf_event", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_bpf_perf_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + return err + } + defer prog.Close() + + _, err = sys.LinkCreatePerfEvent(&sys.LinkCreatePerfEventAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_PERF_EVENT, + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "5.15") diff --git a/vendor/github.com/cilium/ebpf/link/program.go b/vendor/github.com/cilium/ebpf/link/program.go new file mode 100644 index 0000000000..d8a2a15f93 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/program.go @@ -0,0 +1,107 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawAttachProgramOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int + // Program to attach. + Program *ebpf.Program + // Attach must match the attach type of Program. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and F_REPLACE. Optional. + Flags uint32 + // Only attach if the internal revision matches the given value. + ExpectedRevision uint64 +} + +// RawAttachProgram is a low level wrapper around BPF_PROG_ATTACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. +func RawAttachProgram(opts RawAttachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.ProgAttachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + AttachFlags: uint32(opts.Flags), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("attach program: %w", err) + } + + if flags == sys.BPF_F_REPLACE { + // Ensure that replacing a program works on old kernels. + attr.ReplaceBpfFd = fdOrID + } else { + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + } + + if err := sys.ProgAttach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("attach program: %w", err) + } + runtime.KeepAlive(opts.Program) + + return nil +} + +type RawDetachProgramOptions RawAttachProgramOptions + +// RawDetachProgram is a low level wrapper around BPF_PROG_DETACH. +// +// You should use one of the higher level abstractions available in this +// package if possible. 
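+//
+// A sketch mirroring an earlier [RawAttachProgram] call; cgroupFD and
+// prog are assumptions:
+//
+//	err := RawDetachProgram(RawDetachProgramOptions{
+//		Target:  cgroupFD,
+//		Program: prog,
+//		Attach:  ebpf.AttachCGroupInetIngress,
+//	})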
+func RawDetachProgram(opts RawDetachProgramOptions) error { + if opts.Flags&anchorFlags != 0 { + return fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.ProgDetachAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachBpfFd: uint32(opts.Program.FD()), + AttachType: uint32(opts.Attach), + ExpectedRevision: opts.ExpectedRevision, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return fmt.Errorf("detach program: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.AttachFlags |= flags + } + + if err := sys.ProgDetach(&attr); err != nil { + if haveFeatErr := haveProgAttach(); haveFeatErr != nil { + return haveFeatErr + } + return fmt.Errorf("can't detach program: %w", err) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/link/query.go b/vendor/github.com/cilium/ebpf/link/query.go new file mode 100644 index 0000000000..fe534f8efa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/query.go @@ -0,0 +1,111 @@ +package link + +import ( + "fmt" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// QueryOptions defines additional parameters when querying for programs. +type QueryOptions struct { + // Target to query. This is usually a file descriptor but may refer to + // something else based on the attach type. + Target int + // Attach specifies the AttachType of the programs queried for + Attach ebpf.AttachType + // QueryFlags are flags for BPF_PROG_QUERY, e.g. BPF_F_QUERY_EFFECTIVE + QueryFlags uint32 +} + +// QueryResult describes which programs and links are active. +type QueryResult struct { + // List of attached programs. + Programs []AttachedProgram + + // Incremented by one every time the set of attached programs changes. + // May be zero if not supported by the [ebpf.AttachType]. + Revision uint64 +} + +// HaveLinkInfo returns true if the kernel supports querying link information +// for a particular [ebpf.AttachType]. +func (qr *QueryResult) HaveLinkInfo() bool { + return qr.Revision > 0 +} + +type AttachedProgram struct { + ID ebpf.ProgramID + linkID ID +} + +// LinkID returns the ID associated with the program. +// +// Returns 0, false if the kernel doesn't support retrieving the ID or if the +// program wasn't attached via a link. See [QueryResult.HaveLinkInfo] if you +// need to tell the two apart. +func (ap *AttachedProgram) LinkID() (ID, bool) { + return ap.linkID, ap.linkID != 0 +} + +// QueryPrograms retrieves a list of programs for the given AttachType. +// +// Returns a slice of attached programs, which may be empty. +// revision counts how many times the set of attached programs has changed and +// may be zero if not supported by the [ebpf.AttachType]. +// Returns ErrNotSupportd on a kernel without BPF_PROG_QUERY +func QueryPrograms(opts QueryOptions) (*QueryResult, error) { + // query the number of programs to allocate correct slice size + attr := sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + } + err := sys.ProgQuery(&attr) + if err != nil { + if haveFeatErr := haveProgQuery(); haveFeatErr != nil { + return nil, fmt.Errorf("query programs: %w", haveFeatErr) + } + return nil, fmt.Errorf("query programs: %w", err) + } + if attr.Count == 0 { + return &QueryResult{Revision: attr.Revision}, nil + } + + // The minimum bpf_mprog revision is 1, so we can use the field to detect + // whether the attach type supports link ids. 
+ haveLinkIDs := attr.Revision != 0 + + count := attr.Count + progIds := make([]ebpf.ProgramID, count) + attr = sys.ProgQueryAttr{ + TargetFdOrIfindex: uint32(opts.Target), + AttachType: sys.AttachType(opts.Attach), + QueryFlags: opts.QueryFlags, + Count: count, + ProgIds: sys.NewPointer(unsafe.Pointer(&progIds[0])), + } + + var linkIds []ID + if haveLinkIDs { + linkIds = make([]ID, count) + attr.LinkIds = sys.NewPointer(unsafe.Pointer(&linkIds[0])) + } + + if err := sys.ProgQuery(&attr); err != nil { + return nil, fmt.Errorf("query programs: %w", err) + } + + // NB: attr.Count might have changed between the two syscalls. + var programs []AttachedProgram + for i, id := range progIds[:attr.Count] { + ap := AttachedProgram{ID: id} + if haveLinkIDs { + ap.linkID = linkIds[i] + } + programs = append(programs, ap) + } + + return &QueryResult{programs, attr.Revision}, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go new file mode 100644 index 0000000000..925e621cbb --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/raw_tracepoint.go @@ -0,0 +1,87 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type RawTracepointOptions struct { + // Tracepoint name. + Name string + // Program must be of type RawTracepoint* + Program *ebpf.Program +} + +// AttachRawTracepoint links a BPF program to a raw_tracepoint. +// +// Requires at least Linux 4.17. +func AttachRawTracepoint(opts RawTracepointOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.RawTracepoint && t != ebpf.RawTracepointWritable { + return nil, fmt.Errorf("invalid program type %s, expected RawTracepoint(Writable)", t) + } + if opts.Program.FD() < 0 { + return nil, fmt.Errorf("invalid program: %w", sys.ErrClosedFd) + } + + fd, err := sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + Name: sys.NewStringPointer(opts.Name), + ProgFd: uint32(opts.Program.FD()), + }) + if err != nil { + return nil, err + } + + err = haveBPFLink() + if errors.Is(err, ErrNotSupported) { + // Prior to commit 70ed506c3bbc ("bpf: Introduce pinnable bpf_link abstraction") + // raw_tracepoints are just a plain fd. 
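+ // Wrap the fd directly in that case; closing the returned Link closes the
+ // raw tracepoint fd, which also detaches the program.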
+ return &simpleRawTracepoint{fd}, nil + } + + if err != nil { + return nil, err + } + + return &rawTracepoint{RawLink{fd: fd}}, nil +} + +type simpleRawTracepoint struct { + fd *sys.FD +} + +var _ Link = (*simpleRawTracepoint)(nil) + +func (frt *simpleRawTracepoint) isLink() {} + +func (frt *simpleRawTracepoint) Close() error { + return frt.fd.Close() +} + +func (frt *simpleRawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Pin(string) error { + return fmt.Errorf("pin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Unpin() error { + return fmt.Errorf("unpin raw_tracepoint: %w", ErrNotSupported) +} + +func (frt *simpleRawTracepoint) Info() (*Info, error) { + return nil, fmt.Errorf("can't get raw_tracepoint info: %w", ErrNotSupported) +} + +type rawTracepoint struct { + RawLink +} + +var _ Link = (*rawTracepoint)(nil) + +func (rt *rawTracepoint) Update(_ *ebpf.Program) error { + return fmt.Errorf("update raw_tracepoint: %w", ErrNotSupported) +} diff --git a/vendor/github.com/cilium/ebpf/link/socket_filter.go b/vendor/github.com/cilium/ebpf/link/socket_filter.go new file mode 100644 index 0000000000..84f0b656f8 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/socket_filter.go @@ -0,0 +1,40 @@ +package link + +import ( + "syscall" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/unix" +) + +// AttachSocketFilter attaches a SocketFilter BPF program to a socket. +func AttachSocketFilter(conn syscall.Conn, program *ebpf.Program) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_ATTACH_BPF, program.FD()) + }) + if ssoErr != nil { + return ssoErr + } + return err +} + +// DetachSocketFilter detaches a SocketFilter BPF program from a socket. +func DetachSocketFilter(conn syscall.Conn) error { + rawConn, err := conn.SyscallConn() + if err != nil { + return err + } + var ssoErr error + err = rawConn.Control(func(fd uintptr) { + ssoErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_DETACH_BPF, 0) + }) + if ssoErr != nil { + return ssoErr + } + return err +} diff --git a/vendor/github.com/cilium/ebpf/link/syscalls.go b/vendor/github.com/cilium/ebpf/link/syscalls.go new file mode 100644 index 0000000000..25951b017d --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/syscalls.go @@ -0,0 +1,200 @@ +package link + +import ( + "errors" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// Type is the kind of link. +type Type = sys.LinkType + +// Valid link types. 
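+//
+// These mirror the kernel's enum bpf_link_type and are reported by
+// [Info.Type] for an attached link.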
+const ( + UnspecifiedType = sys.BPF_LINK_TYPE_UNSPEC + RawTracepointType = sys.BPF_LINK_TYPE_RAW_TRACEPOINT + TracingType = sys.BPF_LINK_TYPE_TRACING + CgroupType = sys.BPF_LINK_TYPE_CGROUP + IterType = sys.BPF_LINK_TYPE_ITER + NetNsType = sys.BPF_LINK_TYPE_NETNS + XDPType = sys.BPF_LINK_TYPE_XDP + PerfEventType = sys.BPF_LINK_TYPE_PERF_EVENT + KprobeMultiType = sys.BPF_LINK_TYPE_KPROBE_MULTI + TCXType = sys.BPF_LINK_TYPE_TCX + UprobeMultiType = sys.BPF_LINK_TYPE_UPROBE_MULTI + NetfilterType = sys.BPF_LINK_TYPE_NETFILTER + NetkitType = sys.BPF_LINK_TYPE_NETKIT +) + +var haveProgAttach = internal.NewFeatureTest("BPF_PROG_ATTACH", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + if err != nil { + return internal.ErrNotSupported + } + + // BPF_PROG_ATTACH was introduced at the same time as CGgroupSKB, + // so being able to load the program is enough to infer that we + // have the syscall. + prog.Close() + return nil +}, "4.10") + +var haveProgAttachReplace = internal.NewFeatureTest("BPF_PROG_ATTACH atomic replacement of MULTI progs", func() error { + if err := haveProgAttach(); err != nil { + return err + } + + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.CGroupSKB, + AttachType: ebpf.AttachCGroupInetIngress, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + + // We know that we have BPF_PROG_ATTACH since we can load CGroupSKB programs. + // If passing BPF_F_REPLACE gives us EINVAL we know that the feature isn't + // present. + attr := sys.ProgAttachAttr{ + // We rely on this being checked after attachFlags. + TargetFdOrIfindex: ^uint32(0), + AttachBpfFd: uint32(prog.FD()), + AttachType: uint32(ebpf.AttachCGroupInetIngress), + AttachFlags: uint32(flagReplace), + } + + err = sys.ProgAttach(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "5.5") + +var haveBPFLink = internal.NewFeatureTest("bpf_link", func() error { + attr := sys.LinkCreateAttr{ + // This is a hopefully invalid file descriptor, which triggers EBADF. + TargetFd: ^uint32(0), + ProgFd: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + _, err := sys.LinkCreate(&attr) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "5.7") + +var haveProgQuery = internal.NewFeatureTest("BPF_PROG_QUERY", func() error { + attr := sys.ProgQueryAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect EBADF here + // as an indication that the feature is present. 
+ TargetFdOrIfindex: ^uint32(0), + AttachType: sys.AttachType(ebpf.AttachCGroupInetIngress), + } + + err := sys.ProgQuery(&attr) + + if errors.Is(err, unix.EBADF) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "4.15") + +var haveTCX = internal.NewFeatureTest("tcx", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateTcxAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachTCXIngress), + } + + _, err = sys.LinkCreateTcx(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "6.6") + +var haveNetkit = internal.NewFeatureTest("netkit", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Type: ebpf.SchedCLS, + License: "MIT", + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + }) + + if err != nil { + return internal.ErrNotSupported + } + + defer prog.Close() + attr := sys.LinkCreateNetkitAttr{ + // We rely on this being checked during the syscall. + // With an otherwise correct payload we expect ENODEV here + // as an indication that the feature is present. + TargetIfindex: ^uint32(0), + ProgFd: uint32(prog.FD()), + AttachType: sys.AttachType(ebpf.AttachNetkitPrimary), + } + + _, err = sys.LinkCreateNetkit(&attr) + + if errors.Is(err, unix.ENODEV) { + return nil + } + if err != nil { + return ErrNotSupported + } + return errors.New("syscall succeeded unexpectedly") +}, "6.7") diff --git a/vendor/github.com/cilium/ebpf/link/tcx.go b/vendor/github.com/cilium/ebpf/link/tcx.go new file mode 100644 index 0000000000..ac045b71da --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tcx.go @@ -0,0 +1,89 @@ +package link + +import ( + "fmt" + "runtime" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +type TCXOptions struct { + // Index of the interface to attach to. + Interface int + // Program to attach. + Program *ebpf.Program + // One of the AttachTCX* constants. + Attach ebpf.AttachType + // Attach relative to an anchor. Optional. + Anchor Anchor + // Only attach if the expected revision matches. + ExpectedRevision uint64 + // Flags control the attach behaviour. Specify an Anchor instead of + // F_LINK, F_ID, F_BEFORE, F_AFTER and R_REPLACE. Optional. 
+ Flags uint32 +} + +func AttachTCX(opts TCXOptions) (Link, error) { + if opts.Interface < 0 { + return nil, fmt.Errorf("interface %d is out of bounds", opts.Interface) + } + + if opts.Flags&anchorFlags != 0 { + return nil, fmt.Errorf("disallowed flags: use Anchor to specify attach target") + } + + attr := sys.LinkCreateTcxAttr{ + ProgFd: uint32(opts.Program.FD()), + AttachType: sys.AttachType(opts.Attach), + TargetIfindex: uint32(opts.Interface), + ExpectedRevision: opts.ExpectedRevision, + Flags: opts.Flags, + } + + if opts.Anchor != nil { + fdOrID, flags, err := opts.Anchor.anchor() + if err != nil { + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + attr.RelativeFdOrId = fdOrID + attr.Flags |= flags + } + + fd, err := sys.LinkCreateTcx(&attr) + runtime.KeepAlive(opts.Program) + runtime.KeepAlive(opts.Anchor) + if err != nil { + if haveFeatErr := haveTCX(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, fmt.Errorf("attach tcx link: %w", err) + } + + return &tcxLink{RawLink{fd, ""}}, nil +} + +type tcxLink struct { + RawLink +} + +var _ Link = (*tcxLink)(nil) + +func (tcx *tcxLink) Info() (*Info, error) { + var info sys.TcxLinkInfo + if err := sys.ObjInfo(tcx.fd, &info); err != nil { + return nil, fmt.Errorf("tcx link info: %s", err) + } + extra := &TCXInfo{ + Ifindex: info.Ifindex, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracepoint.go b/vendor/github.com/cilium/ebpf/link/tracepoint.go new file mode 100644 index 0000000000..6fc78b9828 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracepoint.go @@ -0,0 +1,70 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/tracefs" +) + +// TracepointOptions defines additional parameters that will be used +// when loading Tracepoints. +type TracepointOptions struct { + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 +} + +// Tracepoint attaches the given eBPF program to the tracepoint with the given +// group and name. See /sys/kernel/tracing/events to find available +// tracepoints. The top-level directory is the group, the event's subdirectory +// is the name. Example: +// +// tp, err := Tracepoint("syscalls", "sys_enter_fork", prog, nil) +// +// Losing the reference to the resulting Link (tp) will close the Tracepoint +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Note that attaching eBPF programs to syscalls (sys_enter_*/sys_exit_*) is +// only possible as of kernel 4.14 (commit cf5f5ce). +// +// The returned Link may implement [PerfEvent]. 
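+//
+// A slightly fuller sketch with cleanup, assuming prog is a loaded TracePoint
+// program (the tracepoint name and cookie are only examples; cookies need
+// kernel 5.15+):
+//
+//	tp, err := Tracepoint("syscalls", "sys_enter_openat", prog, &TracepointOptions{Cookie: 23})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer tp.Close()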
+func Tracepoint(group, name string, prog *ebpf.Program, opts *TracepointOptions) (Link, error) { + if group == "" || name == "" { + return nil, fmt.Errorf("group and name cannot be empty: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.TracePoint { + return nil, fmt.Errorf("eBPF program type %s is not a Tracepoint: %w", prog.Type(), errInvalidInput) + } + + tid, err := tracefs.EventID(group, name) + if err != nil { + return nil, err + } + + fd, err := openTracepointPerfEvent(tid, perfAllThreads) + if err != nil { + return nil, err + } + + var cookie uint64 + if opts != nil { + cookie = opts.Cookie + } + + pe := newPerfEvent(fd, nil) + + lnk, err := attachPerfEvent(pe, prog, cookie) + if err != nil { + pe.Close() + return nil, err + } + + return lnk, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/tracing.go b/vendor/github.com/cilium/ebpf/link/tracing.go new file mode 100644 index 0000000000..a461007f9f --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/tracing.go @@ -0,0 +1,218 @@ +package link + +import ( + "errors" + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +type tracing struct { + RawLink +} + +func (f *tracing) Update(_ *ebpf.Program) error { + return fmt.Errorf("tracing update: %w", ErrNotSupported) +} + +func (f *tracing) Info() (*Info, error) { + var info sys.TracingLinkInfo + if err := sys.ObjInfo(f.fd, &info); err != nil { + return nil, fmt.Errorf("tracing link info: %s", err) + } + extra := &TracingInfo{ + TargetObjId: info.TargetObjId, + TargetBtfId: info.TargetBtfId, + AttachType: info.AttachType, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} + +// AttachFreplace attaches the given eBPF program to the function it replaces. +// +// The program and name can either be provided at link time, or can be provided +// at program load time. If they were provided at load time, they should be nil +// and empty respectively here, as they will be ignored by the kernel. +// Examples: +// +// AttachFreplace(dispatcher, "function", replacement) +// AttachFreplace(nil, "", replacement) +func AttachFreplace(targetProg *ebpf.Program, name string, prog *ebpf.Program) (Link, error) { + if (name == "") != (targetProg == nil) { + return nil, fmt.Errorf("must provide both or neither of name and targetProg: %w", errInvalidInput) + } + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Extension { + return nil, fmt.Errorf("eBPF program type %s is not an Extension: %w", prog.Type(), errInvalidInput) + } + + var ( + target int + typeID btf.TypeID + ) + if targetProg != nil { + btfHandle, err := targetProg.Handle() + if err != nil { + return nil, err + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return nil, err + } + + var function *btf.Func + if err := spec.TypeByName(name, &function); err != nil { + return nil, err + } + + target = targetProg.FD() + typeID, err = spec.TypeID(function) + if err != nil { + return nil, err + } + } + + link, err := AttachRawLink(RawLinkOptions{ + Target: target, + Program: prog, + Attach: ebpf.AttachNone, + BTF: typeID, + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. 
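+ // bpf_arch_text_poke is only implemented on some architectures, so treat
+ // this as freplace being unavailable rather than as a hard failure.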
+ return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) + } + if err != nil { + return nil, err + } + + return &tracing{*link}, nil +} + +type TracingOptions struct { + // Program must be of type Tracing with attach type + // AttachTraceFEntry/AttachTraceFExit/AttachModifyReturn or + // AttachTraceRawTp. + Program *ebpf.Program + // Program attach type. Can be one of: + // - AttachTraceFEntry + // - AttachTraceFExit + // - AttachModifyReturn + // - AttachTraceRawTp + // This field is optional. + AttachType ebpf.AttachType + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + Cookie uint64 +} + +type LSMOptions struct { + // Program must be of type LSM with attach type + // AttachLSMMac. + Program *ebpf.Program + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + Cookie uint64 +} + +// attachBTFID links all BPF program types (Tracing/LSM) that they attach to a btf_id. +func attachBTFID(program *ebpf.Program, at ebpf.AttachType, cookie uint64) (Link, error) { + if program.FD() < 0 { + return nil, fmt.Errorf("invalid program %w", sys.ErrClosedFd) + } + + var ( + fd *sys.FD + err error + ) + switch at { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachTraceRawTp, + ebpf.AttachModifyReturn, ebpf.AttachLSMMac: + // Attach via BPF link + fd, err = sys.LinkCreateTracing(&sys.LinkCreateTracingAttr{ + ProgFd: uint32(program.FD()), + AttachType: sys.AttachType(at), + Cookie: cookie, + }) + if err == nil { + break + } + if !errors.Is(err, unix.EINVAL) && !errors.Is(err, sys.ENOTSUPP) { + return nil, fmt.Errorf("create tracing link: %w", err) + } + fallthrough + case ebpf.AttachNone: + // Attach via RawTracepointOpen + if cookie > 0 { + return nil, fmt.Errorf("create raw tracepoint with cookie: %w", ErrNotSupported) + } + + fd, err = sys.RawTracepointOpen(&sys.RawTracepointOpenAttr{ + ProgFd: uint32(program.FD()), + }) + if errors.Is(err, sys.ENOTSUPP) { + // This may be returned by bpf_tracing_prog_attach via bpf_arch_text_poke. + return nil, fmt.Errorf("create raw tracepoint: %w", ErrNotSupported) + } + if err != nil { + return nil, fmt.Errorf("create raw tracepoint: %w", err) + } + default: + return nil, fmt.Errorf("invalid attach type: %s", at.String()) + } + + raw := RawLink{fd: fd} + info, err := raw.Info() + if err != nil { + raw.Close() + return nil, err + } + + if info.Type == RawTracepointType { + // Sadness upon sadness: a Tracing program with AttachRawTp returns + // a raw_tracepoint link. Other types return a tracing link. + return &rawTracepoint{raw}, nil + } + return &tracing{raw}, nil +} + +// AttachTracing links a tracing (fentry/fexit/fmod_ret) BPF program or +// a BTF-powered raw tracepoint (tp_btf) BPF Program to a BPF hook defined +// in kernel modules. +func AttachTracing(opts TracingOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.Tracing { + return nil, fmt.Errorf("invalid program type %s, expected Tracing", t) + } + + switch opts.AttachType { + case ebpf.AttachTraceFEntry, ebpf.AttachTraceFExit, ebpf.AttachModifyReturn, + ebpf.AttachTraceRawTp, ebpf.AttachNone: + default: + return nil, fmt.Errorf("invalid attach type: %s", opts.AttachType.String()) + } + + return attachBTFID(opts.Program, opts.AttachType, opts.Cookie) +} + +// AttachLSM links a Linux security module (LSM) BPF Program to a BPF +// hook defined in kernel modules. 
+func AttachLSM(opts LSMOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.LSM { + return nil, fmt.Errorf("invalid program type %s, expected LSM", t) + } + + return attachBTFID(opts.Program, ebpf.AttachLSMMac, opts.Cookie) +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe.go b/vendor/github.com/cilium/ebpf/link/uprobe.go new file mode 100644 index 0000000000..1852a3fadd --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe.go @@ -0,0 +1,335 @@ +package link + +import ( + "debug/elf" + "errors" + "fmt" + "os" + "sync" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/tracefs" +) + +var ( + uprobeRefCtrOffsetPMUPath = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset" + // elixir.bootlin.com/linux/v5.15-rc7/source/kernel/events/core.c#L9799 + uprobeRefCtrOffsetShift = 32 + haveRefCtrOffsetPMU = internal.NewFeatureTest("RefCtrOffsetPMU", func() error { + _, err := os.Stat(uprobeRefCtrOffsetPMUPath) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + return nil + }, "4.20") + + // ErrNoSymbol indicates that the given symbol was not found + // in the ELF symbols table. + ErrNoSymbol = errors.New("not found") +) + +// Executable defines an executable program on the filesystem. +type Executable struct { + // Path of the executable on the filesystem. + path string + // Parsed ELF and dynamic symbols' cachedAddresses. + cachedAddresses map[string]uint64 + // Keep track of symbol table lazy load. + cacheAddressesOnce sync.Once +} + +// UprobeOptions defines additional parameters that will be used +// when loading Uprobes. +type UprobeOptions struct { + // Symbol address. Must be provided in case of external symbols (shared libs). + // If set, overrides the address eventually parsed from the executable. + Address uint64 + // The offset relative to given symbol. Useful when tracing an arbitrary point + // inside the frame of given symbol. + // + // Note: this field changed from being an absolute offset to being relative + // to Address. + Offset uint64 + // Only set the uprobe on the given process ID. Useful when tracing + // shared library calls or programs that have many running instances. + PID int + // Automatically manage SDT reference counts (semaphores). + // + // If this field is set, the Kernel will increment/decrement the + // semaphore located in the process memory at the provided address on + // probe attach/detach. + // + // See also: + // sourceware.org/systemtap/wiki/UserSpaceProbeImplementation (Semaphore Handling) + // github.com/torvalds/linux/commit/1cc33161a83d + // github.com/torvalds/linux/commit/a6ca88b241d5 + RefCtrOffset uint64 + // Arbitrary value that can be fetched from an eBPF program + // via `bpf_get_attach_cookie()`. + // + // Needs kernel 5.15+. + Cookie uint64 + // Prefix used for the event name if the uprobe must be attached using tracefs. + // The group name will be formatted as `_`. + // The default empty string is equivalent to "ebpf" as the prefix. + TraceFSPrefix string +} + +func (uo *UprobeOptions) cookie() uint64 { + if uo == nil { + return 0 + } + return uo.Cookie +} + +// To open a new Executable, use: +// +// OpenExecutable("/bin/bash") +// +// The returned value can then be used to open Uprobe(s). 
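+//
+// For example, with error handling (prog is assumed to be a loaded Kprobe
+// program):
+//
+//	ex, err := OpenExecutable("/bin/bash")
+//	if err != nil {
+//		// handle error
+//	}
+//	up, err := ex.Uprobe("main", prog, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer up.Close()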
+func OpenExecutable(path string) (*Executable, error) { + if path == "" { + return nil, fmt.Errorf("path cannot be empty") + } + + f, err := internal.OpenSafeELFFile(path) + if err != nil { + return nil, fmt.Errorf("parse ELF file: %w", err) + } + defer f.Close() + + if f.Type != elf.ET_EXEC && f.Type != elf.ET_DYN { + // ELF is not an executable or a shared object. + return nil, errors.New("the given file is not an executable or a shared object") + } + + return &Executable{ + path: path, + cachedAddresses: make(map[string]uint64), + }, nil +} + +func (ex *Executable) load(f *internal.SafeELFFile) error { + syms, err := f.Symbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + dynsyms, err := f.DynamicSymbols() + if err != nil && !errors.Is(err, elf.ErrNoSymbols) { + return err + } + + syms = append(syms, dynsyms...) + + for _, s := range syms { + if elf.ST_TYPE(s.Info) != elf.STT_FUNC { + // Symbol not associated with a function or other executable code. + continue + } + + address := s.Value + + // Loop over ELF segments. + for _, prog := range f.Progs { + // Skip uninteresting segments. + if prog.Type != elf.PT_LOAD || (prog.Flags&elf.PF_X) == 0 { + continue + } + + if prog.Vaddr <= s.Value && s.Value < (prog.Vaddr+prog.Memsz) { + // If the symbol value is contained in the segment, calculate + // the symbol offset. + // + // fn symbol offset = fn symbol VA - .text VA + .text offset + // + // stackoverflow.com/a/40249502 + address = s.Value - prog.Vaddr + prog.Off + break + } + } + + ex.cachedAddresses[s.Name] = address + } + + return nil +} + +// address calculates the address of a symbol in the executable. +// +// opts must not be nil. +func (ex *Executable) address(symbol string, address, offset uint64) (uint64, error) { + if address > 0 { + return address + offset, nil + } + + var err error + ex.cacheAddressesOnce.Do(func() { + var f *internal.SafeELFFile + f, err = internal.OpenSafeELFFile(ex.path) + if err != nil { + err = fmt.Errorf("parse ELF file: %w", err) + return + } + defer f.Close() + + err = ex.load(f) + }) + if err != nil { + return 0, fmt.Errorf("lazy load symbols: %w", err) + } + + address, ok := ex.cachedAddresses[symbol] + if !ok { + return 0, fmt.Errorf("symbol %s: %w", symbol, ErrNoSymbol) + } + + // Symbols with location 0 from section undef are shared library calls and + // are relocated before the binary is executed. Dynamic linking is not + // implemented by the library, so mark this as unsupported for now. + // + // Since only offset values are stored and not elf.Symbol, if the value is 0, + // assume it's an external symbol. + if address == 0 { + return 0, fmt.Errorf("cannot resolve %s library call '%s': %w "+ + "(consider providing UprobeOptions.Address)", ex.path, symbol, ErrNotSupported) + } + + return address + offset, nil +} + +// Uprobe attaches the given eBPF program to a perf event that fires when the +// given symbol starts executing in the given Executable. +// For example, /bin/bash::main(): +// +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uprobe("main", prog, nil) +// +// When using symbols which belongs to shared libraries, +// an offset must be provided via options: +// +// up, err := ex.Uprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. 
The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. +// +// The returned Link may implement [PerfEvent]. +func (ex *Executable) Uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { + u, err := ex.uprobe(symbol, prog, opts, false) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(u, prog, opts.cookie()) + if err != nil { + u.Close() + return nil, err + } + + return lnk, nil +} + +// Uretprobe attaches the given eBPF program to a perf event that fires right +// before the given symbol exits. For example, /bin/bash::main(): +// +// ex, _ = OpenExecutable("/bin/bash") +// ex.Uretprobe("main", prog, nil) +// +// When using symbols which belongs to shared libraries, +// an offset must be provided via options: +// +// up, err := ex.Uretprobe("main", prog, &UprobeOptions{Offset: 0x123}) +// +// Note: Setting the Offset field in the options supersedes the symbol's offset. +// +// Losing the reference to the resulting Link (up) will close the Uprobe +// and prevent further execution of prog. The Link must be Closed during +// program shutdown to avoid leaking system resources. +// +// Functions provided by shared libraries can currently not be traced and +// will result in an ErrNotSupported. +// +// The returned Link may implement [PerfEvent]. +func (ex *Executable) Uretprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions) (Link, error) { + u, err := ex.uprobe(symbol, prog, opts, true) + if err != nil { + return nil, err + } + + lnk, err := attachPerfEvent(u, prog, opts.cookie()) + if err != nil { + u.Close() + return nil, err + } + + return lnk, nil +} + +// uprobe opens a perf event for the given binary/symbol and attaches prog to it. +// If ret is true, create a uretprobe. +func (ex *Executable) uprobe(symbol string, prog *ebpf.Program, opts *UprobeOptions, ret bool) (*perfEvent, error) { + if prog == nil { + return nil, fmt.Errorf("prog cannot be nil: %w", errInvalidInput) + } + if prog.Type() != ebpf.Kprobe { + return nil, fmt.Errorf("eBPF program type %s is not Kprobe: %w", prog.Type(), errInvalidInput) + } + if opts == nil { + opts = &UprobeOptions{} + } + + offset, err := ex.address(symbol, opts.Address, opts.Offset) + if err != nil { + return nil, err + } + + pid := opts.PID + if pid == 0 { + pid = perfAllThreads + } + + if opts.RefCtrOffset != 0 { + if err := haveRefCtrOffsetPMU(); err != nil { + return nil, fmt.Errorf("uprobe ref_ctr_offset: %w", err) + } + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Uprobe, + Symbol: symbol, + Path: ex.path, + Offset: offset, + Pid: pid, + RefCtrOffset: opts.RefCtrOffset, + Ret: ret, + Cookie: opts.Cookie, + Group: opts.TraceFSPrefix, + } + + // Use uprobe PMU if the kernel has it available. + tp, err := pmuProbe(args) + if err == nil { + return tp, nil + } + if !errors.Is(err, ErrNotSupported) { + return nil, fmt.Errorf("creating perf_uprobe PMU: %w", err) + } + + // Use tracefs if uprobe PMU is missing. 
+ tp, err = tracefsProbe(args) + if err != nil { + return nil, fmt.Errorf("creating trace event '%s:%s' in tracefs: %w", ex.path, symbol, err) + } + + return tp, nil +} diff --git a/vendor/github.com/cilium/ebpf/link/uprobe_multi.go b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go new file mode 100644 index 0000000000..49dc18b449 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/uprobe_multi.go @@ -0,0 +1,219 @@ +package link + +import ( + "errors" + "fmt" + "os" + "unsafe" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/unix" +) + +// UprobeMultiOptions defines additional parameters that will be used +// when opening a UprobeMulti Link. +type UprobeMultiOptions struct { + // Symbol addresses. If set, overrides the addresses eventually parsed from + // the executable. Mutually exclusive with UprobeMulti's symbols argument. + Addresses []uint64 + + // Offsets into functions provided by UprobeMulti's symbols argument. + // For example: to set uprobes to main+5 and _start+10, call UprobeMulti + // with: + // symbols: "main", "_start" + // opt.Offsets: 5, 10 + Offsets []uint64 + + // Optional list of associated ref counter offsets. + RefCtrOffsets []uint64 + + // Optional list of associated BPF cookies. + Cookies []uint64 + + // Only set the uprobe_multi link on the given process ID, zero PID means + // system-wide. + PID uint32 +} + +func (ex *Executable) UprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + return ex.uprobeMulti(symbols, prog, opts, 0) +} + +func (ex *Executable) UretprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions) (Link, error) { + + // The return probe is not limited for symbols entry, so there's no special + // setup for return uprobes (other than the extra flag). The symbols, opts.Offsets + // and opts.Addresses arrays follow the same logic as for entry uprobes. 
+ return ex.uprobeMulti(symbols, prog, opts, sys.BPF_F_UPROBE_MULTI_RETURN) +} + +func (ex *Executable) uprobeMulti(symbols []string, prog *ebpf.Program, opts *UprobeMultiOptions, flags uint32) (Link, error) { + if prog == nil { + return nil, errors.New("cannot attach a nil program") + } + + if opts == nil { + opts = &UprobeMultiOptions{} + } + + addresses, err := ex.addresses(symbols, opts.Addresses, opts.Offsets) + if err != nil { + return nil, err + } + + addrs := len(addresses) + cookies := len(opts.Cookies) + refCtrOffsets := len(opts.RefCtrOffsets) + + if addrs == 0 { + return nil, fmt.Errorf("Addresses are required: %w", errInvalidInput) + } + if refCtrOffsets > 0 && refCtrOffsets != addrs { + return nil, fmt.Errorf("RefCtrOffsets must be exactly Addresses in length: %w", errInvalidInput) + } + if cookies > 0 && cookies != addrs { + return nil, fmt.Errorf("Cookies must be exactly Addresses in length: %w", errInvalidInput) + } + + attr := &sys.LinkCreateUprobeMultiAttr{ + Path: sys.NewStringPointer(ex.path), + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + UprobeMultiFlags: flags, + Count: uint32(addrs), + Offsets: sys.NewPointer(unsafe.Pointer(&addresses[0])), + Pid: opts.PID, + } + + if refCtrOffsets != 0 { + attr.RefCtrOffsets = sys.NewPointer(unsafe.Pointer(&opts.RefCtrOffsets[0])) + } + if cookies != 0 { + attr.Cookies = sys.NewPointer(unsafe.Pointer(&opts.Cookies[0])) + } + + fd, err := sys.LinkCreateUprobeMulti(attr) + if errors.Is(err, unix.ESRCH) { + return nil, fmt.Errorf("%w (specified pid not found?)", os.ErrNotExist) + } + // Since Linux commit 46ba0e49b642 ("bpf: fix multi-uprobe PID filtering + // logic"), if the provided pid overflows MaxInt32 (turning it negative), the + // kernel will return EINVAL instead of ESRCH. 
+ if errors.Is(err, unix.EINVAL) { + return nil, fmt.Errorf("%w (invalid pid, missing symbol or prog's AttachType not AttachTraceUprobeMulti?)", err) + } + + if err != nil { + if haveFeatErr := haveBPFLinkUprobeMulti(); haveFeatErr != nil { + return nil, haveFeatErr + } + return nil, err + } + + return &uprobeMultiLink{RawLink{fd, ""}}, nil +} + +func (ex *Executable) addresses(symbols []string, addresses, offsets []uint64) ([]uint64, error) { + n := len(symbols) + if n == 0 { + n = len(addresses) + } + + if n == 0 { + return nil, fmt.Errorf("%w: neither symbols nor addresses given", errInvalidInput) + } + + if symbols != nil && len(symbols) != n { + return nil, fmt.Errorf("%w: have %d symbols but want %d", errInvalidInput, len(symbols), n) + } + + if addresses != nil && len(addresses) != n { + return nil, fmt.Errorf("%w: have %d addresses but want %d", errInvalidInput, len(addresses), n) + } + + if offsets != nil && len(offsets) != n { + return nil, fmt.Errorf("%w: have %d offsets but want %d", errInvalidInput, len(offsets), n) + } + + results := make([]uint64, 0, n) + for i := 0; i < n; i++ { + var sym string + if symbols != nil { + sym = symbols[i] + } + + var addr, off uint64 + if addresses != nil { + addr = addresses[i] + } + + if offsets != nil { + off = offsets[i] + } + + result, err := ex.address(sym, addr, off) + if err != nil { + return nil, err + } + + results = append(results, result) + } + + return results, nil +} + +type uprobeMultiLink struct { + RawLink +} + +var _ Link = (*uprobeMultiLink)(nil) + +func (kml *uprobeMultiLink) Update(_ *ebpf.Program) error { + return fmt.Errorf("update uprobe_multi: %w", ErrNotSupported) +} + +var haveBPFLinkUprobeMulti = internal.NewFeatureTest("bpf_link_uprobe_multi", func() error { + prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{ + Name: "probe_upm_link", + Type: ebpf.Kprobe, + Instructions: asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + }, + AttachType: ebpf.AttachTraceUprobeMulti, + License: "MIT", + }) + if errors.Is(err, unix.E2BIG) { + // Kernel doesn't support AttachType field. + return internal.ErrNotSupported + } + if err != nil { + return err + } + defer prog.Close() + + // We try to create uprobe multi link on '/' path which results in + // error with -EBADF in case uprobe multi link is supported. + fd, err := sys.LinkCreateUprobeMulti(&sys.LinkCreateUprobeMultiAttr{ + ProgFd: uint32(prog.FD()), + AttachType: sys.BPF_TRACE_UPROBE_MULTI, + Path: sys.NewStringPointer("/"), + Offsets: sys.NewPointer(unsafe.Pointer(&[]uint64{0})), + Count: 1, + }) + switch { + case errors.Is(err, unix.EBADF): + return nil + case errors.Is(err, unix.EINVAL): + return internal.ErrNotSupported + case err != nil: + return err + } + + // should not happen + fd.Close() + return errors.New("successfully attached uprobe_multi to /, kernel bug?") +}, "6.6") diff --git a/vendor/github.com/cilium/ebpf/link/xdp.go b/vendor/github.com/cilium/ebpf/link/xdp.go new file mode 100644 index 0000000000..2ec441229a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/link/xdp.go @@ -0,0 +1,80 @@ +package link + +import ( + "fmt" + + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/internal/sys" +) + +// XDPAttachFlags represents how XDP program will be attached to interface. +type XDPAttachFlags uint32 + +const ( + // XDPGenericMode (SKB) links XDP BPF program for drivers which do + // not yet support native XDP. + XDPGenericMode XDPAttachFlags = 1 << (iota + 1) + // XDPDriverMode links XDP BPF program into the driver’s receive path. 
+ XDPDriverMode + // XDPOffloadMode offloads the entire XDP BPF program into hardware. + XDPOffloadMode +) + +type XDPOptions struct { + // Program must be an XDP BPF program. + Program *ebpf.Program + + // Interface is the interface index to attach program to. + Interface int + + // Flags is one of XDPAttachFlags (optional). + // + // Only one XDP mode should be set, without flag defaults + // to driver/generic mode (best effort). + Flags XDPAttachFlags +} + +// AttachXDP links an XDP BPF program to an XDP hook. +func AttachXDP(opts XDPOptions) (Link, error) { + if t := opts.Program.Type(); t != ebpf.XDP { + return nil, fmt.Errorf("invalid program type %s, expected XDP", t) + } + + if opts.Interface < 1 { + return nil, fmt.Errorf("invalid interface index: %d", opts.Interface) + } + + rawLink, err := AttachRawLink(RawLinkOptions{ + Program: opts.Program, + Attach: ebpf.AttachXDP, + Target: opts.Interface, + Flags: uint32(opts.Flags), + }) + + if err != nil { + return nil, fmt.Errorf("failed to attach link: %w", err) + } + + return &xdpLink{*rawLink}, nil +} + +type xdpLink struct { + RawLink +} + +func (xdp *xdpLink) Info() (*Info, error) { + var info sys.XDPLinkInfo + if err := sys.ObjInfo(xdp.fd, &info); err != nil { + return nil, fmt.Errorf("xdp link info: %s", err) + } + extra := &XDPInfo{ + Ifindex: info.Ifindex, + } + + return &Info{ + info.Type, + info.Id, + ebpf.ProgramID(info.ProgId), + extra, + }, nil +} diff --git a/vendor/github.com/cilium/ebpf/linker.go b/vendor/github.com/cilium/ebpf/linker.go new file mode 100644 index 0000000000..6f97af2784 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/linker.go @@ -0,0 +1,500 @@ +package ebpf + +import ( + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "io" + "io/fs" + "math" + "slices" + "strings" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" +) + +// handles stores handle objects to avoid gc cleanup +type handles []*btf.Handle + +func (hs *handles) add(h *btf.Handle) (int, error) { + if h == nil { + return 0, nil + } + + if len(*hs) == math.MaxInt16 { + return 0, fmt.Errorf("can't add more than %d module FDs to fdArray", math.MaxInt16) + } + + *hs = append(*hs, h) + + // return length of slice so that indexes start at 1 + return len(*hs), nil +} + +func (hs handles) fdArray() []int32 { + // first element of fda is reserved as no module can be indexed with 0 + fda := []int32{0} + for _, h := range hs { + fda = append(fda, int32(h.FD())) + } + + return fda +} + +func (hs *handles) Close() error { + var errs []error + for _, h := range *hs { + errs = append(errs, h.Close()) + } + return errors.Join(errs...) +} + +// splitSymbols splits insns into subsections delimited by Symbol Instructions. +// insns cannot be empty and must start with a Symbol Instruction. +// +// The resulting map is indexed by Symbol name. +func splitSymbols(insns asm.Instructions) (map[string]asm.Instructions, error) { + if len(insns) == 0 { + return nil, errors.New("insns is empty") + } + + currentSym := insns[0].Symbol() + if currentSym == "" { + return nil, errors.New("insns must start with a Symbol") + } + + start := 0 + progs := make(map[string]asm.Instructions) + for i, ins := range insns[1:] { + i := i + 1 + + sym := ins.Symbol() + if sym == "" { + continue + } + + // New symbol, flush the old one out. 
+ progs[currentSym] = slices.Clone(insns[start:i]) + + if progs[sym] != nil { + return nil, fmt.Errorf("insns contains duplicate Symbol %s", sym) + } + currentSym = sym + start = i + } + + if tail := insns[start:]; len(tail) > 0 { + progs[currentSym] = slices.Clone(tail) + } + + return progs, nil +} + +// The linker is responsible for resolving bpf-to-bpf calls between programs +// within an ELF. Each BPF program must be a self-contained binary blob, +// so when an instruction in one ELF program section wants to jump to +// a function in another, the linker needs to pull in the bytecode +// (and BTF info) of the target function and concatenate the instruction +// streams. +// +// Later on in the pipeline, all call sites are fixed up with relative jumps +// within this newly-created instruction stream to then finally hand off to +// the kernel with BPF_PROG_LOAD. +// +// Each function is denoted by an ELF symbol and the compiler takes care of +// register setup before each jump instruction. + +// hasFunctionReferences returns true if insns contains one or more bpf2bpf +// function references. +func hasFunctionReferences(insns asm.Instructions) bool { + for _, i := range insns { + if i.IsFunctionReference() { + return true + } + } + return false +} + +// applyRelocations collects and applies any CO-RE relocations in insns. +// +// Passing a nil target will relocate against the running kernel. insns are +// modified in place. +func applyRelocations(insns asm.Instructions, targets []*btf.Spec, kmodName string, bo binary.ByteOrder, b *btf.Builder) error { + var relos []*btf.CORERelocation + var reloInsns []*asm.Instruction + iter := insns.Iterate() + for iter.Next() { + if relo := btf.CORERelocationMetadata(iter.Ins); relo != nil { + relos = append(relos, relo) + reloInsns = append(reloInsns, iter.Ins) + } + } + + if len(relos) == 0 { + return nil + } + + if bo == nil { + bo = internal.NativeEndian + } + + if len(targets) == 0 { + kernelTarget, err := btf.LoadKernelSpec() + if err != nil { + return fmt.Errorf("load kernel spec: %w", err) + } + targets = append(targets, kernelTarget) + + if kmodName != "" { + kmodTarget, err := btf.LoadKernelModuleSpec(kmodName) + // Ignore ErrNotExists to cater to kernels which have CONFIG_DEBUG_INFO_BTF_MODULES disabled. + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("load kernel module spec: %w", err) + } + if err == nil { + targets = append(targets, kmodTarget) + } + } + } + + fixups, err := btf.CORERelocate(relos, targets, bo, b.Add) + if err != nil { + return err + } + + for i, fixup := range fixups { + if err := fixup.Apply(reloInsns[i]); err != nil { + return fmt.Errorf("fixup for %s: %w", relos[i], err) + } + } + + return nil +} + +// flattenPrograms resolves bpf-to-bpf calls for a set of programs. +// +// Links all programs in names by modifying their ProgramSpec in progs. +func flattenPrograms(progs map[string]*ProgramSpec, names []string) { + // Pre-calculate all function references. + refs := make(map[*ProgramSpec][]string) + for _, prog := range progs { + refs[prog] = prog.Instructions.FunctionReferences() + } + + // Create a flattened instruction stream, but don't modify progs yet to + // avoid linking multiple times. + flattened := make([]asm.Instructions, 0, len(names)) + for _, name := range names { + flattened = append(flattened, flattenInstructions(name, progs, refs)) + } + + // Finally, assign the flattened instructions. 
+ for i, name := range names { + progs[name].Instructions = flattened[i] + } +} + +// flattenInstructions resolves bpf-to-bpf calls for a single program. +// +// Flattens the instructions of prog by concatenating the instructions of all +// direct and indirect dependencies. +// +// progs contains all referenceable programs, while refs contain the direct +// dependencies of each program. +func flattenInstructions(name string, progs map[string]*ProgramSpec, refs map[*ProgramSpec][]string) asm.Instructions { + prog := progs[name] + + insns := make(asm.Instructions, len(prog.Instructions)) + copy(insns, prog.Instructions) + + // Add all direct references of prog to the list of to be linked programs. + pending := make([]string, len(refs[prog])) + copy(pending, refs[prog]) + + // All references for which we've appended instructions. + linked := make(map[string]bool) + + // Iterate all pending references. We can't use a range since pending is + // modified in the body below. + for len(pending) > 0 { + var ref string + ref, pending = pending[0], pending[1:] + + if linked[ref] { + // We've already linked this ref, don't append instructions again. + continue + } + + progRef := progs[ref] + if progRef == nil { + // We don't have instructions that go with this reference. This + // happens when calling extern functions. + continue + } + + insns = append(insns, progRef.Instructions...) + linked[ref] = true + + // Make sure we link indirect references. + pending = append(pending, refs[progRef]...) + } + + return insns +} + +// fixupAndValidate is called by the ELF reader right before marshaling the +// instruction stream. It performs last-minute adjustments to the program and +// runs some sanity checks before sending it off to the kernel. +func fixupAndValidate(insns asm.Instructions) error { + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + + // Map load was tagged with a Reference, but does not contain a Map pointer. + needsMap := ins.Reference() != "" || ins.Metadata.Get(kconfigMetaKey{}) != nil + if ins.IsLoadFromMap() && needsMap && ins.Map() == nil { + return fmt.Errorf("instruction %d: %w", iter.Index, asm.ErrUnsatisfiedMapReference) + } + + fixupProbeReadKernel(ins) + } + + return nil +} + +// POISON_CALL_KFUNC_BASE in libbpf. +// https://github.com/libbpf/libbpf/blob/2778cbce609aa1e2747a69349f7f46a2f94f0522/src/libbpf.c#L5767 +const kfuncCallPoisonBase = 2002000000 + +// fixupKfuncs loops over all instructions in search for kfunc calls. +// If at least one is found, the current kernels BTF and module BTFis are searched to set Instruction.Constant +// and Instruction.Offset to the correct values. +func fixupKfuncs(insns asm.Instructions) (_ handles, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + if metadata := ins.Metadata.Get(kfuncMetaKey{}); metadata != nil { + goto fixups + } + } + + return nil, nil + +fixups: + // only load the kernel spec if we found at least one kfunc call + kernelSpec, err := btf.LoadKernelSpec() + if err != nil { + return nil, err + } + + fdArray := make(handles, 0) + defer closeOnError(&fdArray) + + for { + ins := iter.Ins + + metadata := ins.Metadata.Get(kfuncMetaKey{}) + if metadata == nil { + if !iter.Next() { + // break loop if this was the last instruction in the stream. 
+ break + } + continue + } + + // check meta, if no meta return err + kfm, _ := metadata.(*kfuncMeta) + if kfm == nil { + return nil, fmt.Errorf("kfuncMetaKey doesn't contain kfuncMeta") + } + + target := btf.Type((*btf.Func)(nil)) + spec, module, err := findTargetInKernel(kernelSpec, kfm.Func.Name, &target) + if kfm.Binding == elf.STB_WEAK && errors.Is(err, btf.ErrNotFound) { + if ins.IsKfuncCall() { + // If the kfunc call is weak and not found, poison the call. Use a recognizable constant + // to make it easier to debug. And set src to zero so the verifier doesn't complain + // about the invalid imm/offset values before dead-code elimination. + ins.Constant = kfuncCallPoisonBase + ins.Src = 0 + } else if ins.OpCode.IsDWordLoad() { + // If the kfunc DWordLoad is weak and not found, set its address to 0. + ins.Constant = 0 + ins.Src = 0 + } else { + return nil, fmt.Errorf("only kfunc calls and dword loads may have kfunc metadata") + } + + iter.Next() + continue + } + // Error on non-weak kfunc not found. + if errors.Is(err, btf.ErrNotFound) { + return nil, fmt.Errorf("kfunc %q: %w", kfm.Func.Name, ErrNotSupported) + } + if err != nil { + return nil, err + } + + idx, err := fdArray.add(module) + if err != nil { + return nil, err + } + + if err := btf.CheckTypeCompatibility(kfm.Func.Type, target.(*btf.Func).Type); err != nil { + return nil, &incompatibleKfuncError{kfm.Func.Name, err} + } + + id, err := spec.TypeID(target) + if err != nil { + return nil, err + } + + ins.Constant = int64(id) + ins.Offset = int16(idx) + + if !iter.Next() { + break + } + } + + return fdArray, nil +} + +type incompatibleKfuncError struct { + name string + err error +} + +func (ike *incompatibleKfuncError) Error() string { + return fmt.Sprintf("kfunc %q: %s", ike.name, ike.err) +} + +// fixupProbeReadKernel replaces calls to bpf_probe_read_{kernel,user}(_str) +// with bpf_probe_read(_str) on kernels that don't support it yet. +func fixupProbeReadKernel(ins *asm.Instruction) { + if !ins.IsBuiltinCall() { + return + } + + // Kernel supports bpf_probe_read_kernel, nothing to do. + if haveProbeReadKernel() == nil { + return + } + + switch asm.BuiltinFunc(ins.Constant) { + case asm.FnProbeReadKernel, asm.FnProbeReadUser: + ins.Constant = int64(asm.FnProbeRead) + case asm.FnProbeReadKernelStr, asm.FnProbeReadUserStr: + ins.Constant = int64(asm.FnProbeReadStr) + } +} + +// resolveKconfigReferences creates and populates a .kconfig map if necessary. +// +// Returns a nil Map and no error if no references exist. +func resolveKconfigReferences(insns asm.Instructions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + var spec *MapSpec + iter := insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta != nil { + spec = meta.Map + break + } + } + + if spec == nil { + return nil, nil + } + + cpy := spec.Copy() + if err := resolveKconfig(cpy); err != nil { + return nil, err + } + + kconfig, err := NewMap(cpy) + if err != nil { + return nil, err + } + defer closeOnError(kconfig) + + // Resolve all instructions which load from .kconfig map with actual map + // and offset inside it. 
+ iter = insns.Iterate() + for iter.Next() { + meta, _ := iter.Ins.Metadata.Get(kconfigMetaKey{}).(*kconfigMeta) + if meta == nil { + continue + } + + if meta.Map != spec { + return nil, fmt.Errorf("instruction %d: reference to multiple .kconfig maps is not allowed", iter.Index) + } + + if err := iter.Ins.AssociateMap(kconfig); err != nil { + return nil, fmt.Errorf("instruction %d: %w", iter.Index, err) + } + + // Encode a map read at the offset of the var in the datasec. + iter.Ins.Constant = int64(uint64(meta.Offset) << 32) + iter.Ins.Metadata.Set(kconfigMetaKey{}, nil) + } + + return kconfig, nil +} + +func resolveKsymReferences(insns asm.Instructions) error { + var missing []string + + iter := insns.Iterate() + for iter.Next() { + ins := iter.Ins + meta, _ := ins.Metadata.Get(ksymMetaKey{}).(*ksymMeta) + if meta == nil { + continue + } + + addr, err := kallsyms.Address(meta.Name) + if err != nil { + return fmt.Errorf("resolve ksym %s: %w", meta.Name, err) + } + if addr != 0 { + ins.Constant = int64(addr) + continue + } + + if meta.Binding == elf.STB_WEAK { + // A weak ksym variable in eBPF C means its resolution is optional. + // Set a zero constant explicitly for clarity. + ins.Constant = 0 + continue + } + + if !slices.Contains(missing, meta.Name) { + missing = append(missing, meta.Name) + } + } + + if len(missing) > 0 { + return fmt.Errorf("kernel is missing symbol: %s", strings.Join(missing, ",")) + } + + return nil +} diff --git a/vendor/github.com/cilium/ebpf/map.go b/vendor/github.com/cilium/ebpf/map.go new file mode 100644 index 0000000000..e5d8bd7809 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/map.go @@ -0,0 +1,1750 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "os" + "path/filepath" + "reflect" + "slices" + "strings" + "sync" + "time" + "unsafe" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// Errors returned by Map and MapIterator methods. +var ( + ErrKeyNotExist = errors.New("key does not exist") + ErrKeyExist = errors.New("key already exists") + ErrIterationAborted = errors.New("iteration aborted") + ErrMapIncompatible = errors.New("map spec is incompatible with existing map") + errMapNoBTFValue = errors.New("map spec does not contain a BTF Value") + + // pre-allocating these errors here since they may get called in hot code paths + // and cause unnecessary memory allocations + errMapLookupKeyNotExist = fmt.Errorf("lookup: %w", sysErrKeyNotExist) +) + +// MapOptions control loading a map into the kernel. +type MapOptions struct { + // The base path to pin maps in if requested via PinByName. + // Existing maps will be re-used if they are compatible, otherwise an + // error is returned. + PinPath string + LoadPinOptions LoadPinOptions +} + +// MapID represents the unique ID of an eBPF map +type MapID uint32 + +// MapSpec defines a Map. +type MapSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + Type MapType + KeySize uint32 + ValueSize uint32 + MaxEntries uint32 + + // Flags is passed to the kernel and specifies additional map + // creation attributes. + Flags uint32 + + // Automatically pin and load a map from MapOptions.PinPath. + // Generates an error if an existing pinned map is incompatible with the MapSpec. 
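+ // With PinByName the map is pinned at MapOptions.PinPath joined with
+ // Name, e.g. /sys/fs/bpf/<Name>.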
+ Pinning PinType + + // Specify numa node during map creation + // (effective only if sys.BPF_F_NUMA_NODE flag is set, + // which can be imported from golang.org/x/sys/unix) + NumaNode uint32 + + // The initial contents of the map. May be nil. + Contents []MapKV + + // InnerMap is used as a template for ArrayOfMaps and HashOfMaps + InnerMap *MapSpec + + // Extra trailing bytes found in the ELF map definition when using structs + // larger than libbpf's bpf_map_def. nil if no trailing bytes were present. + // Must be nil or empty before instantiating the MapSpec into a Map. + Extra *bytes.Reader + + // The key and value type of this map. May be nil. + Key, Value btf.Type +} + +func (ms *MapSpec) String() string { + return fmt.Sprintf("%s(keySize=%d, valueSize=%d, maxEntries=%d, flags=%d)", ms.Type, ms.KeySize, ms.ValueSize, ms.MaxEntries, ms.Flags) +} + +// Copy returns a copy of the spec. +// +// MapSpec.Contents is a shallow copy. +func (ms *MapSpec) Copy() *MapSpec { + if ms == nil { + return nil + } + + cpy := *ms + cpy.Contents = slices.Clone(cpy.Contents) + cpy.Key = btf.Copy(cpy.Key) + cpy.Value = btf.Copy(cpy.Value) + + if cpy.InnerMap == ms { + cpy.InnerMap = &cpy + } else { + cpy.InnerMap = ms.InnerMap.Copy() + } + + if cpy.Extra != nil { + extra := *cpy.Extra + cpy.Extra = &extra + } + + return &cpy +} + +// fixupMagicFields fills fields of MapSpec which are usually +// left empty in ELF or which depend on runtime information. +// +// The method doesn't modify Spec, instead returning a copy. +// The copy is only performed if fixups are necessary, so callers mustn't mutate +// the returned spec. +func (spec *MapSpec) fixupMagicFields() (*MapSpec, error) { + switch spec.Type { + case ArrayOfMaps, HashOfMaps: + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for map of map") + } + + spec = spec.Copy() + spec.ValueSize = 4 + + case PerfEventArray: + if spec.KeySize != 0 && spec.KeySize != 4 { + return nil, errors.New("KeySize must be zero or four for perf event array") + } + + if spec.ValueSize != 0 && spec.ValueSize != 4 { + return nil, errors.New("ValueSize must be zero or four for perf event array") + } + + spec = spec.Copy() + spec.KeySize = 4 + spec.ValueSize = 4 + + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup perf event array: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // MaxEntries should be zero most of the time, but there is code + // out there which hardcodes large constants. Clamp the number + // of entries to the number of CPUs at most. Allow creating maps with + // less than n items since some kernel selftests relied on this + // behaviour in the past. + spec.MaxEntries = n + } + + case CPUMap: + n, err := PossibleCPU() + if err != nil { + return nil, fmt.Errorf("fixup cpu map: %w", err) + } + + if n := uint32(n); spec.MaxEntries == 0 || spec.MaxEntries > n { + // Perform clamping similar to PerfEventArray. + spec.MaxEntries = n + } + } + + return spec, nil +} + +// dataSection returns the contents and BTF Datasec descriptor of the spec. 
+func (ms *MapSpec) dataSection() ([]byte, *btf.Datasec, error) { + if ms.Value == nil { + return nil, nil, errMapNoBTFValue + } + + ds, ok := ms.Value.(*btf.Datasec) + if !ok { + return nil, nil, fmt.Errorf("map value BTF is a %T, not a *btf.Datasec", ms.Value) + } + + if n := len(ms.Contents); n != 1 { + return nil, nil, fmt.Errorf("expected one key, found %d", n) + } + + kv := ms.Contents[0] + value, ok := kv.Value.([]byte) + if !ok { + return nil, nil, fmt.Errorf("value at first map key is %T, not []byte", kv.Value) + } + + return value, ds, nil +} + +func (ms *MapSpec) readOnly() bool { + return (ms.Flags & sys.BPF_F_RDONLY_PROG) > 0 +} + +func (ms *MapSpec) writeOnly() bool { + return (ms.Flags & sys.BPF_F_WRONLY_PROG) > 0 +} + +// MapKV is used to initialize the contents of a Map. +type MapKV struct { + Key interface{} + Value interface{} +} + +// Compatible returns nil if an existing map may be used instead of creating +// one from the spec. +// +// Returns an error wrapping [ErrMapIncompatible] otherwise. +func (ms *MapSpec) Compatible(m *Map) error { + ms, err := ms.fixupMagicFields() + if err != nil { + return err + } + + diffs := []string{} + if m.typ != ms.Type { + diffs = append(diffs, fmt.Sprintf("Type: %s changed to %s", m.typ, ms.Type)) + } + if m.keySize != ms.KeySize { + diffs = append(diffs, fmt.Sprintf("KeySize: %d changed to %d", m.keySize, ms.KeySize)) + } + if m.valueSize != ms.ValueSize { + diffs = append(diffs, fmt.Sprintf("ValueSize: %d changed to %d", m.valueSize, ms.ValueSize)) + } + if m.maxEntries != ms.MaxEntries { + diffs = append(diffs, fmt.Sprintf("MaxEntries: %d changed to %d", m.maxEntries, ms.MaxEntries)) + } + + // BPF_F_RDONLY_PROG is set unconditionally for devmaps. Explicitly allow this + // mismatch. + if !((ms.Type == DevMap || ms.Type == DevMapHash) && m.flags^ms.Flags == sys.BPF_F_RDONLY_PROG) && + m.flags != ms.Flags { + diffs = append(diffs, fmt.Sprintf("Flags: %d changed to %d", m.flags, ms.Flags)) + } + + if len(diffs) == 0 { + return nil + } + + return fmt.Errorf("%s: %w", strings.Join(diffs, ", "), ErrMapIncompatible) +} + +// Map represents a Map file descriptor. +// +// It is not safe to close a map which is used by other goroutines. +// +// Methods which take interface{} arguments by default encode +// them using binary.Read/Write in the machine's native endianness. +// +// Implement encoding.BinaryMarshaler or encoding.BinaryUnmarshaler +// if you require custom encoding. +type Map struct { + name string + fd *sys.FD + typ MapType + keySize uint32 + valueSize uint32 + maxEntries uint32 + flags uint32 + pinnedPath string + // Per CPU maps return values larger than the size in the spec + fullValueSize int + + memory *Memory +} + +// NewMapFromFD creates a map from a raw fd. +// +// You should not use fd after calling this function. +func NewMapFromFD(fd int) (*Map, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newMapFromFD(f) +} + +func newMapFromFD(fd *sys.FD) (*Map, error) { + info, err := newMapInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("get map info: %w", err) + } + + return newMap(fd, info.Name, info.Type, info.KeySize, info.ValueSize, info.MaxEntries, info.Flags) +} + +// NewMap creates a new Map. +// +// It's equivalent to calling NewMapWithOptions with default options. +func NewMap(spec *MapSpec) (*Map, error) { + return NewMapWithOptions(spec, MapOptions{}) +} + +// NewMapWithOptions creates a new Map. 
+// +// Creating a map for the first time will perform feature detection +// by creating small, temporary maps. +// +// The caller is responsible for ensuring the process' rlimit is set +// sufficiently high for locking memory during map creation. This can be done +// by calling rlimit.RemoveMemlock() prior to calling NewMapWithOptions. +// +// May return an error wrapping ErrMapIncompatible. +func NewMapWithOptions(spec *MapSpec, opts MapOptions) (*Map, error) { + m, err := newMapWithOptions(spec, opts) + if err != nil { + return nil, fmt.Errorf("creating map: %w", err) + } + + if err := m.finalize(spec); err != nil { + m.Close() + return nil, fmt.Errorf("populating map: %w", err) + } + + return m, nil +} + +func newMapWithOptions(spec *MapSpec, opts MapOptions) (_ *Map, err error) { + closeOnError := func(c io.Closer) { + if err != nil { + c.Close() + } + } + + switch spec.Pinning { + case PinByName: + if spec.Name == "" { + return nil, fmt.Errorf("pin by name: missing Name") + } + + if opts.PinPath == "" { + return nil, fmt.Errorf("pin by name: missing MapOptions.PinPath") + } + + path := filepath.Join(opts.PinPath, spec.Name) + m, err := LoadPinnedMap(path, &opts.LoadPinOptions) + if errors.Is(err, unix.ENOENT) { + break + } + if err != nil { + return nil, fmt.Errorf("load pinned map: %w", err) + } + defer closeOnError(m) + + if err := spec.Compatible(m); err != nil { + return nil, fmt.Errorf("use pinned map %s: %w", spec.Name, err) + } + + return m, nil + + case PinNone: + // Nothing to do here + + default: + return nil, fmt.Errorf("pin type %d: %w", int(spec.Pinning), ErrNotSupported) + } + + var innerFd *sys.FD + if spec.Type == ArrayOfMaps || spec.Type == HashOfMaps { + if spec.InnerMap == nil { + return nil, fmt.Errorf("%s requires InnerMap", spec.Type) + } + + if spec.InnerMap.Pinning != PinNone { + return nil, errors.New("inner maps cannot be pinned") + } + + template, err := spec.InnerMap.createMap(nil) + if err != nil { + return nil, fmt.Errorf("inner map: %w", err) + } + defer template.Close() + + // Intentionally skip populating and freezing (finalizing) + // the inner map template since it will be removed shortly. + + innerFd = template.fd + } + + m, err := spec.createMap(innerFd) + if err != nil { + return nil, err + } + defer closeOnError(m) + + if spec.Pinning == PinByName { + path := filepath.Join(opts.PinPath, spec.Name) + if err := m.Pin(path); err != nil { + return nil, fmt.Errorf("pin map to %s: %w", path, err) + } + } + + return m, nil +} + +// Memory returns a memory-mapped region for the Map. The Map must have been +// created with the BPF_F_MMAPABLE flag. Repeated calls to Memory return the +// same mapping. Callers are responsible for coordinating access to Memory. +func (m *Map) Memory() (*Memory, error) { + if m.memory != nil { + return m.memory, nil + } + + if m.flags&sys.BPF_F_MMAPABLE == 0 { + return nil, fmt.Errorf("Map was not created with the BPF_F_MMAPABLE flag: %w", ErrNotSupported) + } + + size, err := m.memorySize() + if err != nil { + return nil, err + } + + mm, err := newMemory(m.FD(), size) + if err != nil { + return nil, fmt.Errorf("creating new Memory: %w", err) + } + + m.memory = mm + + return mm, nil +} + +func (m *Map) memorySize() (int, error) { + switch m.Type() { + case Array: + // In Arrays, values are always laid out on 8-byte boundaries regardless of + // architecture. Multiply by MaxEntries and align the result to the host's + // page size. 
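+		//
+		// Worked example with made-up numbers: ValueSize 6 and MaxEntries 3
+		// give Align(6, 8)*3 = 24 bytes, which is then rounded up to a single
+		// page on systems with a 4096-byte page size.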
+ size := int(internal.Align(m.ValueSize(), 8) * m.MaxEntries()) + size = internal.Align(size, os.Getpagesize()) + return size, nil + case Arena: + // For Arenas, MaxEntries denotes the maximum number of pages available to + // the arena. + return int(m.MaxEntries()) * os.Getpagesize(), nil + } + + return 0, fmt.Errorf("determine memory size of map type %s: %w", m.Type(), ErrNotSupported) +} + +// createMap validates the spec's properties and creates the map in the kernel +// using the given opts. It does not populate or freeze the map. +func (spec *MapSpec) createMap(inner *sys.FD) (_ *Map, err error) { + closeOnError := func(closer io.Closer) { + if err != nil { + closer.Close() + } + } + + // Kernels 4.13 through 5.4 used a struct bpf_map_def that contained + // additional 'inner_map_idx' and later 'numa_node' fields. + // In order to support loading these definitions, tolerate the presence of + // extra bytes, but require them to be zeroes. + if spec.Extra != nil { + if _, err := io.Copy(internal.DiscardZeroes{}, spec.Extra); err != nil { + return nil, errors.New("extra contains unhandled non-zero bytes, drain before creating map") + } + } + + spec, err = spec.fixupMagicFields() + if err != nil { + return nil, err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(spec.Type), + KeySize: spec.KeySize, + ValueSize: spec.ValueSize, + MaxEntries: spec.MaxEntries, + MapFlags: spec.Flags, + NumaNode: spec.NumaNode, + } + + if inner != nil { + attr.InnerMapFd = inner.Uint() + } + + if haveObjName() == nil { + attr.MapName = sys.NewObjName(spec.Name) + } + + if spec.Key != nil || spec.Value != nil { + handle, keyTypeID, valueTypeID, err := btf.MarshalMapKV(spec.Key, spec.Value) + if err != nil && !errors.Is(err, btf.ErrNotSupported) { + return nil, fmt.Errorf("load BTF: %w", err) + } + + if handle != nil { + defer handle.Close() + + // Use BTF k/v during map creation. + attr.BtfFd = uint32(handle.FD()) + attr.BtfKeyTypeId = keyTypeID + attr.BtfValueTypeId = valueTypeID + } + } + + fd, err := sys.MapCreate(&attr) + + // Some map types don't support BTF k/v in earlier kernel versions. + // Remove BTF metadata and retry map creation. 
+ if (errors.Is(err, sys.ENOTSUPP) || errors.Is(err, unix.EINVAL)) && attr.BtfFd != 0 { + attr.BtfFd, attr.BtfKeyTypeId, attr.BtfValueTypeId = 0, 0, 0 + fd, err = sys.MapCreate(&attr) + } + if err != nil { + return nil, handleMapCreateError(attr, spec, err) + } + + defer closeOnError(fd) + m, err := newMap(fd, spec.Name, spec.Type, spec.KeySize, spec.ValueSize, spec.MaxEntries, spec.Flags) + if err != nil { + return nil, fmt.Errorf("map create: %w", err) + } + return m, nil +} + +func handleMapCreateError(attr sys.MapCreateAttr, spec *MapSpec, err error) error { + if errors.Is(err, unix.EPERM) { + return fmt.Errorf("map create: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + if errors.Is(err, unix.EINVAL) && spec.MaxEntries == 0 { + return fmt.Errorf("map create: %w (MaxEntries may be incorrectly set to zero)", err) + } + if errors.Is(err, unix.EINVAL) && spec.Type == UnspecifiedMap { + return fmt.Errorf("map create: cannot use type %s", UnspecifiedMap) + } + if errors.Is(err, unix.EINVAL) && spec.Flags&sys.BPF_F_NO_PREALLOC > 0 { + return fmt.Errorf("map create: %w (noPrealloc flag may be incompatible with map type %s)", err, spec.Type) + } + + if spec.Type.canStoreMap() { + if haveFeatErr := haveNestedMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + + if spec.readOnly() || spec.writeOnly() { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_MMAPABLE > 0 { + if haveFeatErr := haveMmapableMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_INNER_MAP > 0 { + if haveFeatErr := haveInnerMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + if spec.Flags&sys.BPF_F_NO_PREALLOC > 0 { + if haveFeatErr := haveNoPreallocMaps(); haveFeatErr != nil { + return fmt.Errorf("map create: %w", haveFeatErr) + } + } + // BPF_MAP_TYPE_RINGBUF's max_entries must be a power-of-2 multiple of kernel's page size. + if errors.Is(err, unix.EINVAL) && + (attr.MapType == sys.BPF_MAP_TYPE_RINGBUF || attr.MapType == sys.BPF_MAP_TYPE_USER_RINGBUF) { + pageSize := uint32(os.Getpagesize()) + maxEntries := attr.MaxEntries + if maxEntries%pageSize != 0 || !internal.IsPow(maxEntries) { + return fmt.Errorf("map create: %w (ring map size %d not a multiple of page size %d)", err, maxEntries, pageSize) + } + } + + return fmt.Errorf("map create: %w", err) +} + +// newMap allocates and returns a new Map structure. +// Sets the fullValueSize on per-CPU maps. +func newMap(fd *sys.FD, name string, typ MapType, keySize, valueSize, maxEntries, flags uint32) (*Map, error) { + m := &Map{ + name, + fd, + typ, + keySize, + valueSize, + maxEntries, + flags, + "", + int(valueSize), + nil, + } + + if !typ.hasPerCPUValue() { + return m, nil + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + m.fullValueSize = int(internal.Align(valueSize, 8)) * possibleCPUs + return m, nil +} + +func (m *Map) String() string { + if m.name != "" { + return fmt.Sprintf("%s(%s)#%v", m.typ, m.name, m.fd) + } + return fmt.Sprintf("%s#%v", m.typ, m.fd) +} + +// Type returns the underlying type of the map. +func (m *Map) Type() MapType { + return m.typ +} + +// KeySize returns the size of the map key in bytes. +func (m *Map) KeySize() uint32 { + return m.keySize +} + +// ValueSize returns the size of the map value in bytes. 
+func (m *Map) ValueSize() uint32 { + return m.valueSize +} + +// MaxEntries returns the maximum number of elements the map can hold. +func (m *Map) MaxEntries() uint32 { + return m.maxEntries +} + +// Flags returns the flags of the map. +func (m *Map) Flags() uint32 { + return m.flags +} + +// Info returns metadata about the map. This was first introduced in Linux 4.5, +// but newer kernels support more MapInfo fields with the introduction of more +// features. See [MapInfo] and its methods for more details. +// +// Returns an error wrapping ErrNotSupported if the kernel supports neither +// BPF_OBJ_GET_INFO_BY_FD nor reading map information from /proc/self/fdinfo. +func (m *Map) Info() (*MapInfo, error) { + return newMapInfoFromFd(m.fd) +} + +// Handle returns a reference to the Map's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the Map. +func (m *Map) Handle() (*btf.Handle, error) { + info, err := m.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("map %s: retrieve BTF ID: %w", m, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// MapLookupFlags controls the behaviour of the map lookup calls. +type MapLookupFlags uint64 + +// LookupLock look up the value of a spin-locked map. +const LookupLock MapLookupFlags = sys.BPF_F_LOCK + +// Lookup retrieves a value from a Map. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) Lookup(key, valueOut interface{}) error { + return m.LookupWithFlags(key, valueOut, 0) +} + +// LookupWithFlags retrieves a value from a Map with flags. +// +// Passing LookupLock flag will look up the value of a spin-locked +// map without returning the lock. This must be specified if the +// elements contain a spinlock. +// +// Calls Close() on valueOut if it is of type **Map or **Program, +// and *valueOut is not nil. +// +// Returns an error if the key doesn't exist, see ErrKeyNotExist. +func (m *Map) LookupWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupPerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookup(key, valueBytes.Pointer(), flags); err != nil { + return err + } + + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupAndDelete retrieves and deletes a value from a Map. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDelete(key, valueOut interface{}) error { + return m.LookupAndDeleteWithFlags(key, valueOut, 0) +} + +// LookupAndDeleteWithFlags retrieves and deletes a value from a Map. +// +// Passing LookupLock flag will look up and delete the value of a spin-locked +// map without returning the lock. This must be specified if the elements +// contain a spinlock. +// +// Returns ErrKeyNotExist if the key doesn't exist. +func (m *Map) LookupAndDeleteWithFlags(key, valueOut interface{}, flags MapLookupFlags) error { + if m.typ.hasPerCPUValue() { + return m.lookupAndDeletePerCPU(key, valueOut, flags) + } + + valueBytes := makeMapSyscallOutput(valueOut, m.fullValueSize) + if err := m.lookupAndDelete(key, valueBytes.Pointer(), flags); err != nil { + return err + } + return m.unmarshalValue(valueOut, valueBytes) +} + +// LookupBytes gets a value from Map. 
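+//
+// A usage sketch (the key type is only an example):
+//
+//	raw, err := m.LookupBytes(uint32(0))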
+// +// Returns a nil value if a key doesn't exist. +func (m *Map) LookupBytes(key interface{}) ([]byte, error) { + valueBytes := make([]byte, m.fullValueSize) + valuePtr := sys.NewSlicePointer(valueBytes) + + err := m.lookup(key, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return valueBytes, err +} + +func (m *Map) lookupPerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookup(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +func (m *Map) lookup(key interface{}, valueOut sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valueOut, + Flags: uint64(flags), + } + + if err = sys.MapLookupElem(&attr); err != nil { + if errors.Is(err, unix.ENOENT) { + return errMapLookupKeyNotExist + } + return fmt.Errorf("lookup: %w", wrapMapError(err)) + } + return nil +} + +func (m *Map) lookupAndDeletePerCPU(key, valueOut any, flags MapLookupFlags) error { + slice, err := ensurePerCPUSlice(valueOut) + if err != nil { + return err + } + valueBytes := make([]byte, m.fullValueSize) + if err := m.lookupAndDelete(key, sys.NewSlicePointer(valueBytes), flags); err != nil { + return err + } + return unmarshalPerCPUValue(slice, int(m.valueSize), valueBytes) +} + +// ensurePerCPUSlice allocates a slice for a per-CPU value if necessary. +func ensurePerCPUSlice(sliceOrPtr any) (any, error) { + sliceOrPtrType := reflect.TypeOf(sliceOrPtr) + if sliceOrPtrType.Kind() == reflect.Slice { + // The target is a slice, the caller is responsible for ensuring that + // size is correct. + return sliceOrPtr, nil + } + + slicePtrType := sliceOrPtrType + if slicePtrType.Kind() != reflect.Ptr || slicePtrType.Elem().Kind() != reflect.Slice { + return nil, fmt.Errorf("per-cpu value requires a slice or a pointer to slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + + sliceType := slicePtrType.Elem() + slice := reflect.MakeSlice(sliceType, possibleCPUs, possibleCPUs) + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + reflect.ValueOf(sliceOrPtr).Elem().Set(slice) + if !sliceElemIsPointer { + return slice.Interface(), nil + } + sliceElemType = sliceElemType.Elem() + + for i := 0; i < possibleCPUs; i++ { + newElem := reflect.New(sliceElemType) + slice.Index(i).Set(newElem) + } + + return slice.Interface(), nil +} + +func (m *Map) lookupAndDelete(key any, valuePtr sys.Pointer, flags MapLookupFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapLookupAndDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err := sys.MapLookupAndDeleteElem(&attr); err != nil { + return fmt.Errorf("lookup and delete: %w", wrapMapError(err)) + } + + return nil +} + +// MapUpdateFlags controls the behaviour of the Map.Update call. +// +// The exact semantics depend on the specific MapType. +type MapUpdateFlags uint64 + +const ( + // UpdateAny creates a new element or update an existing one. + UpdateAny MapUpdateFlags = iota + // UpdateNoExist creates a new element. 
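+	// With this flag, updating a key that is already present is expected to
+	// fail with an error wrapping ErrKeyExist; for example (key and value are
+	// placeholders):
+	//
+	//	err := m.Update(key, value, UpdateNoExist)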
+ UpdateNoExist MapUpdateFlags = 1 << (iota - 1) + // UpdateExist updates an existing element. + UpdateExist + // UpdateLock updates elements under bpf_spin_lock. + UpdateLock +) + +// Put replaces or creates a value in map. +// +// It is equivalent to calling Update with UpdateAny. +func (m *Map) Put(key, value interface{}) error { + return m.Update(key, value, UpdateAny) +} + +// Update changes the value of a key. +func (m *Map) Update(key, value any, flags MapUpdateFlags) error { + if m.typ.hasPerCPUValue() { + return m.updatePerCPU(key, value, flags) + } + + valuePtr, err := m.marshalValue(value) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) updatePerCPU(key, value any, flags MapUpdateFlags) error { + valuePtr, err := marshalPerCPUValue(value, int(m.valueSize)) + if err != nil { + return fmt.Errorf("marshal value: %w", err) + } + + return m.update(key, valuePtr, flags) +} + +func (m *Map) update(key any, valuePtr sys.Pointer, flags MapUpdateFlags) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("marshal key: %w", err) + } + + attr := sys.MapUpdateElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + Value: valuePtr, + Flags: uint64(flags), + } + + if err = sys.MapUpdateElem(&attr); err != nil { + return fmt.Errorf("update: %w", wrapMapError(err)) + } + + return nil +} + +// Delete removes a value. +// +// Returns ErrKeyNotExist if the key does not exist. +func (m *Map) Delete(key interface{}) error { + keyPtr, err := m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + + attr := sys.MapDeleteElemAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + } + + if err = sys.MapDeleteElem(&attr); err != nil { + return fmt.Errorf("delete: %w", wrapMapError(err)) + } + return nil +} + +// NextKey finds the key following an initial key. +// +// See NextKeyBytes for details. +// +// Returns ErrKeyNotExist if there is no next key. +func (m *Map) NextKey(key, nextKeyOut interface{}) error { + nextKeyBytes := makeMapSyscallOutput(nextKeyOut, int(m.keySize)) + + if err := m.nextKey(key, nextKeyBytes.Pointer()); err != nil { + return err + } + + if err := nextKeyBytes.Unmarshal(nextKeyOut); err != nil { + return fmt.Errorf("can't unmarshal next key: %w", err) + } + return nil +} + +// NextKeyBytes returns the key following an initial key as a byte slice. +// +// Passing nil will return the first key. +// +// Use Iterate if you want to traverse all entries in the map. +// +// Returns nil if there are no more keys. +func (m *Map) NextKeyBytes(key interface{}) ([]byte, error) { + nextKey := make([]byte, m.keySize) + nextKeyPtr := sys.NewSlicePointer(nextKey) + + err := m.nextKey(key, nextKeyPtr) + if errors.Is(err, ErrKeyNotExist) { + return nil, nil + } + + return nextKey, err +} + +func (m *Map) nextKey(key interface{}, nextKeyOut sys.Pointer) error { + var ( + keyPtr sys.Pointer + err error + ) + + if key != nil { + keyPtr, err = m.marshalKey(key) + if err != nil { + return fmt.Errorf("can't marshal key: %w", err) + } + } + + attr := sys.MapGetNextKeyAttr{ + MapFd: m.fd.Uint(), + Key: keyPtr, + NextKey: nextKeyOut, + } + + if err = sys.MapGetNextKey(&attr); err != nil { + // Kernels 4.4.131 and earlier return EFAULT instead of a pointer to the + // first map element when a nil key pointer is specified. 
+ if key == nil && errors.Is(err, unix.EFAULT) { + var guessKey []byte + guessKey, err = m.guessNonExistentKey() + if err != nil { + return err + } + + // Retry the syscall with a valid non-existing key. + attr.Key = sys.NewSlicePointer(guessKey) + if err = sys.MapGetNextKey(&attr); err == nil { + return nil + } + } + + return fmt.Errorf("next key: %w", wrapMapError(err)) + } + + return nil +} + +var mmapProtectedPage = sync.OnceValues(func() ([]byte, error) { + return unix.Mmap(-1, 0, os.Getpagesize(), unix.PROT_NONE, unix.MAP_ANON|unix.MAP_SHARED) +}) + +// guessNonExistentKey attempts to perform a map lookup that returns ENOENT. +// This is necessary on kernels before 4.4.132, since those don't support +// iterating maps from the start by providing an invalid key pointer. +func (m *Map) guessNonExistentKey() ([]byte, error) { + // Map a protected page and use that as the value pointer. This saves some + // work copying out the value, which we're not interested in. + page, err := mmapProtectedPage() + if err != nil { + return nil, err + } + valuePtr := sys.NewSlicePointer(page) + + randKey := make([]byte, int(m.keySize)) + + for i := 0; i < 4; i++ { + switch i { + // For hash maps, the 0 key is less likely to be occupied. They're often + // used for storing data related to pointers, and their access pattern is + // generally scattered across the keyspace. + case 0: + // An all-0xff key is guaranteed to be out of bounds of any array, since + // those have a fixed key size of 4 bytes. The only corner case being + // arrays with 2^32 max entries, but those are prohibitively expensive + // in many environments. + case 1: + for r := range randKey { + randKey[r] = 0xff + } + // Inspired by BCC, 0x55 is an alternating binary pattern (0101), so + // is unlikely to be taken. + case 2: + for r := range randKey { + randKey[r] = 0x55 + } + // Last ditch effort, generate a random key. + case 3: + rand.New(rand.NewSource(time.Now().UnixNano())).Read(randKey) + } + + err := m.lookup(randKey, valuePtr, 0) + if errors.Is(err, ErrKeyNotExist) { + return randKey, nil + } + } + + return nil, errors.New("couldn't find non-existing key") +} + +// BatchLookup looks up many elements in a map at once. +// +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. +// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookup(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup: %w", err) + } + return n, nil +} + +// BatchLookupAndDelete looks up many elements in a map at once, +// +// It then deletes all those elements. +// "keysOut" and "valuesOut" must be of type slice, a pointer +// to a slice or buffer will not work. +// "cursor" is an pointer to an opaque handle. It must be non-nil. 
Pass +// "cursor" to subsequent calls of this function to continue the batching +// operation in the case of chunking. +// +// Warning: This API is not very safe to use as the kernel implementation for +// batching relies on the user to be aware of subtle details with regarding to +// different map type implementations. +// +// ErrKeyNotExist is returned when the batch lookup has reached +// the end of all possible results, even when partial results +// are returned. It should be used to evaluate when lookup is "done". +func (m *Map) BatchLookupAndDelete(cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + n, err := m.batchLookup(sys.BPF_MAP_LOOKUP_AND_DELETE_BATCH, cursor, keysOut, valuesOut, opts) + if err != nil { + return n, fmt.Errorf("map batch lookup and delete: %w", err) + } + return n, nil +} + +// MapBatchCursor represents a starting point for a batch operation. +type MapBatchCursor struct { + m *Map + opaque []byte +} + +func (m *Map) batchLookup(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchLookupPerCPU(cmd, cursor, keysOut, valuesOut, opts) + } + + count, err := batchCount(keysOut, valuesOut) + if err != nil { + return 0, err + } + + valueBuf := sysenc.SyscallOutput(valuesOut, count*int(m.fullValueSize)) + + n, err := m.batchLookupCmd(cmd, cursor, count, keysOut, valueBuf.Pointer(), opts) + if errors.Is(err, unix.ENOSPC) { + // Hash tables return ENOSPC when the size of the batch is smaller than + // any bucket. + return n, fmt.Errorf("%w (batch size too small?)", err) + } else if err != nil { + return n, err + } + + err = valueBuf.Unmarshal(valuesOut) + if err != nil { + return 0, err + } + + return n, nil +} + +func (m *Map) batchLookupPerCPU(cmd sys.Cmd, cursor *MapBatchCursor, keysOut, valuesOut interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keysOut) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf := make([]byte, count*int(m.fullValueSize)) + valuePtr := sys.NewSlicePointer(valueBuf) + + n, sysErr := m.batchLookupCmd(cmd, cursor, count, keysOut, valuePtr, opts) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, err + } + + err = unmarshalBatchPerCPUValue(valuesOut, count, int(m.valueSize), valueBuf) + if err != nil { + return 0, err + } + + return n, sysErr +} + +func (m *Map) batchLookupCmd(cmd sys.Cmd, cursor *MapBatchCursor, count int, keysOut any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + cursorLen := int(m.keySize) + if cursorLen < 4 { + // * generic_map_lookup_batch requires that batch_out is key_size bytes. + // This is used by array and LPM maps. + // + // * __htab_map_lookup_and_delete_batch requires u32. This is used by the + // various hash maps. + // + // Use a minimum of 4 bytes to avoid having to distinguish between the two. + cursorLen = 4 + } + + inBatch := cursor.opaque + if inBatch == nil { + // This is the first lookup, allocate a buffer to hold the cursor. + cursor.opaque = make([]byte, cursorLen) + cursor.m = m + } else if cursor.m != m { + // Prevent reuse of a cursor across maps. First, it's unlikely to work. + // Second, the maps may require different cursorLen and cursor.opaque + // may therefore be too short. This could lead to the kernel clobbering + // user space memory. 
+ return 0, errors.New("a cursor may not be reused across maps") + } + + if err := haveBatchAPI(); err != nil { + return 0, err + } + + keyBuf := sysenc.SyscallOutput(keysOut, count*int(m.keySize)) + + attr := sys.MapLookupBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyBuf.Pointer(), + Values: valuePtr, + Count: uint32(count), + InBatch: sys.NewSlicePointer(inBatch), + OutBatch: sys.NewSlicePointer(cursor.opaque), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + _, sysErr := sys.BPF(cmd, unsafe.Pointer(&attr), unsafe.Sizeof(attr)) + sysErr = wrapMapError(sysErr) + if sysErr != nil && !errors.Is(sysErr, unix.ENOENT) { + return 0, sysErr + } + + if err := keyBuf.Unmarshal(keysOut); err != nil { + return 0, err + } + + return int(attr.Count), sysErr +} + +// BatchUpdate updates the map with multiple keys and values +// simultaneously. +// "keys" and "values" must be of type slice, a pointer +// to a slice or buffer will not work. +func (m *Map) BatchUpdate(keys, values interface{}, opts *BatchOptions) (int, error) { + if m.typ.hasPerCPUValue() { + return m.batchUpdatePerCPU(keys, values, opts) + } + + count, err := batchCount(keys, values) + if err != nil { + return 0, err + } + + valuePtr, err := marshalMapSyscallInput(values, count*int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, valuePtr, opts) +} + +func (m *Map) batchUpdate(count int, keys any, valuePtr sys.Pointer, opts *BatchOptions) (int, error) { + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, err + } + + attr := sys.MapUpdateBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Values: valuePtr, + Count: uint32(count), + } + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + err = sys.MapUpdateBatch(&attr) + if err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch update: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func (m *Map) batchUpdatePerCPU(keys, values any, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valueBuf, err := marshalBatchPerCPUValue(values, count, int(m.valueSize)) + if err != nil { + return 0, err + } + + return m.batchUpdate(count, keys, sys.NewSlicePointer(valueBuf), opts) +} + +// BatchDelete batch deletes entries in the map by keys. +// "keys" must be of type slice, a pointer to a slice or buffer will not work. 
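+//
+// A minimal sketch (key type and contents are illustrative):
+//
+//	keys := []uint32{1, 2, 3}
+//	n, err := m.BatchDelete(keys, nil)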
+func (m *Map) BatchDelete(keys interface{}, opts *BatchOptions) (int, error) { + count, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + keyPtr, err := marshalMapSyscallInput(keys, count*int(m.keySize)) + if err != nil { + return 0, fmt.Errorf("cannot marshal keys: %v", err) + } + + attr := sys.MapDeleteBatchAttr{ + MapFd: m.fd.Uint(), + Keys: keyPtr, + Count: uint32(count), + } + + if opts != nil { + attr.ElemFlags = opts.ElemFlags + attr.Flags = opts.Flags + } + + if err = sys.MapDeleteBatch(&attr); err != nil { + if haveFeatErr := haveBatchAPI(); haveFeatErr != nil { + return 0, haveFeatErr + } + return int(attr.Count), fmt.Errorf("batch delete: %w", wrapMapError(err)) + } + + return int(attr.Count), nil +} + +func batchCount(keys, values any) (int, error) { + keysLen, err := sliceLen(keys) + if err != nil { + return 0, fmt.Errorf("keys: %w", err) + } + + valuesLen, err := sliceLen(values) + if err != nil { + return 0, fmt.Errorf("values: %w", err) + } + + if keysLen != valuesLen { + return 0, fmt.Errorf("keys and values must have the same length") + } + + return keysLen, nil +} + +// Iterate traverses a map. +// +// It's safe to create multiple iterators at the same time. +// +// It's not possible to guarantee that all keys in a map will be +// returned if there are concurrent modifications to the map. +func (m *Map) Iterate() *MapIterator { + return newMapIterator(m) +} + +// Close the Map's underlying file descriptor, which could unload the +// Map from the kernel if it is not pinned or in use by a loaded Program. +func (m *Map) Close() error { + if m == nil { + // This makes it easier to clean up when iterating maps + // of maps / programs. + return nil + } + + return m.fd.Close() +} + +// FD gets the file descriptor of the Map. +// +// Calling this function is invalid after Close has been called. +func (m *Map) FD() int { + return m.fd.Int() +} + +// Clone creates a duplicate of the Map. +// +// Closing the duplicate does not affect the original, and vice versa. +// Changes made to the map are reflected by both instances however. +// If the original map was pinned, the cloned map will not be pinned by default. +// +// Cloning a nil Map returns nil. +func (m *Map) Clone() (*Map, error) { + if m == nil { + return nil, nil + } + + dup, err := m.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone map: %w", err) + } + + return &Map{ + m.name, + dup, + m.typ, + m.keySize, + m.valueSize, + m.maxEntries, + m.flags, + "", + m.fullValueSize, + nil, + }, nil +} + +// Pin persists the map on the BPF virtual file system past the lifetime of +// the process that created it . +// +// Calling Pin on a previously pinned map will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// You can Clone a map to pin it to a different path. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (m *Map) Pin(fileName string) error { + if err := sys.Pin(m.pinnedPath, fileName, m.fd); err != nil { + return err + } + m.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the map from the BPF virtual filesystem. +// +// Failed calls to Unpin will not alter the state returned by IsPinned. +// +// Unpinning an unpinned Map returns nil. 
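+//
+// For example (the path is illustrative and assumes a mounted bpffs):
+//
+//	_ = m.Pin("/sys/fs/bpf/example_map")
+//	// ... later ...
+//	_ = m.Unpin()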
+func (m *Map) Unpin() error { + if err := sys.Unpin(m.pinnedPath); err != nil { + return err + } + m.pinnedPath = "" + return nil +} + +// IsPinned returns true if the map has a non-empty pinned path. +func (m *Map) IsPinned() bool { + return m.pinnedPath != "" +} + +// Freeze prevents a map to be modified from user space. +// +// It makes no changes to kernel-side restrictions. +func (m *Map) Freeze() error { + attr := sys.MapFreezeAttr{ + MapFd: m.fd.Uint(), + } + + if err := sys.MapFreeze(&attr); err != nil { + if haveFeatErr := haveMapMutabilityModifiers(); haveFeatErr != nil { + return fmt.Errorf("can't freeze map: %w", haveFeatErr) + } + return fmt.Errorf("can't freeze map: %w", err) + } + return nil +} + +// finalize populates the Map according to the Contents specified +// in spec and freezes the Map if requested by spec. +func (m *Map) finalize(spec *MapSpec) error { + for _, kv := range spec.Contents { + if err := m.Put(kv.Key, kv.Value); err != nil { + return fmt.Errorf("putting value: key %v: %w", kv.Key, err) + } + } + + if isConstantDataSection(spec.Name) || isKconfigSection(spec.Name) { + if err := m.Freeze(); err != nil { + return fmt.Errorf("freezing map: %w", err) + } + } + + return nil +} + +func (m *Map) marshalKey(data interface{}) (sys.Pointer, error) { + if data == nil { + if m.keySize == 0 { + // Queues have a key length of zero, so passing nil here is valid. + return sys.NewPointer(nil), nil + } + return sys.Pointer{}, errors.New("can't use nil as key of map") + } + + return marshalMapSyscallInput(data, int(m.keySize)) +} + +func (m *Map) marshalValue(data interface{}) (sys.Pointer, error) { + var ( + buf []byte + err error + ) + + switch value := data.(type) { + case *Map: + if !m.typ.canStoreMap() { + return sys.Pointer{}, fmt.Errorf("can't store map in %s", m.typ) + } + buf, err = marshalMap(value, int(m.valueSize)) + + case *Program: + if !m.typ.canStoreProgram() { + return sys.Pointer{}, fmt.Errorf("can't store program in %s", m.typ) + } + buf, err = marshalProgram(value, int(m.valueSize)) + + default: + return marshalMapSyscallInput(data, int(m.valueSize)) + } + + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +func (m *Map) unmarshalValue(value any, buf sysenc.Buffer) error { + switch value := value.(type) { + case **Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + + other, err := unmarshalMap(buf) + if err != nil { + return err + } + + // The caller might close the map externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Map: + if !m.typ.canStoreMap() { + return fmt.Errorf("can't read a map from %s", m.typ) + } + return errors.New("require pointer to *Map") + + case **Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + + other, err := unmarshalProgram(buf) + if err != nil { + return err + } + + // The caller might close the program externally, so ignore errors. + _ = (*value).Close() + + *value = other + return nil + + case *Program: + if !m.typ.canStoreProgram() { + return fmt.Errorf("can't read a program from %s", m.typ) + } + return errors.New("require pointer to *Program") + } + + return buf.Unmarshal(value) +} + +// LoadPinnedMap opens a Map from a pin (file) on the BPF virtual filesystem. +// +// Requires at least Linux 4.5. 
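+//
+// A hedged usage sketch (the path is illustrative):
+//
+//	m, err := LoadPinnedMap("/sys/fs/bpf/example_map", &LoadPinOptions{})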
+func LoadPinnedMap(fileName string, opts *LoadPinOptions) (*Map, error) { + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + if typ != sys.BPF_TYPE_MAP { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Map", fileName) + } + + m, err := newMapFromFD(fd) + if err == nil { + m.pinnedPath = fileName + } + + return m, err +} + +// unmarshalMap creates a map from a map ID encoded in host endianness. +func unmarshalMap(buf sysenc.Buffer) (*Map, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + return NewMapFromID(MapID(id)) +} + +// marshalMap marshals the fd of a map into a buffer in host endianness. +func marshalMap(m *Map, length int) ([]byte, error) { + if m == nil { + return nil, errors.New("can't marshal a nil Map") + } + + if length != 4 { + return nil, fmt.Errorf("can't marshal map to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, m.fd.Uint()) + return buf, nil +} + +// MapIterator iterates a Map. +// +// See Map.Iterate. +type MapIterator struct { + target *Map + // Temporary storage to avoid allocations in Next(). This is any instead + // of []byte to avoid allocations. + cursor any + count, maxEntries uint32 + done bool + err error +} + +func newMapIterator(target *Map) *MapIterator { + return &MapIterator{ + target: target, + maxEntries: target.maxEntries, + } +} + +// Next decodes the next key and value. +// +// Iterating a hash map from which keys are being deleted is not +// safe. You may see the same key multiple times. Iteration may +// also abort with an error, see IsIterationAborted. +// +// Returns false if there are no more entries. You must check +// the result of Err afterwards. +// +// See Map.Get for further caveats around valueOut. +func (mi *MapIterator) Next(keyOut, valueOut interface{}) bool { + if mi.err != nil || mi.done { + return false + } + + // For array-like maps NextKey returns nil only after maxEntries + // iterations. + for mi.count <= mi.maxEntries { + if mi.cursor == nil { + // Pass nil interface to NextKey to make sure the Map's first key + // is returned. If we pass an uninitialized []byte instead, it'll see a + // non-nil interface and try to marshal it. + mi.cursor = make([]byte, mi.target.keySize) + mi.err = mi.target.NextKey(nil, mi.cursor) + } else { + mi.err = mi.target.NextKey(mi.cursor, mi.cursor) + } + + if errors.Is(mi.err, ErrKeyNotExist) { + mi.done = true + mi.err = nil + return false + } else if mi.err != nil { + mi.err = fmt.Errorf("get next key: %w", mi.err) + return false + } + + mi.count++ + mi.err = mi.target.Lookup(mi.cursor, valueOut) + if errors.Is(mi.err, ErrKeyNotExist) { + // Even though the key should be valid, we couldn't look up + // its value. If we're iterating a hash map this is probably + // because a concurrent delete removed the value before we + // could get it. This means that the next call to NextKeyBytes + // is very likely to restart iteration. + // If we're iterating one of the fd maps like + // ProgramArray it means that a given slot doesn't have + // a valid fd associated. It's OK to continue to the next slot. 
+ continue + } + if mi.err != nil { + mi.err = fmt.Errorf("look up next key: %w", mi.err) + return false + } + + buf := mi.cursor.([]byte) + if ptr, ok := keyOut.(unsafe.Pointer); ok { + copy(unsafe.Slice((*byte)(ptr), len(buf)), buf) + } else { + mi.err = sysenc.Unmarshal(keyOut, buf) + } + + return mi.err == nil + } + + mi.err = fmt.Errorf("%w", ErrIterationAborted) + return false +} + +// Err returns any encountered error. +// +// The method must be called after Next returns nil. +// +// Returns ErrIterationAborted if it wasn't possible to do a full iteration. +func (mi *MapIterator) Err() error { + return mi.err +} + +// MapGetNextID returns the ID of the next eBPF map. +// +// Returns ErrNotExist, if there is no next eBPF map. +func MapGetNextID(startID MapID) (MapID, error) { + attr := &sys.MapGetNextIdAttr{Id: uint32(startID)} + return MapID(attr.NextId), sys.MapGetNextId(attr) +} + +// NewMapFromID returns the map for a given id. +// +// Returns ErrNotExist, if there is no eBPF map with the given id. +func NewMapFromID(id MapID) (*Map, error) { + fd, err := sys.MapGetFdById(&sys.MapGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, err + } + + return newMapFromFD(fd) +} + +// sliceLen returns the length if the value is a slice or an error otherwise. +func sliceLen(slice any) (int, error) { + sliceValue := reflect.ValueOf(slice) + if sliceValue.Kind() != reflect.Slice { + return 0, fmt.Errorf("%T is not a slice", slice) + } + return sliceValue.Len(), nil +} diff --git a/vendor/github.com/cilium/ebpf/marshalers.go b/vendor/github.com/cilium/ebpf/marshalers.go new file mode 100644 index 0000000000..57a0a8e88a --- /dev/null +++ b/vendor/github.com/cilium/ebpf/marshalers.go @@ -0,0 +1,210 @@ +package ebpf + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "slices" + "unsafe" + + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" +) + +// marshalMapSyscallInput converts an arbitrary value into a pointer suitable +// to be passed to the kernel. +// +// As an optimization, it returns the original value if it is an +// unsafe.Pointer. +func marshalMapSyscallInput(data any, length int) (sys.Pointer, error) { + if ptr, ok := data.(unsafe.Pointer); ok { + return sys.NewPointer(ptr), nil + } + + buf, err := sysenc.Marshal(data, length) + if err != nil { + return sys.Pointer{}, err + } + + return buf.Pointer(), nil +} + +func makeMapSyscallOutput(dst any, length int) sysenc.Buffer { + if ptr, ok := dst.(unsafe.Pointer); ok { + return sysenc.UnsafeBuffer(ptr) + } + + _, ok := dst.(encoding.BinaryUnmarshaler) + if ok { + return sysenc.SyscallOutput(nil, length) + } + + return sysenc.SyscallOutput(dst, length) +} + +// appendPerCPUSlice encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. 
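+//
+// Layout sketch with assumed numbers: for 4 possible CPUs and an elemLength
+// of 6, every element is padded to Align(6, 8) = 8 bytes, so the encoded
+// buffer is 4*8 = 32 bytes, with any missing trailing elements left zeroed.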
+func appendPerCPUSlice(buf []byte, slice any, possibleCPUs, elemLength, alignedElemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, errors.New("per-CPU value requires slice") + } + + sliceValue := reflect.ValueOf(slice) + sliceLen := sliceValue.Len() + if sliceLen > possibleCPUs { + return nil, fmt.Errorf("per-CPU value greater than number of CPUs") + } + + // Grow increases the slice's capacity, _if_necessary_ + buf = slices.Grow(buf, alignedElemLength*possibleCPUs) + for i := 0; i < sliceLen; i++ { + elem := sliceValue.Index(i).Interface() + elemBytes, err := sysenc.Marshal(elem, elemLength) + if err != nil { + return nil, err + } + + buf = elemBytes.AppendTo(buf) + buf = append(buf, make([]byte, alignedElemLength-elemLength)...) + } + + // Ensure buf is zero-padded full size. + buf = append(buf, make([]byte, (possibleCPUs-sliceLen)*alignedElemLength)...) + + return buf, nil +} + +// marshalPerCPUValue encodes a slice containing one value per +// possible CPU into a buffer of bytes. +// +// Values are initialized to zero if the slice has less elements than CPUs. +func marshalPerCPUValue(slice any, elemLength int) (sys.Pointer, error) { + possibleCPUs, err := PossibleCPU() + if err != nil { + return sys.Pointer{}, err + } + + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, alignedElemLength*possibleCPUs) + buf, err = appendPerCPUSlice(buf, slice, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return sys.Pointer{}, err + } + + return sys.NewSlicePointer(buf), nil +} + +// marshalBatchPerCPUValue encodes a batch-sized slice of slices containing +// one value per possible CPU into a buffer of bytes. +func marshalBatchPerCPUValue(slice any, batchLen, elemLength int) ([]byte, error) { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return nil, fmt.Errorf("batch value requires a slice") + } + sliceValue := reflect.ValueOf(slice) + + possibleCPUs, err := PossibleCPU() + if err != nil { + return nil, err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return nil, fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + batchLen*possibleCPUs, sliceValue.Len()) + } + alignedElemLength := internal.Align(elemLength, 8) + buf := make([]byte, 0, batchLen*alignedElemLength*possibleCPUs) + for i := 0; i < batchLen; i++ { + batch := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + buf, err = appendPerCPUSlice(buf, batch, possibleCPUs, elemLength, alignedElemLength) + if err != nil { + return nil, fmt.Errorf("batch %d: %w", i, err) + } + } + return buf, nil +} + +// unmarshalPerCPUValue decodes a buffer into a slice containing one value per +// possible CPU. +// +// slice must be a literal slice and not a pointer. 
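+//
+// Call sketch (the element type is an example); the slice must already hold
+// one element per possible CPU and buf the raw per-CPU bytes:
+//
+//	n, _ := PossibleCPU()
+//	vals := make([]uint64, n)
+//	err := unmarshalPerCPUValue(vals, 8, buf)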
+func unmarshalPerCPUValue(slice any, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("per-CPU value requires a slice") + } + + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + + sliceValue := reflect.ValueOf(slice) + if sliceValue.Len() != possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + possibleCPUs, sliceValue.Len()) + } + + sliceElemType := sliceType.Elem() + sliceElemIsPointer := sliceElemType.Kind() == reflect.Ptr + stride := internal.Align(elemLength, 8) + for i := 0; i < possibleCPUs; i++ { + var elem any + v := sliceValue.Index(i) + if sliceElemIsPointer { + if !v.Elem().CanAddr() { + return fmt.Errorf("per-CPU slice elements cannot be nil") + } + elem = v.Elem().Addr().Interface() + } else { + elem = v.Addr().Interface() + } + err := sysenc.Unmarshal(elem, buf[:elemLength]) + if err != nil { + return fmt.Errorf("cpu %d: %w", i, err) + } + + buf = buf[stride:] + } + return nil +} + +// unmarshalBatchPerCPUValue decodes a buffer into a batch-sized slice +// containing one value per possible CPU. +// +// slice must have length batchLen * PossibleCPUs(). +func unmarshalBatchPerCPUValue(slice any, batchLen, elemLength int, buf []byte) error { + sliceType := reflect.TypeOf(slice) + if sliceType.Kind() != reflect.Slice { + return fmt.Errorf("batch requires a slice") + } + + sliceValue := reflect.ValueOf(slice) + possibleCPUs, err := PossibleCPU() + if err != nil { + return err + } + if sliceValue.Len() != batchLen*possibleCPUs { + return fmt.Errorf("per-CPU slice has incorrect length, expected %d, got %d", + sliceValue.Len(), batchLen*possibleCPUs) + } + + fullValueSize := possibleCPUs * internal.Align(elemLength, 8) + if len(buf) != batchLen*fullValueSize { + return fmt.Errorf("input buffer has incorrect length, expected %d, got %d", + len(buf), batchLen*fullValueSize) + } + + for i := 0; i < batchLen; i++ { + elem := sliceValue.Slice(i*possibleCPUs, (i+1)*possibleCPUs).Interface() + if err := unmarshalPerCPUValue(elem, elemLength, buf[:fullValueSize]); err != nil { + return fmt.Errorf("batch %d: %w", i, err) + } + buf = buf[fullValueSize:] + } + return nil +} diff --git a/vendor/github.com/cilium/ebpf/memory.go b/vendor/github.com/cilium/ebpf/memory.go new file mode 100644 index 0000000000..312c967131 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/memory.go @@ -0,0 +1,145 @@ +package ebpf + +import ( + "errors" + "fmt" + "io" + "runtime" + + "github.com/cilium/ebpf/internal/unix" +) + +// Memory is the building block for accessing the memory of specific bpf map +// types (Array and Arena at the time of writing) without going through the bpf +// syscall interface. +// +// Given the fd of a bpf map created with the BPF_F_MMAPABLE flag, a shared +// 'file'-based memory-mapped region can be allocated in the process' address +// space, exposing the bpf map's memory by simply accessing a memory location. + +var ErrReadOnly = errors.New("resource is read-only") + +// Memory implements accessing a Map's memory without making any syscalls. +// Pay attention to the difference between Go and C struct alignment rules. Use +// [structs.HostLayout] on supported Go versions to help with alignment. +// +// Note on memory coherence: avoid using packed structs in memory shared between +// user space and eBPF C programs. This drops a struct's memory alignment to 1, +// forcing the compiler to use single-byte loads and stores for field accesses. 
+// This may lead to partially-written data to be observed from user space. +// +// On most architectures, the memmove implementation used by Go's copy() will +// access data in word-sized chunks. If paired with a matching access pattern on +// the eBPF C side (and if using default memory alignment), accessing shared +// memory without atomics or other synchronization primitives should be sound +// for individual values. For accesses beyond a single value, the usual +// concurrent programming rules apply. +type Memory struct { + b []byte + ro bool +} + +func newMemory(fd, size int) (*Memory, error) { + // Typically, maps created with BPF_F_RDONLY_PROG remain writable from user + // space until frozen. As a security precaution, the kernel doesn't allow + // mapping bpf map memory as read-write into user space if the bpf map was + // frozen, or if it was created using the RDONLY_PROG flag. + // + // The user would be able to write to the map after freezing (since the kernel + // can't change the protection mode of an already-mapped page), while the + // verifier assumes the contents to be immutable. + b, err := unix.Mmap(fd, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) + + // If the map is frozen when an rw mapping is requested, expect EPERM. If the + // map was created with BPF_F_RDONLY_PROG, expect EACCES. + var ro bool + if errors.Is(err, unix.EPERM) || errors.Is(err, unix.EACCES) { + ro = true + b, err = unix.Mmap(fd, 0, size, unix.PROT_READ, unix.MAP_SHARED) + } + if err != nil { + return nil, fmt.Errorf("setting up memory-mapped region: %w", err) + } + + mm := &Memory{ + b, + ro, + } + runtime.SetFinalizer(mm, (*Memory).close) + + return mm, nil +} + +func (mm *Memory) close() { + if err := unix.Munmap(mm.b); err != nil { + panic(fmt.Errorf("unmapping memory: %w", err)) + } + mm.b = nil +} + +// Size returns the size of the memory-mapped region in bytes. +func (mm *Memory) Size() int { + return len(mm.b) +} + +// ReadOnly returns true if the memory-mapped region is read-only. +func (mm *Memory) ReadOnly() bool { + return mm.ro +} + +// bounds returns true if an access at off of the given size is within bounds. +func (mm *Memory) bounds(off uint64, size uint64) bool { + return off+size < uint64(len(mm.b)) +} + +// ReadAt implements [io.ReaderAt]. Useful for creating a new [io.OffsetWriter]. +// +// See [Memory] for details around memory coherence. +func (mm *Memory) ReadAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + + if p == nil { + return 0, fmt.Errorf("input buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("read offset out of range") + } + + n := copy(p, mm.b[off:]) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} + +// WriteAt implements [io.WriterAt]. Useful for creating a new +// [io.SectionReader]. +// +// See [Memory] for details around memory coherence. 
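+//
+// Since Memory implements both io.ReaderAt and io.WriterAt, it can back the
+// standard wrappers; a sketch assuming m was created with BPF_F_MMAPABLE
+// (io.NewOffsetWriter requires Go 1.20+):
+//
+//	mem, _ := m.Memory()
+//	r := io.NewSectionReader(mem, 0, int64(mem.Size()))
+//	w := io.NewOffsetWriter(mem, 0)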
+func (mm *Memory) WriteAt(p []byte, off int64) (int, error) { + if mm.b == nil { + return 0, fmt.Errorf("memory-mapped region closed") + } + if mm.ro { + return 0, fmt.Errorf("memory-mapped region not writable: %w", ErrReadOnly) + } + + if p == nil { + return 0, fmt.Errorf("output buffer p is nil") + } + + if off < 0 || off >= int64(len(mm.b)) { + return 0, fmt.Errorf("write offset out of range") + } + + n := copy(mm.b[off:], p) + if n < len(p) { + return n, io.EOF + } + + return n, nil +} diff --git a/vendor/github.com/cilium/ebpf/netlify.toml b/vendor/github.com/cilium/ebpf/netlify.toml new file mode 100644 index 0000000000..67c83f3b30 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/netlify.toml @@ -0,0 +1,4 @@ +[build] + base = "docs/" + publish = "site/" + command = "mkdocs build" diff --git a/vendor/github.com/cilium/ebpf/prog.go b/vendor/github.com/cilium/ebpf/prog.go new file mode 100644 index 0000000000..4f3ce43bfa --- /dev/null +++ b/vendor/github.com/cilium/ebpf/prog.go @@ -0,0 +1,1177 @@ +package ebpf + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "path/filepath" + "runtime" + "strings" + "time" + "unsafe" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/kallsyms" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/sysenc" + "github.com/cilium/ebpf/internal/unix" +) + +// ErrNotSupported is returned whenever the kernel doesn't support a feature. +var ErrNotSupported = internal.ErrNotSupported + +// errBadRelocation is returned when the verifier rejects a program due to a +// bad CO-RE relocation. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errBadRelocation = errors.New("bad CO-RE relocation") + +// errUnknownKfunc is returned when the verifier rejects a program due to an +// unknown kfunc. +// +// This error is detected based on heuristics and therefore may not be reliable. +var errUnknownKfunc = errors.New("unknown kfunc") + +// ProgramID represents the unique ID of an eBPF program. +type ProgramID uint32 + +const ( + // Number of bytes to pad the output buffer for BPF_PROG_TEST_RUN. + // This is currently the maximum of spare space allocated for SKB + // and XDP programs, and equal to XDP_PACKET_HEADROOM + NET_IP_ALIGN. + outputPad = 256 + 2 +) + +// minVerifierLogSize is the default number of bytes allocated for the +// verifier log. +const minVerifierLogSize = 64 * 1024 + +// maxVerifierLogSize is the maximum size of verifier log buffer the kernel +// will accept before returning EINVAL. May be increased to MaxUint32 in the +// future, but avoid the unnecessary EINVAL for now. +const maxVerifierLogSize = math.MaxUint32 >> 2 + +// ProgramOptions control loading a program into the kernel. +type ProgramOptions struct { + // Bitmap controlling the detail emitted by the kernel's eBPF verifier log. + // LogLevel-type values can be ORed together to request specific kinds of + // verifier output. See the documentation on [ebpf.LogLevel] for details. + // + // opts.LogLevel = (ebpf.LogLevelBranch | ebpf.LogLevelStats) + // + // If left to its default value, the program will first be loaded without + // verifier output enabled. Upon error, the program load will be repeated + // with LogLevelBranch and the given (or default) LogSize value. 
+ // + // Unless LogDisabled is set, setting this to a non-zero value will enable the verifier + // log, populating the [ebpf.Program.VerifierLog] field on successful loads + // and including detailed verifier errors if the program is rejected. This + // will always allocate an output buffer, but will result in only a single + // attempt at loading the program. + LogLevel LogLevel + + // Starting size of the verifier log buffer. If the verifier log is larger + // than this size, the buffer will be grown to fit the entire log. Leave at + // its default value unless troubleshooting. + LogSizeStart uint32 + + // Disables the verifier log completely, regardless of other options. + LogDisabled bool + + // Type information used for CO-RE relocations. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel BTF from a well-known location if nil. + KernelTypes *btf.Spec + + // Type information used for CO-RE relocations of kernel modules, + // indexed by module name. + // + // This is useful in environments where the kernel BTF is not available + // (containers) or where it is in a non-standard location. Defaults to + // use the kernel module BTF from a well-known location if nil. + KernelModuleTypes map[string]*btf.Spec +} + +// ProgramSpec defines a Program. +type ProgramSpec struct { + // Name is passed to the kernel as a debug aid. Must only contain + // alpha numeric and '_' characters. + Name string + + // Type determines at which hook in the kernel a program will run. + Type ProgramType + + // AttachType of the program, needed to differentiate allowed context + // accesses in some newer program types like CGroupSockAddr. + // + // Available on kernels 4.17 and later. + AttachType AttachType + + // Name of a kernel data structure or function to attach to. Its + // interpretation depends on Type and AttachType. + AttachTo string + + // The program to attach to. Must be provided manually. + AttachTarget *Program + + // The name of the ELF section this program originated from. + SectionName string + + Instructions asm.Instructions + + // Flags is passed to the kernel and specifies additional program + // load attributes. + Flags uint32 + + // License of the program. Some helpers are only available if + // the license is deemed compatible with the GPL. + // + // See https://www.kernel.org/doc/html/latest/process/license-rules.html#id1 + License string + + // Version used by Kprobe programs. + // + // Deprecated on kernels 5.0 and later. Leave empty to let the library + // detect this value automatically. + KernelVersion uint32 + + // The byte order this program was compiled for, may be nil. + ByteOrder binary.ByteOrder +} + +// Copy returns a copy of the spec. +func (ps *ProgramSpec) Copy() *ProgramSpec { + if ps == nil { + return nil + } + + cpy := *ps + cpy.Instructions = make(asm.Instructions, len(ps.Instructions)) + copy(cpy.Instructions, ps.Instructions) + return &cpy +} + +// Tag calculates the kernel tag for a series of instructions. +// +// Use asm.Instructions.Tag if you need to calculate for non-native endianness. +func (ps *ProgramSpec) Tag() (string, error) { + return ps.Instructions.Tag(internal.NativeEndian) +} + +// kernelModule returns the kernel module providing the symbol in +// ProgramSpec.AttachTo, if any. Returns an empty string if the symbol is not +// present or not part of a kernel module. 
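// Sketch: requesting verifier output through ProgramOptions and reading the
// full log from the VerifierError when the kernel rejects a program. The
// deliberately invalid instruction sequence only exists to trigger a
// rejection; loading requires CAP_BPF or root.
package main

import (
	"errors"
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	spec := &ebpf.ProgramSpec{
		Name: "log_demo",
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Reg(asm.R0, asm.R2), // R2 is never initialized: the verifier rejects this
			asm.Return(),
		},
		License: "GPL",
	}

	_, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{
		LogLevel: ebpf.LogLevelBranch | ebpf.LogLevelStats,
	})

	var verr *ebpf.VerifierError
	if errors.As(err, &verr) {
		fmt.Printf("rejected:\n%+v\n", verr) // %+v prints the complete verifier log
	}
}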
+func (ps *ProgramSpec) kernelModule() (string, error) {
+	if ps.targetsKernelModule() {
+		return kallsyms.Module(ps.AttachTo)
+	}
+
+	return "", nil
+}
+
+// targetsKernelModule returns true if the program supports being attached to a
+// symbol provided by a kernel module.
+func (ps *ProgramSpec) targetsKernelModule() bool {
+	if ps.AttachTo == "" {
+		return false
+	}
+
+	switch ps.Type {
+	case Tracing:
+		switch ps.AttachType {
+		case AttachTraceFEntry, AttachTraceFExit:
+			return true
+		}
+	case Kprobe:
+		return true
+	}
+
+	return false
+}
+
+// VerifierError is returned by [NewProgram] and [NewProgramWithOptions] if a
+// program is rejected by the verifier.
+//
+// Use [errors.As] to access the error.
+type VerifierError = internal.VerifierError
+
+// Program represents a BPF program loaded into the kernel.
+//
+// It is not safe to close a Program which is used by other goroutines.
+type Program struct {
+	// Contains the output of the kernel verifier if enabled,
+	// otherwise it is empty.
+	VerifierLog string
+
+	fd         *sys.FD
+	name       string
+	pinnedPath string
+	typ        ProgramType
+}
+
+// NewProgram creates a new Program.
+//
+// See [NewProgramWithOptions] for details.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgram(spec *ProgramSpec) (*Program, error) {
+	return NewProgramWithOptions(spec, ProgramOptions{})
+}
+
+// NewProgramWithOptions creates a new Program.
+//
+// Loading a program for the first time will perform
+// feature detection by loading small, temporary programs.
+//
+// Returns a [VerifierError] containing the full verifier log if the program is
+// rejected by the kernel.
+func NewProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+	if spec == nil {
+		return nil, errors.New("can't load a program from a nil spec")
+	}
+
+	prog, err := newProgramWithOptions(spec, opts)
+	if errors.Is(err, asm.ErrUnsatisfiedMapReference) {
+		return nil, fmt.Errorf("cannot load program without loading its whole collection: %w", err)
+	}
+	return prog, err
+}
+
+var (
+	coreBadLoad = []byte(fmt.Sprintf("(18) r10 = 0x%x\n", btf.COREBadRelocationSentinel))
+	// This log message was introduced by ebb676daa1a3 ("bpf: Print function name in
+	// addition to function id") which first appeared in v4.10 and has remained
+	// unchanged since.
+	coreBadCall  = []byte(fmt.Sprintf("invalid func unknown#%d\n", btf.COREBadRelocationSentinel))
+	kfuncBadCall = []byte(fmt.Sprintf("invalid func unknown#%d\n", kfuncCallPoisonBase))
+)
+
+func newProgramWithOptions(spec *ProgramSpec, opts ProgramOptions) (*Program, error) {
+	if len(spec.Instructions) == 0 {
+		return nil, errors.New("instructions cannot be empty")
+	}
+
+	if spec.Type == UnspecifiedProgram {
+		return nil, errors.New("can't load program of unspecified type")
+	}
+
+	if spec.ByteOrder != nil && spec.ByteOrder != internal.NativeEndian {
+		return nil, fmt.Errorf("can't load %s program on %s", spec.ByteOrder, internal.NativeEndian)
+	}
+
+	// Kernels before 5.0 (6c4fc209fcf9 "bpf: remove useless version check for prog load")
+	// require the version field to be set to the value of the KERNEL_VERSION
+	// macro for kprobe-type programs.
+	// Overwrite Kprobe program version if set to zero or the magic version constant.
+ kv := spec.KernelVersion + if spec.Type == Kprobe && (kv == 0 || kv == internal.MagicKernelVersion) { + v, err := linux.KernelVersion() + if err != nil { + return nil, fmt.Errorf("detecting kernel version: %w", err) + } + kv = v.Kernel() + } + + attr := &sys.ProgLoadAttr{ + ProgType: sys.ProgType(spec.Type), + ProgFlags: spec.Flags, + ExpectedAttachType: sys.AttachType(spec.AttachType), + License: sys.NewStringPointer(spec.License), + KernVersion: kv, + } + + if haveObjName() == nil { + attr.ProgName = sys.NewObjName(spec.Name) + } + + insns := make(asm.Instructions, len(spec.Instructions)) + copy(insns, spec.Instructions) + + kmodName, err := spec.kernelModule() + if err != nil { + return nil, fmt.Errorf("kernel module search: %w", err) + } + + var targets []*btf.Spec + if opts.KernelTypes != nil { + targets = append(targets, opts.KernelTypes) + } + if kmodName != "" && opts.KernelModuleTypes != nil { + if modBTF, ok := opts.KernelModuleTypes[kmodName]; ok { + targets = append(targets, modBTF) + } + } + + var b btf.Builder + if err := applyRelocations(insns, targets, kmodName, spec.ByteOrder, &b); err != nil { + return nil, fmt.Errorf("apply CO-RE relocations: %w", err) + } + + errExtInfos := haveProgramExtInfos() + if !b.Empty() && errors.Is(errExtInfos, ErrNotSupported) { + // There is at least one CO-RE relocation which relies on a stable local + // type ID. + // Return ErrNotSupported instead of E2BIG if there is no BTF support. + return nil, errExtInfos + } + + if errExtInfos == nil { + // Only add func and line info if the kernel supports it. This allows + // BPF compiled with modern toolchains to work on old kernels. + fib, lib, err := btf.MarshalExtInfos(insns, &b) + if err != nil { + return nil, fmt.Errorf("marshal ext_infos: %w", err) + } + + attr.FuncInfoRecSize = btf.FuncInfoSize + attr.FuncInfoCnt = uint32(len(fib)) / btf.FuncInfoSize + attr.FuncInfo = sys.NewSlicePointer(fib) + + attr.LineInfoRecSize = btf.LineInfoSize + attr.LineInfoCnt = uint32(len(lib)) / btf.LineInfoSize + attr.LineInfo = sys.NewSlicePointer(lib) + } + + if !b.Empty() { + handle, err := btf.NewHandle(&b) + if err != nil { + return nil, fmt.Errorf("load BTF: %w", err) + } + defer handle.Close() + + attr.ProgBtfFd = uint32(handle.FD()) + } + + kconfig, err := resolveKconfigReferences(insns) + if err != nil { + return nil, fmt.Errorf("resolve .kconfig: %w", err) + } + defer kconfig.Close() + + if err := resolveKsymReferences(insns); err != nil { + return nil, fmt.Errorf("resolve .ksyms: %w", err) + } + + if err := fixupAndValidate(insns); err != nil { + return nil, err + } + + handles, err := fixupKfuncs(insns) + if err != nil { + return nil, fmt.Errorf("fixing up kfuncs: %w", err) + } + defer handles.Close() + + if len(handles) > 0 { + fdArray := handles.fdArray() + attr.FdArray = sys.NewPointer(unsafe.Pointer(&fdArray[0])) + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + err = insns.Marshal(buf, internal.NativeEndian) + if err != nil { + return nil, err + } + + bytecode := buf.Bytes() + attr.Insns = sys.NewSlicePointer(bytecode) + attr.InsnCnt = uint32(len(bytecode) / asm.InstructionSize) + + if spec.AttachTarget != nil { + targetID, err := findTargetInProgram(spec.AttachTarget, spec.AttachTo, spec.Type, spec.AttachType) + if err != nil { + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + attr.AttachBtfObjFd = uint32(spec.AttachTarget.FD()) + defer runtime.KeepAlive(spec.AttachTarget) + } else if spec.AttachTo != "" { + 
module, targetID, err := findProgramTargetInKernel(spec.AttachTo, spec.Type, spec.AttachType) + if err != nil && !errors.Is(err, errUnrecognizedAttachType) { + // We ignore errUnrecognizedAttachType since AttachTo may be non-empty + // for programs that don't attach anywhere. + return nil, fmt.Errorf("attach %s/%s: %w", spec.Type, spec.AttachType, err) + } + + attr.AttachBtfId = targetID + if module != nil { + attr.AttachBtfObjFd = uint32(module.FD()) + defer module.Close() + } + } + + // The caller requested a specific verifier log level. Set up the log buffer + // so that there is a chance of loading the program in a single shot. + logSize := internal.Between(opts.LogSizeStart, minVerifierLogSize, maxVerifierLogSize) + var logBuf []byte + if !opts.LogDisabled && opts.LogLevel != 0 { + logBuf = make([]byte, logSize) + attr.LogLevel = opts.LogLevel + attr.LogSize = uint32(len(logBuf)) + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + for { + var fd *sys.FD + fd, err = sys.ProgLoad(attr) + if err == nil { + return &Program{unix.ByteSliceToString(logBuf), fd, spec.Name, "", spec.Type}, nil + } + + if opts.LogDisabled { + break + } + + if attr.LogTrueSize != 0 && attr.LogSize >= attr.LogTrueSize { + // The log buffer already has the correct size. + break + } + + if attr.LogSize != 0 && !errors.Is(err, unix.ENOSPC) { + // Logging is enabled and the error is not ENOSPC, so we can infer + // that the log buffer is large enough. + break + } + + if attr.LogLevel == 0 { + // Logging is not enabled but loading the program failed. Enable + // basic logging. + attr.LogLevel = LogLevelBranch + } + + // Make an educated guess how large the buffer should be by multiplying. + // Ensure the size doesn't overflow. + const factor = 2 + logSize = internal.Between(logSize, minVerifierLogSize, maxVerifierLogSize/factor) + logSize *= factor + + if attr.LogTrueSize != 0 { + // The kernel has given us a hint how large the log buffer has to be. + logSize = attr.LogTrueSize + } + + logBuf = make([]byte, logSize) + attr.LogSize = logSize + attr.LogBuf = sys.NewSlicePointer(logBuf) + } + + end := bytes.IndexByte(logBuf, 0) + if end < 0 { + end = len(logBuf) + } + + tail := logBuf[max(end-256, 0):end] + switch { + case errors.Is(err, unix.EPERM): + if len(logBuf) > 0 && logBuf[0] == 0 { + // EPERM due to RLIMIT_MEMLOCK happens before the verifier, so we can + // check that the log is empty to reduce false positives. + return nil, fmt.Errorf("load program: %w (MEMLOCK may be too low, consider rlimit.RemoveMemlock)", err) + } + + case errors.Is(err, unix.EFAULT): + // EFAULT is returned when the kernel hits a verifier bug, and always + // overrides ENOSPC, defeating the buffer growth strategy. Warn the user + // that they may need to increase the buffer size manually. + return nil, fmt.Errorf("load program: %w (hit verifier bug, increase LogSizeStart to fit the log and check dmesg)", err) + + case errors.Is(err, unix.EINVAL): + if bytes.Contains(tail, coreBadCall) { + err = errBadRelocation + break + } else if bytes.Contains(tail, kfuncBadCall) { + err = errUnknownKfunc + break + } + + case errors.Is(err, unix.EACCES): + if bytes.Contains(tail, coreBadLoad) { + err = errBadRelocation + break + } + } + + // hasFunctionReferences may be expensive, so check it last. 
+ if (errors.Is(err, unix.EINVAL) || errors.Is(err, unix.EPERM)) && + hasFunctionReferences(spec.Instructions) { + if err := haveBPFToBPFCalls(); err != nil { + return nil, fmt.Errorf("load program: %w", err) + } + } + + return nil, internal.ErrorWithLog("load program", err, logBuf) +} + +// NewProgramFromFD creates a program from a raw fd. +// +// You should not use fd after calling this function. +// +// Requires at least Linux 4.10. +func NewProgramFromFD(fd int) (*Program, error) { + f, err := sys.NewFD(fd) + if err != nil { + return nil, err + } + + return newProgramFromFD(f) +} + +// NewProgramFromID returns the program for a given id. +// +// Returns ErrNotExist, if there is no eBPF program with the given id. +func NewProgramFromID(id ProgramID) (*Program, error) { + fd, err := sys.ProgGetFdById(&sys.ProgGetFdByIdAttr{ + Id: uint32(id), + }) + if err != nil { + return nil, fmt.Errorf("get program by id: %w", err) + } + + return newProgramFromFD(fd) +} + +func newProgramFromFD(fd *sys.FD) (*Program, error) { + info, err := newProgramInfoFromFd(fd) + if err != nil { + fd.Close() + return nil, fmt.Errorf("discover program type: %w", err) + } + + return &Program{"", fd, info.Name, "", info.Type}, nil +} + +func (p *Program) String() string { + if p.name != "" { + return fmt.Sprintf("%s(%s)#%v", p.typ, p.name, p.fd) + } + return fmt.Sprintf("%s(%v)", p.typ, p.fd) +} + +// Type returns the underlying type of the program. +func (p *Program) Type() ProgramType { + return p.typ +} + +// Info returns metadata about the program. +// +// Requires at least 4.10. +func (p *Program) Info() (*ProgramInfo, error) { + return newProgramInfoFromFd(p.fd) +} + +// Handle returns a reference to the program's type information in the kernel. +// +// Returns ErrNotSupported if the kernel has no BTF support, or if there is no +// BTF associated with the program. +func (p *Program) Handle() (*btf.Handle, error) { + info, err := p.Info() + if err != nil { + return nil, err + } + + id, ok := info.BTFID() + if !ok { + return nil, fmt.Errorf("program %s: retrieve BTF ID: %w", p, ErrNotSupported) + } + + return btf.NewHandleFromID(id) +} + +// FD gets the file descriptor of the Program. +// +// It is invalid to call this function after Close has been called. +func (p *Program) FD() int { + return p.fd.Int() +} + +// Clone creates a duplicate of the Program. +// +// Closing the duplicate does not affect the original, and vice versa. +// +// Cloning a nil Program returns nil. +func (p *Program) Clone() (*Program, error) { + if p == nil { + return nil, nil + } + + dup, err := p.fd.Dup() + if err != nil { + return nil, fmt.Errorf("can't clone program: %w", err) + } + + return &Program{p.VerifierLog, dup, p.name, "", p.typ}, nil +} + +// Pin persists the Program on the BPF virtual file system past the lifetime of +// the process that created it +// +// Calling Pin on a previously pinned program will overwrite the path, except when +// the new path already exists. Re-pinning across filesystems is not supported. +// +// This requires bpffs to be mounted above fileName. +// See https://docs.cilium.io/en/stable/network/kubernetes/configuration/#mounting-bpffs-with-systemd +func (p *Program) Pin(fileName string) error { + if err := sys.Pin(p.pinnedPath, fileName, p.fd); err != nil { + return err + } + p.pinnedPath = fileName + return nil +} + +// Unpin removes the persisted state for the Program from the BPF virtual filesystem. +// +// Failed calls to Unpin will not alter the state returned by IsPinned. 
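// Sketch: re-opening a program that was pinned with Pin above. The pin path
// is hypothetical; bpffs is conventionally mounted at /sys/fs/bpf.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	const path = "/sys/fs/bpf/my_prog" // created elsewhere via prog.Pin(path)

	prog, err := ebpf.LoadPinnedProgram(path, nil)
	if err != nil {
		panic(err)
	}
	defer prog.Close()

	// Programs opened from a pin report IsPinned() == true.
	fmt.Println(prog.Type(), prog.IsPinned())
}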
+// +// Unpinning an unpinned Program returns nil. +func (p *Program) Unpin() error { + if err := sys.Unpin(p.pinnedPath); err != nil { + return err + } + p.pinnedPath = "" + return nil +} + +// IsPinned returns true if the Program has a non-empty pinned path. +func (p *Program) IsPinned() bool { + return p.pinnedPath != "" +} + +// Close the Program's underlying file descriptor, which could unload +// the program from the kernel if it is not pinned or attached to a +// kernel hook. +func (p *Program) Close() error { + if p == nil { + return nil + } + + return p.fd.Close() +} + +// Various options for Run'ing a Program +type RunOptions struct { + // Program's data input. Required field. + // + // The kernel expects at least 14 bytes input for an ethernet header for + // XDP and SKB programs. + Data []byte + // Program's data after Program has run. Caller must allocate. Optional field. + DataOut []byte + // Program's context input. Optional field. + Context interface{} + // Program's context after Program has run. Must be a pointer or slice. Optional field. + ContextOut interface{} + // Minimum number of times to run Program. Optional field. Defaults to 1. + // + // The program may be executed more often than this due to interruptions, e.g. + // when runtime.AllThreadsSyscall is invoked. + Repeat uint32 + // Optional flags. + Flags uint32 + // CPU to run Program on. Optional field. + // Note not all program types support this field. + CPU uint32 + // Called whenever the syscall is interrupted, and should be set to testing.B.ResetTimer + // or similar. Typically used during benchmarking. Optional field. + // + // Deprecated: use [testing.B.ReportMetric] with unit "ns/op" instead. + Reset func() +} + +// Test runs the Program in the kernel with the given input and returns the +// value returned by the eBPF program. +// +// Note: the kernel expects at least 14 bytes input for an ethernet header for +// XDP and SKB programs. +// +// This function requires at least Linux 4.12. +func (p *Program) Test(in []byte) (uint32, []byte, error) { + // Older kernels ignore the dataSizeOut argument when copying to user space. + // Combined with things like bpf_xdp_adjust_head() we don't really know what the final + // size will be. Hence we allocate an output buffer which we hope will always be large + // enough, and panic if the kernel wrote past the end of the allocation. + // See https://patchwork.ozlabs.org/cover/1006822/ + var out []byte + if len(in) > 0 { + out = make([]byte, len(in)+outputPad) + } + + opts := RunOptions{ + Data: in, + DataOut: out, + Repeat: 1, + } + + ret, _, err := p.run(&opts) + if err != nil { + return ret, nil, fmt.Errorf("test program: %w", err) + } + return ret, opts.DataOut, nil +} + +// Run runs the Program in kernel with given RunOptions. +// +// Note: the same restrictions from Test apply. +func (p *Program) Run(opts *RunOptions) (uint32, error) { + if opts == nil { + opts = &RunOptions{} + } + + ret, _, err := p.run(opts) + if err != nil { + return ret, fmt.Errorf("run program: %w", err) + } + return ret, nil +} + +// Benchmark runs the Program with the given input for a number of times +// and returns the time taken per iteration. +// +// Returns the result of the last execution of the program and the time per +// run or an error. reset is called whenever the benchmark syscall is +// interrupted, and should be set to testing.B.ResetTimer or similar. +// +// This function requires at least Linux 4.12. 
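// Sketch: exercising a minimal SocketFilter with Program.Test. The 14-byte
// input satisfies the Ethernet-header minimum mentioned above; loading and
// running require CAP_BPF or root on a 4.12+ kernel.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Name: "accept_all",
		Type: ebpf.SocketFilter,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, -1), // accept the whole packet
			asm.Return(),
		},
		License: "GPL",
	})
	if err != nil {
		panic(err)
	}
	defer prog.Close()

	ret, _, err := prog.Test(make([]byte, 14))
	if err != nil {
		panic(err)
	}
	fmt.Println("return value:", ret)
}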
+func (p *Program) Benchmark(in []byte, repeat int, reset func()) (uint32, time.Duration, error) { + if uint(repeat) > math.MaxUint32 { + return 0, 0, fmt.Errorf("repeat is too high") + } + + opts := RunOptions{ + Data: in, + Repeat: uint32(repeat), + Reset: reset, + } + + ret, total, err := p.run(&opts) + if err != nil { + return ret, total, fmt.Errorf("benchmark program: %w", err) + } + return ret, total, nil +} + +var haveProgRun = internal.NewFeatureTest("BPF_PROG_RUN", func() error { + prog, err := NewProgram(&ProgramSpec{ + // SocketFilter does not require privileges on newer kernels. + Type: SocketFilter, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + License: "MIT", + }) + if err != nil { + // This may be because we lack sufficient permissions, etc. + return err + } + defer prog.Close() + + in := internal.EmptyBPFContext + attr := sys.ProgRunAttr{ + ProgFd: uint32(prog.FD()), + DataSizeIn: uint32(len(in)), + DataIn: sys.NewSlicePointer(in), + } + + err = sys.ProgRun(&attr) + switch { + case errors.Is(err, unix.EINVAL): + // Check for EINVAL specifically, rather than err != nil since we + // otherwise misdetect due to insufficient permissions. + return internal.ErrNotSupported + + case errors.Is(err, unix.EINTR): + // We know that PROG_TEST_RUN is supported if we get EINTR. + return nil + + case errors.Is(err, sys.ENOTSUPP): + // The first PROG_TEST_RUN patches shipped in 4.12 didn't include + // a test runner for SocketFilter. ENOTSUPP means PROG_TEST_RUN is + // supported, but not for the program type used in the probe. + return nil + } + + return err +}, "4.12") + +func (p *Program) run(opts *RunOptions) (uint32, time.Duration, error) { + if uint(len(opts.Data)) > math.MaxUint32 { + return 0, 0, fmt.Errorf("input is too long") + } + + if err := haveProgRun(); err != nil { + return 0, 0, err + } + + var ctxBytes []byte + if opts.Context != nil { + ctx := new(bytes.Buffer) + if err := binary.Write(ctx, internal.NativeEndian, opts.Context); err != nil { + return 0, 0, fmt.Errorf("cannot serialize context: %v", err) + } + ctxBytes = ctx.Bytes() + } + + var ctxOut []byte + if opts.ContextOut != nil { + ctxOut = make([]byte, binary.Size(opts.ContextOut)) + } + + attr := sys.ProgRunAttr{ + ProgFd: p.fd.Uint(), + DataSizeIn: uint32(len(opts.Data)), + DataSizeOut: uint32(len(opts.DataOut)), + DataIn: sys.NewSlicePointer(opts.Data), + DataOut: sys.NewSlicePointer(opts.DataOut), + Repeat: uint32(opts.Repeat), + CtxSizeIn: uint32(len(ctxBytes)), + CtxSizeOut: uint32(len(ctxOut)), + CtxIn: sys.NewSlicePointer(ctxBytes), + CtxOut: sys.NewSlicePointer(ctxOut), + Flags: opts.Flags, + Cpu: opts.CPU, + } + +retry: + for { + err := sys.ProgRun(&attr) + if err == nil { + break retry + } + + if errors.Is(err, unix.EINTR) { + if attr.Repeat <= 1 { + // Older kernels check whether enough repetitions have been + // executed only after checking for pending signals. + // + // run signal? done? run ... + // + // As a result we can get EINTR for repeat==1 even though + // the program was run exactly once. Treat this as a + // successful run instead. + // + // Since commit 607b9cc92bd7 ("bpf: Consolidate shared test timing code") + // the conditions are reversed: + // run done? signal? ... 
+ break retry + } + + if opts.Reset != nil { + opts.Reset() + } + continue retry + } + + if errors.Is(err, sys.ENOTSUPP) { + return 0, 0, fmt.Errorf("kernel doesn't support running %s: %w", p.Type(), ErrNotSupported) + } + + return 0, 0, err + } + + if opts.DataOut != nil { + if int(attr.DataSizeOut) > cap(opts.DataOut) { + // Houston, we have a problem. The program created more data than we allocated, + // and the kernel wrote past the end of our buffer. + panic("kernel wrote past end of output buffer") + } + opts.DataOut = opts.DataOut[:int(attr.DataSizeOut)] + } + + if len(ctxOut) != 0 { + b := bytes.NewReader(ctxOut) + if err := binary.Read(b, internal.NativeEndian, opts.ContextOut); err != nil { + return 0, 0, fmt.Errorf("failed to decode ContextOut: %v", err) + } + } + + total := time.Duration(attr.Duration) * time.Nanosecond + return attr.Retval, total, nil +} + +func unmarshalProgram(buf sysenc.Buffer) (*Program, error) { + var id uint32 + if err := buf.Unmarshal(&id); err != nil { + return nil, err + } + + // Looking up an entry in a nested map or prog array returns an id, + // not an fd. + return NewProgramFromID(ProgramID(id)) +} + +func marshalProgram(p *Program, length int) ([]byte, error) { + if p == nil { + return nil, errors.New("can't marshal a nil Program") + } + + if length != 4 { + return nil, fmt.Errorf("can't marshal program to %d bytes", length) + } + + buf := make([]byte, 4) + internal.NativeEndian.PutUint32(buf, p.fd.Uint()) + return buf, nil +} + +// LoadPinnedProgram loads a Program from a pin (file) on the BPF virtual +// filesystem. +// +// Requires at least Linux 4.11. +func LoadPinnedProgram(fileName string, opts *LoadPinOptions) (*Program, error) { + fd, typ, err := sys.ObjGetTyped(&sys.ObjGetAttr{ + Pathname: sys.NewStringPointer(fileName), + FileFlags: opts.Marshal(), + }) + if err != nil { + return nil, err + } + + if typ != sys.BPF_TYPE_PROG { + _ = fd.Close() + return nil, fmt.Errorf("%s is not a Program", fileName) + } + + info, err := newProgramInfoFromFd(fd) + if err != nil { + _ = fd.Close() + return nil, fmt.Errorf("info for %s: %w", fileName, err) + } + + var progName string + if haveObjName() == nil { + progName = info.Name + } else { + progName = filepath.Base(fileName) + } + + return &Program{"", fd, progName, fileName, info.Type}, nil +} + +// SanitizeName replaces all invalid characters in name with replacement. +// Passing a negative value for replacement will delete characters instead +// of replacing them. Use this to automatically generate valid names for maps +// and programs at runtime. +// +// The set of allowed characters depends on the running kernel version. +// Dots are only allowed as of kernel 5.2. +func SanitizeName(name string, replacement rune) string { + return strings.Map(func(char rune) rune { + if invalidBPFObjNameChar(char) { + return replacement + } + return char + }, name) +} + +// ProgramGetNextID returns the ID of the next eBPF program. +// +// Returns ErrNotExist, if there is no next eBPF program. +func ProgramGetNextID(startID ProgramID) (ProgramID, error) { + attr := &sys.ProgGetNextIdAttr{Id: uint32(startID)} + return ProgramID(attr.NextId), sys.ProgGetNextId(attr) +} + +// BindMap binds map to the program and is only released once program is released. +// +// This may be used in cases where metadata should be associated with the program +// which otherwise does not contain any references to the map. 
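// Sketch: walking every loaded program with ProgramGetNextID and
// NewProgramFromID (both defined above) and printing its type and name.
// Opening programs by ID needs CAP_SYS_ADMIN or CAP_BPF.
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf"
)

func main() {
	var id ebpf.ProgramID
	for {
		next, err := ebpf.ProgramGetNextID(id)
		if errors.Is(err, os.ErrNotExist) {
			return // no more programs
		}
		if err != nil {
			panic(err)
		}
		id = next

		prog, err := ebpf.NewProgramFromID(id)
		if err != nil {
			continue // the program may have been unloaded in the meantime
		}
		info, err := prog.Info()
		if err == nil {
			fmt.Printf("%d: %s %s\n", id, prog.Type(), info.Name)
		}
		prog.Close()
	}
}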
+func (p *Program) BindMap(m *Map) error { + attr := &sys.ProgBindMapAttr{ + ProgFd: uint32(p.FD()), + MapFd: uint32(m.FD()), + } + + return sys.ProgBindMap(attr) +} + +var errUnrecognizedAttachType = errors.New("unrecognized attach type") + +// find an attach target type in the kernel. +// +// name, progType and attachType determine which type we need to attach to. +// +// The attach target may be in a loaded kernel module. +// In that case the returned handle will be non-nil. +// The caller is responsible for closing the handle. +// +// Returns errUnrecognizedAttachType if the combination of progType and attachType +// is not recognised. +func findProgramTargetInKernel(name string, progType ProgramType, attachType AttachType) (*btf.Handle, btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var ( + typeName, featureName string + target btf.Type + ) + + switch (match{progType, attachType}) { + case match{LSM, AttachLSMMac}: + typeName = "bpf_lsm_" + name + featureName = name + " LSM hook" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceIter}: + typeName = "bpf_iter_" + name + featureName = name + " iterator" + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFEntry}: + typeName = name + featureName = fmt.Sprintf("fentry %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceFExit}: + typeName = name + featureName = fmt.Sprintf("fexit %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachModifyReturn}: + typeName = name + featureName = fmt.Sprintf("fmod_ret %s", name) + target = (*btf.Func)(nil) + case match{Tracing, AttachTraceRawTp}: + typeName = fmt.Sprintf("btf_trace_%s", name) + featureName = fmt.Sprintf("raw_tp %s", name) + target = (*btf.Typedef)(nil) + default: + return nil, 0, errUnrecognizedAttachType + } + + spec, err := btf.LoadKernelSpec() + if err != nil { + return nil, 0, fmt.Errorf("load kernel spec: %w", err) + } + + spec, module, err := findTargetInKernel(spec, typeName, &target) + if errors.Is(err, btf.ErrNotFound) { + return nil, 0, &internal.UnsupportedFeatureError{Name: featureName} + } + // See cilium/ebpf#894. Until we can disambiguate between equally-named kernel + // symbols, we should explicitly refuse program loads. They will not reliably + // do what the caller intended. + if errors.Is(err, btf.ErrMultipleMatches) { + return nil, 0, fmt.Errorf("attaching to ambiguous kernel symbol is not supported: %w", err) + } + if err != nil { + return nil, 0, fmt.Errorf("find target for %s: %w", featureName, err) + } + + id, err := spec.TypeID(target) + if err != nil { + module.Close() + return nil, 0, err + } + + return module, id, nil +} + +// findTargetInKernel attempts to find a named type in the current kernel. +// +// target will point at the found type after a successful call. Searches both +// vmlinux and any loaded modules. +// +// Returns a non-nil handle if the type was found in a module, or btf.ErrNotFound +// if the type wasn't found at all. 
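// Sketch: the vmlinux lookup that findProgramTargetInKernel performs for an
// fentry target, done by hand with the public btf package. "tcp_connect" is
// just an example symbol; LSM hooks use the "bpf_lsm_" prefix, iterators
// "bpf_iter_", and raw tracepoints "btf_trace_".
package main

import (
	"fmt"

	"github.com/cilium/ebpf/btf"
)

func main() {
	spec, err := btf.LoadKernelSpec()
	if err != nil {
		panic(err)
	}

	var fn *btf.Func
	if err := spec.TypeByName("tcp_connect", &fn); err != nil {
		panic(err)
	}

	id, err := spec.TypeID(fn)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s has BTF type ID %d\n", fn.Name, id)
}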
+func findTargetInKernel(kernelSpec *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + err := kernelSpec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + spec, module, err := findTargetInModule(kernelSpec, typeName, target) + if err != nil { + return nil, nil, fmt.Errorf("find target in modules: %w", err) + } + return spec, module, nil + } + if err != nil { + return nil, nil, fmt.Errorf("find target in vmlinux: %w", err) + } + return kernelSpec, nil, err +} + +// findTargetInModule attempts to find a named type in any loaded module. +// +// base must contain the kernel's types and is used to parse kmod BTF. Modules +// are searched in the order they were loaded. +// +// Returns btf.ErrNotFound if the target can't be found in any module. +func findTargetInModule(base *btf.Spec, typeName string, target *btf.Type) (*btf.Spec, *btf.Handle, error) { + it := new(btf.HandleIterator) + defer it.Handle.Close() + + for it.Next() { + info, err := it.Handle.Info() + if err != nil { + return nil, nil, fmt.Errorf("get info for BTF ID %d: %w", it.ID, err) + } + + if !info.IsModule() { + continue + } + + spec, err := it.Handle.Spec(base) + if err != nil { + return nil, nil, fmt.Errorf("parse types for module %s: %w", info.Name, err) + } + + err = spec.TypeByName(typeName, target) + if errors.Is(err, btf.ErrNotFound) { + continue + } + if err != nil { + return nil, nil, fmt.Errorf("lookup type in module %s: %w", info.Name, err) + } + + return spec, it.Take(), nil + } + if err := it.Err(); err != nil { + return nil, nil, fmt.Errorf("iterate modules: %w", err) + } + + return nil, nil, btf.ErrNotFound +} + +// find an attach target type in a program. +// +// Returns errUnrecognizedAttachType. +func findTargetInProgram(prog *Program, name string, progType ProgramType, attachType AttachType) (btf.TypeID, error) { + type match struct { + p ProgramType + a AttachType + } + + var typeName string + switch (match{progType, attachType}) { + case match{Extension, AttachNone}, + match{Tracing, AttachTraceFEntry}, + match{Tracing, AttachTraceFExit}: + typeName = name + default: + return 0, errUnrecognizedAttachType + } + + btfHandle, err := prog.Handle() + if err != nil { + return 0, fmt.Errorf("load target BTF: %w", err) + } + defer btfHandle.Close() + + spec, err := btfHandle.Spec(nil) + if err != nil { + return 0, err + } + + var targetFunc *btf.Func + err = spec.TypeByName(typeName, &targetFunc) + if err != nil { + return 0, fmt.Errorf("find target %s: %w", typeName, err) + } + + return spec.TypeID(targetFunc) +} diff --git a/vendor/github.com/cilium/ebpf/syscalls.go b/vendor/github.com/cilium/ebpf/syscalls.go new file mode 100644 index 0000000000..25c84c3c5c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/syscalls.go @@ -0,0 +1,355 @@ +package ebpf + +import ( + "bytes" + "errors" + "fmt" + "math" + "os" + "runtime" + + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/internal" + "github.com/cilium/ebpf/internal/linux" + "github.com/cilium/ebpf/internal/sys" + "github.com/cilium/ebpf/internal/tracefs" + "github.com/cilium/ebpf/internal/unix" +) + +var ( + // pre-allocating these here since they may + // get called in hot code paths and cause + // unnecessary memory allocations + sysErrKeyNotExist = sys.Error(ErrKeyNotExist, unix.ENOENT) + sysErrKeyExist = sys.Error(ErrKeyExist, unix.EEXIST) + sysErrNotSupported = sys.Error(ErrNotSupported, sys.ENOTSUPP) +) + +// invalidBPFObjNameChar returns true if char may not appear in +// a BPF object name. 
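// Sketch: SanitizeName (defined earlier in prog.go) applies the same character
// rules that invalidBPFObjNameChar below enforces.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	// '-' and ':' are never valid in object names; dots are only accepted on 5.2+ kernels.
	fmt.Println(ebpf.SanitizeName("my-map:v1", '_')) // prints "my_map_v1"
}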
+func invalidBPFObjNameChar(char rune) bool { + dotAllowed := objNameAllowsDot() == nil + + switch { + case char >= 'A' && char <= 'Z': + return false + case char >= 'a' && char <= 'z': + return false + case char >= '0' && char <= '9': + return false + case dotAllowed && char == '.': + return false + case char == '_': + return false + default: + return true + } +} + +func progLoad(insns asm.Instructions, typ ProgramType, license string) (*sys.FD, error) { + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return nil, err + } + bytecode := buf.Bytes() + + return sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(typ), + License: sys.NewStringPointer(license), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + }) +} + +var haveNestedMaps = internal.NewFeatureTest("nested maps", func() error { + _, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(ArrayOfMaps), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + // Invalid file descriptor. + InnerMapFd: ^uint32(0), + }) + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if errors.Is(err, unix.EBADF) { + return nil + } + return err +}, "4.12") + +var haveMapMutabilityModifiers = internal.NewFeatureTest("read- and write-only maps", func() error { + // This checks BPF_F_RDONLY_PROG and BPF_F_WRONLY_PROG. Since + // BPF_MAP_FREEZE appeared in 5.2 as well we don't do a separate check. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_RDONLY_PROG, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.2") + +var haveMmapableMaps = internal.NewFeatureTest("mmapable maps", func() error { + // This checks BPF_F_MMAPABLE, which appeared in 5.5 for array maps. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_MMAPABLE, + }) + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.5") + +var haveInnerMaps = internal.NewFeatureTest("inner maps", func() error { + // This checks BPF_F_INNER_MAP, which appeared in 5.10. + m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_INNER_MAP, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "5.10") + +var haveNoPreallocMaps = internal.NewFeatureTest("prealloc maps", func() error { + // This checks BPF_F_NO_PREALLOC, which appeared in 4.6. 
+ m, err := sys.MapCreate(&sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapFlags: sys.BPF_F_NO_PREALLOC, + }) + + if err != nil { + return internal.ErrNotSupported + } + _ = m.Close() + return nil +}, "4.6") + +func wrapMapError(err error) error { + if err == nil { + return nil + } + + if errors.Is(err, unix.ENOENT) { + return sysErrKeyNotExist + } + + if errors.Is(err, unix.EEXIST) { + return sysErrKeyExist + } + + if errors.Is(err, sys.ENOTSUPP) { + return sysErrNotSupported + } + + if errors.Is(err, unix.E2BIG) { + return fmt.Errorf("key too big for map: %w", err) + } + + return err +} + +var haveObjName = internal.NewFeatureTest("object names", func() error { + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName("feature_test"), + } + + // Tolerate EPERM as this runs during ELF loading which is potentially + // unprivileged. Only EINVAL is conclusive, thrown from CHECK_ATTR. + fd, err := sys.MapCreate(&attr) + if errors.Is(err, unix.EPERM) { + return nil + } + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + _ = fd.Close() + return nil +}, "4.15") + +var objNameAllowsDot = internal.NewFeatureTest("dot in object names", func() error { + if err := haveObjName(); err != nil { + return err + } + + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Array), + KeySize: 4, + ValueSize: 4, + MaxEntries: 1, + MapName: sys.NewObjName(".test"), + } + + // Tolerate EPERM, otherwise MapSpec.Name has its dots removed when run by + // unprivileged tools. (bpf2go, other code gen). Only EINVAL is conclusive, + // thrown from bpf_obj_name_cpy(). + fd, err := sys.MapCreate(&attr) + if errors.Is(err, unix.EPERM) { + return nil + } + if errors.Is(err, unix.EINVAL) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + _ = fd.Close() + return nil +}, "5.2") + +var haveBatchAPI = internal.NewFeatureTest("map batch api", func() error { + var maxEntries uint32 = 2 + attr := sys.MapCreateAttr{ + MapType: sys.MapType(Hash), + KeySize: 4, + ValueSize: 4, + MaxEntries: maxEntries, + } + + fd, err := sys.MapCreate(&attr) + if err != nil { + return internal.ErrNotSupported + } + defer fd.Close() + + keys := []uint32{1, 2} + values := []uint32{3, 4} + kp, _ := marshalMapSyscallInput(keys, 8) + vp, _ := marshalMapSyscallInput(values, 8) + + err = sys.MapUpdateBatch(&sys.MapUpdateBatchAttr{ + MapFd: fd.Uint(), + Keys: kp, + Values: vp, + Count: maxEntries, + }) + if err != nil { + return internal.ErrNotSupported + } + return nil +}, "5.6") + +var haveProbeReadKernel = internal.NewFeatureTest("bpf_probe_read_kernel", func() error { + insns := asm.Instructions{ + asm.Mov.Reg(asm.R1, asm.R10), + asm.Add.Imm(asm.R1, -8), + asm.Mov.Imm(asm.R2, 8), + asm.Mov.Imm(asm.R3, 0), + asm.FnProbeReadKernel.Call(), + asm.Return(), + } + + fd, err := progLoad(insns, Kprobe, "GPL") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "5.5") + +var haveBPFToBPFCalls = internal.NewFeatureTest("bpf2bpf calls", func() error { + insns := asm.Instructions{ + asm.Call.Label("prog2").WithSymbol("prog1"), + asm.Return(), + asm.Mov.Imm(asm.R0, 0).WithSymbol("prog2"), + asm.Return(), + } + + fd, err := progLoad(insns, SocketFilter, "MIT") + if err != nil { + return internal.ErrNotSupported + } + _ = fd.Close() + return nil +}, "4.16") + +var haveSyscallWrapper = internal.NewFeatureTest("syscall 
wrapper", func() error { + prefix := linux.PlatformPrefix() + if prefix == "" { + return fmt.Errorf("unable to find the platform prefix for (%s)", runtime.GOARCH) + } + + args := tracefs.ProbeArgs{ + Type: tracefs.Kprobe, + Symbol: prefix + "sys_bpf", + Pid: -1, + } + + var err error + args.Group, err = tracefs.RandomGroup("ebpf_probe") + if err != nil { + return err + } + + evt, err := tracefs.NewEvent(args) + if errors.Is(err, os.ErrNotExist) { + return internal.ErrNotSupported + } + if err != nil { + return err + } + + return evt.Close() +}, "4.17") + +var haveProgramExtInfos = internal.NewFeatureTest("program ext_infos", func() error { + insns := asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), + asm.Return(), + } + + buf := bytes.NewBuffer(make([]byte, 0, insns.Size())) + if err := insns.Marshal(buf, internal.NativeEndian); err != nil { + return err + } + bytecode := buf.Bytes() + + _, err := sys.ProgLoad(&sys.ProgLoadAttr{ + ProgType: sys.ProgType(SocketFilter), + License: sys.NewStringPointer("MIT"), + Insns: sys.NewSlicePointer(bytecode), + InsnCnt: uint32(len(bytecode) / asm.InstructionSize), + FuncInfoCnt: 1, + ProgBtfFd: math.MaxUint32, + }) + + if errors.Is(err, unix.EBADF) { + return nil + } + + if errors.Is(err, unix.E2BIG) { + return ErrNotSupported + } + + return err +}, "5.0") diff --git a/vendor/github.com/cilium/ebpf/types.go b/vendor/github.com/cilium/ebpf/types.go new file mode 100644 index 0000000000..211b308bbc --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types.go @@ -0,0 +1,321 @@ +package ebpf + +import ( + "github.com/cilium/ebpf/internal/sys" +) + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output types_string.go -type=MapType,ProgramType,PinType + +// MapType indicates the type map structure +// that will be initialized in the kernel. +type MapType uint32 + +// All the various map types that can be created +const ( + UnspecifiedMap MapType = iota + // Hash is a hash map + Hash + // Array is an array map + Array + // ProgramArray - A program array map is a special kind of array map whose map + // values contain only file descriptors referring to other eBPF + // programs. Thus, both the key_size and value_size must be + // exactly four bytes. This map is used in conjunction with the + // TailCall helper. + ProgramArray + // PerfEventArray - A perf event array is used in conjunction with PerfEventRead + // and PerfEventOutput calls, to read the raw bpf_perf_data from the registers. + PerfEventArray + // PerCPUHash - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + PerCPUHash + // PerCPUArray - This data structure is useful for people who have high performance + // network needs and can reconcile adds at the end of some cycle, so that + // hashes can be lock free without the use of XAdd, which can be costly. + // Each CPU gets a copy of this hash, the contents of all of which can be reconciled + // later. 
+ PerCPUArray + // StackTrace - This holds whole user and kernel stack traces, it can be retrieved with + // GetStackID + StackTrace + // CGroupArray - This is a very niche structure used to help SKBInCGroup determine + // if an skb is from a socket belonging to a specific cgroup + CGroupArray + // LRUHash - This allows you to create a small hash structure that will purge the + // least recently used items rather than throw an error when you run out of memory + LRUHash + // LRUCPUHash - This is NOT like PerCPUHash, this structure is shared among the CPUs, + // it has more to do with including the CPU id with the LRU calculation so that if a + // particular CPU is using a value over-and-over again, then it will be saved, but if + // a value is being retrieved a lot but sparsely across CPUs it is not as important, basically + // giving weight to CPU locality over overall usage. + LRUCPUHash + // LPMTrie - This is an implementation of Longest-Prefix-Match Trie structure. It is useful, + // for storing things like IP addresses which can be bit masked allowing for keys of differing + // values to refer to the same reference based on their masks. See wikipedia for more details. + LPMTrie + // ArrayOfMaps - Each item in the array is another map. The inner map mustn't be a map of maps + // itself. + ArrayOfMaps + // HashOfMaps - Each item in the hash map is another map. The inner map mustn't be a map of maps + // itself. + HashOfMaps + // DevMap - Specialized map to store references to network devices. + DevMap + // SockMap - Specialized map to store references to sockets. + SockMap + // CPUMap - Specialized map to store references to CPUs. + CPUMap + // XSKMap - Specialized map for XDP programs to store references to open sockets. + XSKMap + // SockHash - Specialized hash to store references to sockets. + SockHash + // CGroupStorage - Special map for CGroups. + CGroupStorage + // ReusePortSockArray - Specialized map to store references to sockets that can be reused. + ReusePortSockArray + // PerCPUCGroupStorage - Special per CPU map for CGroups. + PerCPUCGroupStorage + // Queue - FIFO storage for BPF programs. + Queue + // Stack - LIFO storage for BPF programs. + Stack + // SkStorage - Specialized map for local storage at SK for BPF programs. + SkStorage + // DevMapHash - Hash-based indexing scheme for references to network devices. + DevMapHash + // StructOpsMap - This map holds a kernel struct with its function pointer implemented in a BPF + // program. + StructOpsMap + // RingBuf - Similar to PerfEventArray, but shared across all CPUs. + RingBuf + // InodeStorage - Specialized local storage map for inodes. + InodeStorage + // TaskStorage - Specialized local storage map for task_struct. + TaskStorage + // BloomFilter - Space-efficient data structure to quickly test whether an element exists in a set. + BloomFilter + // UserRingbuf - The reverse of RingBuf, used to send messages from user space to BPF programs. + UserRingbuf + // CgroupStorage - Store data keyed on a cgroup. If the cgroup disappears, the key is automatically removed. + CgroupStorage + // Arena - Sparse shared memory region between a BPF program and user space. + Arena +) + +// hasPerCPUValue returns true if the Map stores a value per CPU. +func (mt MapType) hasPerCPUValue() bool { + return mt == PerCPUHash || mt == PerCPUArray || mt == LRUCPUHash || mt == PerCPUCGroupStorage +} + +// canStoreMapOrProgram returns true if the Map stores references to another Map +// or Program. 
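// Sketch: ArrayOfMaps (see canStoreMap below) stores references to other maps.
// The outer value size must be 4 because updates take a map fd and lookups
// return a map ID. Requires a privileged process.
package main

import "github.com/cilium/ebpf"

func main() {
	inner := &ebpf.MapSpec{
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  8,
		MaxEntries: 1,
	}

	outer, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.ArrayOfMaps,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 8,
		InnerMap:   inner, // template the kernel uses to type-check inserted maps
	})
	if err != nil {
		panic(err)
	}
	defer outer.Close()

	child, err := ebpf.NewMap(inner)
	if err != nil {
		panic(err)
	}
	defer child.Close()

	// The inner map itself is the value.
	if err := outer.Put(uint32(0), child); err != nil {
		panic(err)
	}
}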
+func (mt MapType) canStoreMapOrProgram() bool { + return mt.canStoreMap() || mt.canStoreProgram() +} + +// canStoreMap returns true if the map type accepts a map fd +// for update and returns a map id for lookup. +func (mt MapType) canStoreMap() bool { + return mt == ArrayOfMaps || mt == HashOfMaps +} + +// canStoreProgram returns true if the map type accepts a program fd +// for update and returns a program id for lookup. +func (mt MapType) canStoreProgram() bool { + return mt == ProgramArray +} + +// canHaveValueSize returns true if the map type supports setting a value size. +func (mt MapType) canHaveValueSize() bool { + switch mt { + case RingBuf, Arena: + return false + + // Special-case perf events since they require a value size of either 0 or 4 + // for historical reasons. Let the library fix this up later. + case PerfEventArray: + return false + } + + return true +} + +// ProgramType of the eBPF program +type ProgramType uint32 + +// eBPF program types +const ( + UnspecifiedProgram = ProgramType(sys.BPF_PROG_TYPE_UNSPEC) + SocketFilter = ProgramType(sys.BPF_PROG_TYPE_SOCKET_FILTER) + Kprobe = ProgramType(sys.BPF_PROG_TYPE_KPROBE) + SchedCLS = ProgramType(sys.BPF_PROG_TYPE_SCHED_CLS) + SchedACT = ProgramType(sys.BPF_PROG_TYPE_SCHED_ACT) + TracePoint = ProgramType(sys.BPF_PROG_TYPE_TRACEPOINT) + XDP = ProgramType(sys.BPF_PROG_TYPE_XDP) + PerfEvent = ProgramType(sys.BPF_PROG_TYPE_PERF_EVENT) + CGroupSKB = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SKB) + CGroupSock = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK) + LWTIn = ProgramType(sys.BPF_PROG_TYPE_LWT_IN) + LWTOut = ProgramType(sys.BPF_PROG_TYPE_LWT_OUT) + LWTXmit = ProgramType(sys.BPF_PROG_TYPE_LWT_XMIT) + SockOps = ProgramType(sys.BPF_PROG_TYPE_SOCK_OPS) + SkSKB = ProgramType(sys.BPF_PROG_TYPE_SK_SKB) + CGroupDevice = ProgramType(sys.BPF_PROG_TYPE_CGROUP_DEVICE) + SkMsg = ProgramType(sys.BPF_PROG_TYPE_SK_MSG) + RawTracepoint = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT) + CGroupSockAddr = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCK_ADDR) + LWTSeg6Local = ProgramType(sys.BPF_PROG_TYPE_LWT_SEG6LOCAL) + LircMode2 = ProgramType(sys.BPF_PROG_TYPE_LIRC_MODE2) + SkReuseport = ProgramType(sys.BPF_PROG_TYPE_SK_REUSEPORT) + FlowDissector = ProgramType(sys.BPF_PROG_TYPE_FLOW_DISSECTOR) + CGroupSysctl = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SYSCTL) + RawTracepointWritable = ProgramType(sys.BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) + CGroupSockopt = ProgramType(sys.BPF_PROG_TYPE_CGROUP_SOCKOPT) + Tracing = ProgramType(sys.BPF_PROG_TYPE_TRACING) + StructOps = ProgramType(sys.BPF_PROG_TYPE_STRUCT_OPS) + Extension = ProgramType(sys.BPF_PROG_TYPE_EXT) + LSM = ProgramType(sys.BPF_PROG_TYPE_LSM) + SkLookup = ProgramType(sys.BPF_PROG_TYPE_SK_LOOKUP) + Syscall = ProgramType(sys.BPF_PROG_TYPE_SYSCALL) + Netfilter = ProgramType(sys.BPF_PROG_TYPE_NETFILTER) +) + +// AttachType of the eBPF program, needed to differentiate allowed context accesses in +// some newer program types like CGroupSockAddr. Should be set to AttachNone if not required. +// Will cause invalid argument (EINVAL) at program load time if set incorrectly. +type AttachType uint32 + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -type AttachType -trimprefix Attach + +// AttachNone is an alias for AttachCGroupInetIngress for readability reasons. 
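// Sketch: for program types like CGroupSockAddr the AttachType selects the
// concrete hook; CGroupSockAddr plus AttachCGroupInet4Connect is a
// cgroup/connect4 program. Loading needs CAP_BPF or root on a 4.17+ kernel.
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
)

func main() {
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Name:       "connect4_allow",
		Type:       ebpf.CGroupSockAddr,
		AttachType: ebpf.AttachCGroupInet4Connect,
		License:    "GPL",
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 1), // 1 = allow the connect()
			asm.Return(),
		},
	})
	if err != nil {
		panic(err)
	}
	defer prog.Close()

	fmt.Println("loaded", prog)
}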
+const AttachNone AttachType = 0 + +const ( + AttachCGroupInetIngress = AttachType(sys.BPF_CGROUP_INET_INGRESS) + AttachCGroupInetEgress = AttachType(sys.BPF_CGROUP_INET_EGRESS) + AttachCGroupInetSockCreate = AttachType(sys.BPF_CGROUP_INET_SOCK_CREATE) + AttachCGroupSockOps = AttachType(sys.BPF_CGROUP_SOCK_OPS) + AttachSkSKBStreamParser = AttachType(sys.BPF_SK_SKB_STREAM_PARSER) + AttachSkSKBStreamVerdict = AttachType(sys.BPF_SK_SKB_STREAM_VERDICT) + AttachCGroupDevice = AttachType(sys.BPF_CGROUP_DEVICE) + AttachSkMsgVerdict = AttachType(sys.BPF_SK_MSG_VERDICT) + AttachCGroupInet4Bind = AttachType(sys.BPF_CGROUP_INET4_BIND) + AttachCGroupInet6Bind = AttachType(sys.BPF_CGROUP_INET6_BIND) + AttachCGroupInet4Connect = AttachType(sys.BPF_CGROUP_INET4_CONNECT) + AttachCGroupInet6Connect = AttachType(sys.BPF_CGROUP_INET6_CONNECT) + AttachCGroupInet4PostBind = AttachType(sys.BPF_CGROUP_INET4_POST_BIND) + AttachCGroupInet6PostBind = AttachType(sys.BPF_CGROUP_INET6_POST_BIND) + AttachCGroupUDP4Sendmsg = AttachType(sys.BPF_CGROUP_UDP4_SENDMSG) + AttachCGroupUDP6Sendmsg = AttachType(sys.BPF_CGROUP_UDP6_SENDMSG) + AttachLircMode2 = AttachType(sys.BPF_LIRC_MODE2) + AttachFlowDissector = AttachType(sys.BPF_FLOW_DISSECTOR) + AttachCGroupSysctl = AttachType(sys.BPF_CGROUP_SYSCTL) + AttachCGroupUDP4Recvmsg = AttachType(sys.BPF_CGROUP_UDP4_RECVMSG) + AttachCGroupUDP6Recvmsg = AttachType(sys.BPF_CGROUP_UDP6_RECVMSG) + AttachCGroupGetsockopt = AttachType(sys.BPF_CGROUP_GETSOCKOPT) + AttachCGroupSetsockopt = AttachType(sys.BPF_CGROUP_SETSOCKOPT) + AttachTraceRawTp = AttachType(sys.BPF_TRACE_RAW_TP) + AttachTraceFEntry = AttachType(sys.BPF_TRACE_FENTRY) + AttachTraceFExit = AttachType(sys.BPF_TRACE_FEXIT) + AttachModifyReturn = AttachType(sys.BPF_MODIFY_RETURN) + AttachLSMMac = AttachType(sys.BPF_LSM_MAC) + AttachTraceIter = AttachType(sys.BPF_TRACE_ITER) + AttachCgroupInet4GetPeername = AttachType(sys.BPF_CGROUP_INET4_GETPEERNAME) + AttachCgroupInet6GetPeername = AttachType(sys.BPF_CGROUP_INET6_GETPEERNAME) + AttachCgroupInet4GetSockname = AttachType(sys.BPF_CGROUP_INET4_GETSOCKNAME) + AttachCgroupInet6GetSockname = AttachType(sys.BPF_CGROUP_INET6_GETSOCKNAME) + AttachXDPDevMap = AttachType(sys.BPF_XDP_DEVMAP) + AttachCgroupInetSockRelease = AttachType(sys.BPF_CGROUP_INET_SOCK_RELEASE) + AttachXDPCPUMap = AttachType(sys.BPF_XDP_CPUMAP) + AttachSkLookup = AttachType(sys.BPF_SK_LOOKUP) + AttachXDP = AttachType(sys.BPF_XDP) + AttachSkSKBVerdict = AttachType(sys.BPF_SK_SKB_VERDICT) + AttachSkReuseportSelect = AttachType(sys.BPF_SK_REUSEPORT_SELECT) + AttachSkReuseportSelectOrMigrate = AttachType(sys.BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) + AttachPerfEvent = AttachType(sys.BPF_PERF_EVENT) + AttachTraceKprobeMulti = AttachType(sys.BPF_TRACE_KPROBE_MULTI) + AttachLSMCgroup = AttachType(sys.BPF_LSM_CGROUP) + AttachStructOps = AttachType(sys.BPF_STRUCT_OPS) + AttachNetfilter = AttachType(sys.BPF_NETFILTER) + AttachTCXIngress = AttachType(sys.BPF_TCX_INGRESS) + AttachTCXEgress = AttachType(sys.BPF_TCX_EGRESS) + AttachTraceUprobeMulti = AttachType(sys.BPF_TRACE_UPROBE_MULTI) + AttachCgroupUnixConnect = AttachType(sys.BPF_CGROUP_UNIX_CONNECT) + AttachCgroupUnixSendmsg = AttachType(sys.BPF_CGROUP_UNIX_SENDMSG) + AttachCgroupUnixRecvmsg = AttachType(sys.BPF_CGROUP_UNIX_RECVMSG) + AttachCgroupUnixGetpeername = AttachType(sys.BPF_CGROUP_UNIX_GETPEERNAME) + AttachCgroupUnixGetsockname = AttachType(sys.BPF_CGROUP_UNIX_GETSOCKNAME) + AttachNetkitPrimary = AttachType(sys.BPF_NETKIT_PRIMARY) + AttachNetkitPeer = 
AttachType(sys.BPF_NETKIT_PEER) +) + +// AttachFlags of the eBPF program used in BPF_PROG_ATTACH command +type AttachFlags uint32 + +// PinType determines whether a map is pinned into a BPFFS. +type PinType uint32 + +// Valid pin types. +// +// Mirrors enum libbpf_pin_type. +const ( + PinNone PinType = iota + // Pin an object by using its name as the filename. + PinByName +) + +// LoadPinOptions control how a pinned object is loaded. +type LoadPinOptions struct { + // Request a read-only or write-only object. The default is a read-write + // object. Only one of the flags may be set. + ReadOnly bool + WriteOnly bool + + // Raw flags for the syscall. Other fields of this struct take precedence. + Flags uint32 +} + +// Marshal returns a value suitable for BPF_OBJ_GET syscall file_flags parameter. +func (lpo *LoadPinOptions) Marshal() uint32 { + if lpo == nil { + return 0 + } + + flags := lpo.Flags + if lpo.ReadOnly { + flags |= sys.BPF_F_RDONLY + } + if lpo.WriteOnly { + flags |= sys.BPF_F_WRONLY + } + return flags +} + +// BatchOptions batch map operations options +// +// Mirrors libbpf struct bpf_map_batch_opts +// Currently BPF_F_FLAG is the only supported +// flag (for ElemFlags). +type BatchOptions struct { + ElemFlags uint64 + Flags uint64 +} + +// LogLevel controls the verbosity of the kernel's eBPF program verifier. +// These constants can be used for the ProgramOptions.LogLevel field. +type LogLevel = sys.LogLevel + +const ( + // Print verifier state at branch points. + LogLevelBranch = sys.BPF_LOG_LEVEL1 + + // Print verifier state for every instruction. + // Available since Linux v5.2. + LogLevelInstruction = sys.BPF_LOG_LEVEL2 + + // Print verifier errors and stats at the end of the verification process. + // Available since Linux v5.2. + LogLevelStats = sys.BPF_LOG_STATS +) diff --git a/vendor/github.com/cilium/ebpf/types_string.go b/vendor/github.com/cilium/ebpf/types_string.go new file mode 100644 index 0000000000..f06685112c --- /dev/null +++ b/vendor/github.com/cilium/ebpf/types_string.go @@ -0,0 +1,123 @@ +// Code generated by "stringer -output types_string.go -type=MapType,ProgramType,PinType"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[UnspecifiedMap-0] + _ = x[Hash-1] + _ = x[Array-2] + _ = x[ProgramArray-3] + _ = x[PerfEventArray-4] + _ = x[PerCPUHash-5] + _ = x[PerCPUArray-6] + _ = x[StackTrace-7] + _ = x[CGroupArray-8] + _ = x[LRUHash-9] + _ = x[LRUCPUHash-10] + _ = x[LPMTrie-11] + _ = x[ArrayOfMaps-12] + _ = x[HashOfMaps-13] + _ = x[DevMap-14] + _ = x[SockMap-15] + _ = x[CPUMap-16] + _ = x[XSKMap-17] + _ = x[SockHash-18] + _ = x[CGroupStorage-19] + _ = x[ReusePortSockArray-20] + _ = x[PerCPUCGroupStorage-21] + _ = x[Queue-22] + _ = x[Stack-23] + _ = x[SkStorage-24] + _ = x[DevMapHash-25] + _ = x[StructOpsMap-26] + _ = x[RingBuf-27] + _ = x[InodeStorage-28] + _ = x[TaskStorage-29] + _ = x[BloomFilter-30] + _ = x[UserRingbuf-31] + _ = x[CgroupStorage-32] + _ = x[Arena-33] +} + +const _MapType_name = "UnspecifiedMapHashArrayProgramArrayPerfEventArrayPerCPUHashPerCPUArrayStackTraceCGroupArrayLRUHashLRUCPUHashLPMTrieArrayOfMapsHashOfMapsDevMapSockMapCPUMapXSKMapSockHashCGroupStorageReusePortSockArrayPerCPUCGroupStorageQueueStackSkStorageDevMapHashStructOpsMapRingBufInodeStorageTaskStorageBloomFilterUserRingbufCgroupStorageArena" + +var _MapType_index = [...]uint16{0, 14, 18, 23, 35, 49, 59, 70, 80, 91, 98, 108, 115, 126, 136, 142, 149, 155, 161, 169, 182, 200, 219, 224, 229, 238, 248, 260, 267, 279, 290, 301, 312, 325, 330} + +func (i MapType) String() string { + if i >= MapType(len(_MapType_index)-1) { + return "MapType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _MapType_name[_MapType_index[i]:_MapType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[UnspecifiedProgram-0] + _ = x[SocketFilter-1] + _ = x[Kprobe-2] + _ = x[SchedCLS-3] + _ = x[SchedACT-4] + _ = x[TracePoint-5] + _ = x[XDP-6] + _ = x[PerfEvent-7] + _ = x[CGroupSKB-8] + _ = x[CGroupSock-9] + _ = x[LWTIn-10] + _ = x[LWTOut-11] + _ = x[LWTXmit-12] + _ = x[SockOps-13] + _ = x[SkSKB-14] + _ = x[CGroupDevice-15] + _ = x[SkMsg-16] + _ = x[RawTracepoint-17] + _ = x[CGroupSockAddr-18] + _ = x[LWTSeg6Local-19] + _ = x[LircMode2-20] + _ = x[SkReuseport-21] + _ = x[FlowDissector-22] + _ = x[CGroupSysctl-23] + _ = x[RawTracepointWritable-24] + _ = x[CGroupSockopt-25] + _ = x[Tracing-26] + _ = x[StructOps-27] + _ = x[Extension-28] + _ = x[LSM-29] + _ = x[SkLookup-30] + _ = x[Syscall-31] + _ = x[Netfilter-32] +} + +const _ProgramType_name = "UnspecifiedProgramSocketFilterKprobeSchedCLSSchedACTTracePointXDPPerfEventCGroupSKBCGroupSockLWTInLWTOutLWTXmitSockOpsSkSKBCGroupDeviceSkMsgRawTracepointCGroupSockAddrLWTSeg6LocalLircMode2SkReuseportFlowDissectorCGroupSysctlRawTracepointWritableCGroupSockoptTracingStructOpsExtensionLSMSkLookupSyscallNetfilter" + +var _ProgramType_index = [...]uint16{0, 18, 30, 36, 44, 52, 62, 65, 74, 83, 93, 98, 104, 111, 118, 123, 135, 140, 153, 167, 179, 188, 199, 212, 224, 245, 258, 265, 274, 283, 286, 294, 301, 310} + +func (i ProgramType) String() string { + if i >= ProgramType(len(_ProgramType_index)-1) { + return "ProgramType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProgramType_name[_ProgramType_index[i]:_ProgramType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[PinNone-0] + _ = x[PinByName-1] +} + +const _PinType_name = "PinNonePinByName" + +var _PinType_index = [...]uint8{0, 7, 16} + +func (i PinType) String() string { + if i >= PinType(len(_PinType_index)-1) { + return "PinType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _PinType_name[_PinType_index[i]:_PinType_index[i+1]] +} diff --git a/vendor/github.com/cilium/ebpf/variable.go b/vendor/github.com/cilium/ebpf/variable.go new file mode 100644 index 0000000000..288b173a11 --- /dev/null +++ b/vendor/github.com/cilium/ebpf/variable.go @@ -0,0 +1,230 @@ +package ebpf + +import ( + "fmt" + "io" + + "github.com/cilium/ebpf/btf" + "github.com/cilium/ebpf/internal/sysenc" +) + +// VariableSpec is a convenience wrapper for modifying global variables of a +// CollectionSpec before loading it into the kernel. +// +// All operations on a VariableSpec's underlying MapSpec are performed in the +// host's native endianness. +type VariableSpec struct { + name string + offset uint64 + size uint64 + + m *MapSpec + t *btf.Var +} + +// Set sets the value of the VariableSpec to the provided input using the host's +// native endianness. +func (s *VariableSpec) Set(in any) error { + buf, err := sysenc.Marshal(in, int(s.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", s.name, err) + } + + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + // MapSpec.Copy() performs a shallow copy. Fully copy the byte slice + // to avoid any changes affecting other copies of the MapSpec. + cpy := make([]byte, len(b)) + copy(cpy, b) + + buf.CopyTo(cpy[s.offset : s.offset+s.size]) + + s.m.Contents[0] = MapKV{Key: uint32(0), Value: cpy} + + return nil +} + +// Get writes the value of the VariableSpec to the provided output using the +// host's native endianness. +func (s *VariableSpec) Get(out any) error { + b, _, err := s.m.dataSection() + if err != nil { + return fmt.Errorf("getting data section of map %s: %w", s.m.Name, err) + } + + if int(s.offset+s.size) > len(b) { + return fmt.Errorf("offset %d(+%d) for variable %s is out of bounds", s.offset, s.size, s.name) + } + + if err := sysenc.Unmarshal(out, b[s.offset:s.offset+s.size]); err != nil { + return fmt.Errorf("unmarshaling value: %w", err) + } + + return nil +} + +// Size returns the size of the variable in bytes. +func (s *VariableSpec) Size() uint64 { + return s.size +} + +// MapName returns the name of the underlying MapSpec. +func (s *VariableSpec) MapName() string { + return s.m.Name +} + +// Offset returns the offset of the variable in the underlying MapSpec. +func (s *VariableSpec) Offset() uint64 { + return s.offset +} + +// Constant returns true if the VariableSpec represents a variable that is +// read-only from the perspective of the BPF program. +func (s *VariableSpec) Constant() bool { + return s.m.readOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. 
+func (s *VariableSpec) Type() *btf.Var { + return s.t +} + +func (s *VariableSpec) String() string { + return fmt.Sprintf("%s (type=%v, map=%s, offset=%d, size=%d)", s.name, s.t, s.m.Name, s.offset, s.size) +} + +// copy returns a new VariableSpec with the same values as the original, +// but with a different underlying MapSpec. This is useful when copying a +// CollectionSpec. Returns nil if a MapSpec with the same name is not found. +func (s *VariableSpec) copy(cpy *CollectionSpec) *VariableSpec { + out := &VariableSpec{ + name: s.name, + offset: s.offset, + size: s.size, + } + if s.t != nil { + out.t = btf.Copy(s.t).(*btf.Var) + } + + // Attempt to find a MapSpec with the same name in the copied CollectionSpec. + for _, m := range cpy.Maps { + if m.Name == s.m.Name { + out.m = m + return out + } + } + + return nil +} + +// Variable is a convenience wrapper for modifying global variables of a +// Collection after loading it into the kernel. Operations on a Variable are +// performed using direct memory access, bypassing the BPF map syscall API. +// +// On kernels older than 5.5, most interactions with Variable return +// [ErrNotSupported]. +type Variable struct { + name string + offset uint64 + size uint64 + t *btf.Var + + mm *Memory +} + +func newVariable(name string, offset, size uint64, t *btf.Var, mm *Memory) (*Variable, error) { + if mm != nil { + if int(offset+size) > mm.Size() { + return nil, fmt.Errorf("offset %d(+%d) is out of bounds", offset, size) + } + } + + return &Variable{ + name: name, + offset: offset, + size: size, + t: t, + mm: mm, + }, nil +} + +// Size returns the size of the variable. +func (v *Variable) Size() uint64 { + return v.size +} + +// ReadOnly returns true if the Variable represents a variable that is read-only +// after loading the Collection into the kernel. +// +// On systems without BPF_F_MMAPABLE support, ReadOnly always returns true. +func (v *Variable) ReadOnly() bool { + if v.mm == nil { + return true + } + return v.mm.ReadOnly() +} + +// Type returns the [btf.Var] representing the variable in its data section. +// This is useful for inspecting the variable's decl tags and the type +// information of the inner type. +// +// Returns nil if the original ELF object did not contain BTF information. +func (v *Variable) Type() *btf.Var { + return v.t +} + +func (v *Variable) String() string { + return fmt.Sprintf("%s (type=%v)", v.name, v.t) +} + +// Set the value of the Variable to the provided input. The input must marshal +// to the same length as the size of the Variable. +func (v *Variable) Set(in any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if v.ReadOnly() { + return fmt.Errorf("variable %s: %w", v.name, ErrReadOnly) + } + + buf, err := sysenc.Marshal(in, int(v.size)) + if err != nil { + return fmt.Errorf("marshaling value %s: %w", v.name, err) + } + + if _, err := v.mm.WriteAt(buf.Bytes(), int64(v.offset)); err != nil { + return fmt.Errorf("writing value to %s: %w", v.name, err) + } + + return nil +} + +// Get writes the value of the Variable to the provided output. The output must +// be a pointer to a value whose size matches the Variable. 
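+//
+// A minimal usage sketch, not part of the upstream documentation: it assumes a
+// loaded Collection "coll" whose BPF C code declares a global named
+// "my_counter" (both names are illustrative), looked up through the
+// Collection's Variables map:
+//
+//	var count uint32
+//	if v := coll.Variables["my_counter"]; v != nil {
+//		if err := v.Get(&count); err != nil {
+//			// On kernels older than 5.5 this wraps ErrNotSupported.
+//		}
+//	}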
+func (v *Variable) Get(out any) error { + if v.mm == nil { + return fmt.Errorf("variable %s: direct access requires Linux 5.5 or later: %w", v.name, ErrNotSupported) + } + + if !v.mm.bounds(v.offset, v.size) { + return fmt.Errorf("variable %s: access out of bounds: %w", v.name, io.EOF) + } + + if err := sysenc.Unmarshal(out, v.mm.b[v.offset:v.offset+v.size]); err != nil { + return fmt.Errorf("unmarshaling value %s: %w", v.name, err) + } + + return nil +} diff --git a/vendor/github.com/cilium/hive/.gitignore b/vendor/github.com/cilium/hive/.gitignore new file mode 100644 index 0000000000..63403c7e2d --- /dev/null +++ b/vendor/github.com/cilium/hive/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.so.* +*.d + +# Folders +_obj +_test +_build/ + +# Architecture specific extensions/prefixes +*.exe +*.test +*.prof + +.DS_Store +.idea/ +.vscode/* +!.vscode/launch.json +!.vscode/extensions.json +*.plist + +*_bash_completion +*.swo diff --git a/vendor/github.com/cilium/hive/CODEOWNERS b/vendor/github.com/cilium/hive/CODEOWNERS new file mode 100644 index 0000000000..1031f9db44 --- /dev/null +++ b/vendor/github.com/cilium/hive/CODEOWNERS @@ -0,0 +1,12 @@ +# Code owners groups assigned to this repository and a brief description of their areas: +# @cilium/ci-structure Continuous integration, testing +# @cilium/contributing Developer documentation & tools +# @cilium/github-sec GitHub security (handling of secrets, consequences of pull_request_target, etc.) +# @cilium/sig-foundations Core libraries and guidance to overall software architecture. + +# The following filepaths should be sorted so that more specific paths occur +# after the less specific paths, otherwise the ownership for the specific paths +# is not properly picked up in Github. +* @cilium/sig-foundations +/.github/workflows/ @cilium/github-sec @cilium/ci-structure @cilium/sig-foundations +/CODEOWNERS @cilium/contributing @cilium/sig-foundations diff --git a/vendor/k8s.io/component-base/LICENSE b/vendor/github.com/cilium/hive/LICENSE similarity index 99% rename from vendor/k8s.io/component-base/LICENSE rename to vendor/github.com/cilium/hive/LICENSE index d645695673..a2e486a803 100644 --- a/vendor/k8s.io/component-base/LICENSE +++ b/vendor/github.com/cilium/hive/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} Authors of Cilium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+
diff --git a/vendor/github.com/cilium/hive/Makefile b/vendor/github.com/cilium/hive/Makefile
new file mode 100644
index 0000000000..3e17ea11fe
--- /dev/null
+++ b/vendor/github.com/cilium/hive/Makefile
@@ -0,0 +1,15 @@
+.PHONY: all build test test-race bench
+
+all: build test test-race bench
+
+build:
+	go build ./...
+
+test:
+	go test ./... -cover -test.count 1
+
+test-race:
+	go test -race ./... -test.count 1
+
+bench:
+	go test ./... -bench . -test.run xxx
diff --git a/vendor/github.com/cilium/hive/README.md b/vendor/github.com/cilium/hive/README.md
new file mode 100644
index 0000000000..60701fabd6
--- /dev/null
+++ b/vendor/github.com/cilium/hive/README.md
@@ -0,0 +1,14 @@
+# :bee: Hive [![GoDoc](https://pkg.go.dev/badge/github.com/cilium/hive)](https://pkg.go.dev/github.com/cilium/hive)
+
+Hive is a dependency injection framework for Go. To build an application
+in Hive you tell it your object constructors and then ask it to invoke
+functions that make use of those constructors. Hive figures out what constructors
+to call and in what order.
+
+Hive is built on top of `uber/dig` and is similar to `uber/fx`.
+The main difference from `uber/fx` is the opinionated way to provide configuration
+and command-line inspection tooling (`go run ./example hive`). Hive was built
+for the needs of the Cilium project to improve modularity of the Cilium codebase.
+
+To get started, see the [documentation](https://pkg.go.dev/github.com/cilium/hive)
+and explore the [example](example).
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go b/vendor/github.com/cilium/hive/cell/cell.go
similarity index 90%
rename from vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go
rename to vendor/github.com/cilium/hive/cell/cell.go
index 0529919e71..652a92833c 100644
--- a/vendor/github.com/cilium/cilium/pkg/hive/cell/cell.go
+++ b/vendor/github.com/cilium/hive/cell/cell.go
@@ -5,13 +5,6 @@ package cell
 
 import (
 	"go.uber.org/dig"
-
-	"github.com/cilium/cilium/pkg/logging"
-	"github.com/cilium/cilium/pkg/logging/logfields"
-)
-
-var (
-	log = logging.DefaultLogger.WithField(logfields.LogSubsys, "hive")
 )
 
 // Cell is the modular building block of the hive.
@@ -28,7 +21,7 @@ type Cell interface {
 	Info(container) Info
 
 	// Apply the cell to the dependency graph container.
-	Apply(container) error
+	Apply(container, rootContainer) error
 }
 
 // In when embedded into a struct used as constructor parameter makes the exported
@@ -56,3 +49,5 @@ type container interface {
 	Decorate(fn any, opts ...dig.DecorateOption) error
 	Scope(name string, opts ...dig.ScopeOption) *dig.Scope
 }
+
+type rootContainer = container
diff --git a/vendor/github.com/cilium/hive/cell/config.go b/vendor/github.com/cilium/hive/cell/config.go
new file mode 100644
index 0000000000..3662da33f3
--- /dev/null
+++ b/vendor/github.com/cilium/hive/cell/config.go
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/cilium/hive/internal"
+	"github.com/mitchellh/mapstructure"
+	"github.com/spf13/pflag"
+	"go.uber.org/dig"
+)
+
+// Config constructs a new config cell.
+//
+// The configuration struct `Cfg` needs to implement the Flags method that
+// registers the flags. The structure is populated and provided via dependency
+// injection by Hive.Run(). The underlying mechanism for populating the struct
+// is viper's Unmarshal().
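+//
+// A minimal usage sketch, not part of the upstream documentation; MyConfig and
+// its flag are hypothetical names used only for illustration:
+//
+//	type MyConfig struct {
+//		MyFlag string
+//	}
+//
+//	func (c MyConfig) Flags(flags *pflag.FlagSet) {
+//		flags.String("my-flag", c.MyFlag, "An example flag")
+//	}
+//
+//	// Registers the flag and provides MyConfig, e.g. as part of a cell.Module(...).
+//	var myConfigCell = cell.Config(MyConfig{MyFlag: "default"})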
+func Config[Cfg Flagger](def Cfg) Cell { + return &config[Cfg]{defaultConfig: def} +} + +// Flagger is implemented by configuration structs to provide configuration +// for a cell. +type Flagger interface { + // Flags registers the configuration options as command-line flags. + // + // By convention a flag name matches the field name + // if they're the same under case-insensitive comparison when dashes are + // removed. E.g. "my-config-flag" matches field "MyConfigFlag". The + // correspondence to the flag can be also specified with the mapstructure + // tag: MyConfigFlag `mapstructure:"my-config-flag"`. + // + // Exported fields that are not found from the viper settings will cause + // hive.Run() to fail. Unexported fields are ignored. + // + // See https://pkg.go.dev/github.com/mitchellh/mapstructure for more info. + Flags(*pflag.FlagSet) +} + +// config is a cell for configuration. It registers the config's command-line +// flags and provides the parsed config to the hive. +type config[Cfg Flagger] struct { + defaultConfig Cfg +} + +type AllSettings map[string]any + +type DecodeHooks []mapstructure.DecodeHookFunc + +type configParams[Cfg Flagger] struct { + dig.In + AllSettings AllSettings + DecodeHooks DecodeHooks `optional:"true"` + Override func(*Cfg) `optional:"true"` +} + +func provideConfig[Cfg Flagger](defaultConfig Cfg, flags *pflag.FlagSet) func(p configParams[Cfg]) (Cfg, error) { + return func(p configParams[Cfg]) (Cfg, error) { + settings := p.AllSettings + target := defaultConfig + decoder, err := mapstructure.NewDecoder(decoderConfig(&target, p.DecodeHooks)) + if err != nil { + return target, fmt.Errorf("failed to create config decoder: %w", err) + } + + // As input, only consider the declared flags. + input := make(map[string]any) + + flags.VisitAll(func(f *pflag.Flag) { + if v, ok := settings[f.Name]; ok { + input[f.Name] = v + } else { + err = fmt.Errorf("internal error: %s not found from settings", f.Name) + } + }) + if err != nil { + return target, err + } + if err := decoder.Decode(input); err != nil { + return target, fmt.Errorf("failed to unmarshal config struct %T: %w.\n"+ + "Hint: field 'FooBar' matches flag 'foo-bar', or use tag `mapstructure:\"flag-name\"` to match field with flag", + target, err) + } + + // See if the configuration was overridden with ConfigOverride. We check the override + // after the decode to validate that the config struct is properly formed and all + // flags are registered. + if p.Override != nil { + p.Override(&target) + } + + return target, nil + } +} + +func decoderConfig(target any, extraHooks DecodeHooks) *mapstructure.DecoderConfig { + decodeHooks := []mapstructure.DecodeHookFunc{ + // To unify the splitting of fields of a []string field across the input coming + // from environment, configmap and pflag (command-line), we first split a string + // (env/configmap) by comma, and then for all input methods we split a single + // value []string by whitespace. 
Thus the following all result in the same slice: + // + // --string-slice=foo,bar,baz + // --string-slice="foo bar baz" + // CILIUM_STRING_SLICE="foo,bar,baz" + // CILIUM_STRING_SLICE="foo bar baz" + // /.../configmap/string_slice: "foo bar baz" + // /.../configmap/string_slice: "foo,bar,baz" + // + // If both commas and whitespaces are present the commas take precedence: + // "foo,bar baz" => []string{"foo", "bar baz"} + mapstructure.StringToSliceHookFunc(","), // string->[]string is split by comma + fixupStringSliceHookFunc, // []string of length 1 is split again by whitespace + + mapstructure.StringToTimeDurationHookFunc(), + stringToMapHookFunc, + } + decodeHooks = append(decodeHooks, extraHooks...) + + return &mapstructure.DecoderConfig{ + Metadata: nil, + Result: target, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc(decodeHooks...), + ZeroFields: true, + // Error out if the config struct has fields that are + // not found from input. + ErrorUnset: true, + // Error out also if settings from input are not used. + ErrorUnused: true, + // Match field FooBarBaz with "foo-bar-baz" by removing + // the dashes from the flag. + MatchName: func(mapKey, fieldName string) bool { + return strings.EqualFold( + strings.ReplaceAll(mapKey, "-", ""), + fieldName) + }, + } +} + +func (c *config[Cfg]) Apply(cont container, _ rootContainer) error { + flags := pflag.NewFlagSet("", pflag.ContinueOnError) + c.defaultConfig.Flags(flags) + + // Register the flags to the global set of all flags. + err := cont.Invoke( + func(allFlags *pflag.FlagSet) { + allFlags.AddFlagSet(flags) + }) + if err != nil { + return err + } + // And provide the constructor for the config. + return cont.Provide( + provideConfig(c.defaultConfig, flags), + dig.Export(true), + ) +} + +func (c *config[Cfg]) Info(cont container) (info Info) { + cont.Invoke(func(cfg Cfg) { + info = &InfoStruct{cfg} + }) + return +} + +// stringToMapHookFunc is a DecodeHookFunc that converts string +// to map[string]string supporting both json and KV formats. +func stringToMapHookFunc(from reflect.Kind, to reflect.Kind, data interface{}) (interface{}, error) { + if from != reflect.String || to != reflect.Map { + return data, nil + } + return internal.ToStringMapStringE(data.(string)) +} + +// fixupStringSliceHookFunc takes a []string and if it's a single element splits it again +// by whitespace. This unifies the flag parsing behavior with StringSlice +// values coming from environment or configmap where both spaces or commas can be used to split. +func fixupStringSliceHookFunc(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) { + if from.Kind() != reflect.Slice || to.Kind() != reflect.Slice { + return data, nil + } + if from.Elem().Kind() != reflect.String || to.Elem().Kind() != reflect.String { + return data, nil + } + + raw := data.([]string) + if len(raw) == 1 { + // Flag was already split by commas (the default behavior), so split it + // now by spaces. 
+ return strings.Fields(raw[0]), nil + } + return raw, nil +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go b/vendor/github.com/cilium/hive/cell/decorator.go similarity index 56% rename from vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go rename to vendor/github.com/cilium/hive/cell/decorator.go index fcba55aa2a..2aafd467da 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/decorator.go +++ b/vendor/github.com/cilium/hive/cell/decorator.go @@ -6,7 +6,7 @@ package cell import ( "fmt" - "github.com/cilium/cilium/pkg/hive/internal" + "github.com/cilium/hive/internal" ) // Decorate takes a decorator function and a set of cells and returns @@ -38,14 +38,14 @@ type decorator struct { cells []Cell } -func (d *decorator) Apply(c container) error { +func (d *decorator) Apply(c container, rc rootContainer) error { scope := c.Scope(fmt.Sprintf("(decorate %s)", internal.PrettyType(d.decorator))) if err := scope.Decorate(d.decorator); err != nil { return err } for _, cell := range d.cells { - if err := cell.Apply(scope); err != nil { + if err := cell.Apply(scope, rc); err != nil { return err } } @@ -60,3 +60,37 @@ func (d *decorator) Info(c container) Info { } return n } + +// DecorateAll takes a decorator function and applies the decoration globally. +// +// Example: +// +// cell.Module( +// "my-app", +// "My application", +// foo.Cell, // provides foo.Foo +// bar.Cell, +// +// // Wrap 'foo.Foo' everywhere, including inside foo.Cell. +// cell.DecorateAll( +// func(f foo.Foo) foo.Foo { +// return myFooWrapper{f} +// }, +// ), +// ) +func DecorateAll(dtor any) Cell { + return &allDecorator{dtor} +} + +type allDecorator struct { + decorator any +} + +func (d *allDecorator) Apply(_ container, rc rootContainer) error { + return rc.Decorate(d.decorator) +} + +func (d *allDecorator) Info(_ container) Info { + n := NewInfoNode(fmt.Sprintf("🔀* %s: %s", internal.FuncNameAndLocation(d.decorator), internal.PrettyType(d.decorator))) + return n +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/group.go b/vendor/github.com/cilium/hive/cell/group.go similarity index 80% rename from vendor/github.com/cilium/cilium/pkg/hive/cell/group.go rename to vendor/github.com/cilium/hive/cell/group.go index f2e60b12c6..c608683347 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/group.go +++ b/vendor/github.com/cilium/hive/cell/group.go @@ -11,9 +11,9 @@ func Group(cells ...Cell) Cell { return group(cells) } -func (g group) Apply(c container) error { +func (g group) Apply(c container, rc rootContainer) error { for _, cell := range g { - if err := cell.Apply(c); err != nil { + if err := cell.Apply(c, rc); err != nil { return err } } diff --git a/vendor/github.com/cilium/hive/cell/health.go b/vendor/github.com/cilium/hive/cell/health.go new file mode 100644 index 0000000000..aace1e897e --- /dev/null +++ b/vendor/github.com/cilium/hive/cell/health.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cell + +// Level denotes what kind an update is. +type Level string + +const ( + // StatusUnknown is the default status of a Module, prior to it reporting + // any status. + // All created + StatusUnknown Level = "Unknown" + + // StatusStopped is the status of a Module that has completed, further updates + // will not be processed. + StatusStopped Level = "Stopped" + + // StatusDegraded is the status of a Module that has entered a degraded state. 
+	StatusDegraded Level = "Degraded"
+
+	// StatusOK is the status of a Module that has achieved a desired state.
+	StatusOK Level = "OK"
+)
+
+// Health provides a method of declaring a Module's health status.
+//
+// The interface is meant to be used with "ModuleDecorator" to inject it into
+// the scope of modules.
+//
+// Implementation for health reporting is not included with the Hive library.
+type Health interface {
+	// OK declares that a Module has achieved a desired state and has not entered
+	// any unexpected or incorrect states.
+	// Modules should only declare themselves as 'OK' once they have stabilized,
+	// rather than during their initial state. This should be left to be reported
+	// as the default "unknown" to denote that the module has not reached a "ready"
+	// health state.
+	OK(status string)
+
+	// Stopped reports that a module has completed, and will no longer report any
+	// health status.
+	// Implementations should differentiate that a stopped module may also be OK or Degraded.
+	// Stopping reporting should only affect future updates.
+	Stopped(reason string)
+
+	// Degraded declares that a module has entered a degraded state.
+	// This means that it may have failed to provide its intended services, or
+	// to perform its desired task.
+	Degraded(reason string, err error)
+
+	// NewScope constructs a new scoped health reporter.
+	NewScope(name string) Health
+
+	// Close closes this health scope and removes it. This is distinct from
+	// 'Stopped' in that after closing the health status will disappear completely.
+	Close()
+}
diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/info.go b/vendor/github.com/cilium/hive/cell/info.go
similarity index 100%
rename from vendor/github.com/cilium/cilium/pkg/hive/cell/info.go
rename to vendor/github.com/cilium/hive/cell/info.go
diff --git a/vendor/github.com/cilium/hive/cell/invoke.go b/vendor/github.com/cilium/hive/cell/invoke.go
new file mode 100644
index 0000000000..739b5b62f4
--- /dev/null
+++ b/vendor/github.com/cilium/hive/cell/invoke.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package cell
+
+import (
+	"fmt"
+	"log/slog"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+
+	"go.uber.org/dig"
+
+	"github.com/cilium/hive/internal"
+)
+
+type invoker struct {
+	funcs []namedFunc
+}
+
+type namedFunc struct {
+	name string
+	fn   any
+
+	infoMu sync.Mutex
+	info   *dig.InvokeInfo
+}
+
+type InvokerList interface {
+	AppendInvoke(func(*slog.Logger, time.Duration) error)
+}
+
+func (inv *invoker) invoke(log *slog.Logger, cont container, logThreshold time.Duration) error {
+	for i := range inv.funcs {
+		nf := &inv.funcs[i]
+		log.Debug("Invoking", "function", nf.name)
+		t0 := time.Now()
+
+		var opts []dig.InvokeOption
+		nf.infoMu.Lock()
+		if nf.info == nil {
+			nf.info = &dig.InvokeInfo{}
+			opts = []dig.InvokeOption{
+				dig.FillInvokeInfo(nf.info),
+			}
+		}
+		defer inv.funcs[i].infoMu.Unlock()
+
+		if err := cont.Invoke(nf.fn, opts...); err != nil {
+			log.Error("Invoke failed", "error", err, "function", nf.name)
+			return err
+		}
+		d := time.Since(t0)
+		if d > logThreshold {
+			log.Info("Invoked", "duration", d, "function", nf.name)
+		} else {
+			log.Debug("Invoked", "duration", d, "function", nf.name)
+		}
+	}
+	return nil
+}
+
+func (inv *invoker) Apply(c container, _ rootContainer) error {
+	// Remember the scope in which we need to invoke.
+ invoker := func(log *slog.Logger, logThreshold time.Duration) error { return inv.invoke(log, c, logThreshold) } + + // Append the invoker to the list of invoke functions. These are invoked + // prior to start to build up the objects. They are not invoked directly + // here as first the configuration flags need to be registered. This allows + // using hives in a command-line application with many commands and where + // we don't yet know which command to run, but we still need to register + // all the flags. + return c.Invoke(func(l InvokerList) { + l.AppendInvoke(invoker) + }) +} + +func (inv *invoker) Info(container) Info { + n := NewInfoNode("") + for i := range inv.funcs { + namedFunc := &inv.funcs[i] + namedFunc.infoMu.Lock() + defer namedFunc.infoMu.Unlock() + + invNode := NewInfoNode(fmt.Sprintf("🛠️ %s", namedFunc.name)) + invNode.condensed = true + + ins := make([]string, 0, len(namedFunc.info.Inputs)) + for _, input := range namedFunc.info.Inputs { + ins = append(ins, input.String()) + } + slices.Sort(ins) + invNode.AddLeaf("⇨ %s", strings.Join(ins, ", ")) + n.Add(invNode) + } + return n +} + +// Invoke constructs a cell for invoke functions. The invoke functions are executed +// when the hive is started to instantiate all objects via the constructors. +func Invoke(funcs ...any) Cell { + namedFuncs := []namedFunc{} + for _, fn := range funcs { + namedFuncs = append( + namedFuncs, + namedFunc{name: internal.FuncNameAndLocation(fn), fn: fn}) + } + return &invoker{funcs: namedFuncs} +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go b/vendor/github.com/cilium/hive/cell/lifecycle.go similarity index 62% rename from vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go rename to vendor/github.com/cilium/hive/cell/lifecycle.go index 412e30fbb5..a41f75ee2c 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/lifecycle.go +++ b/vendor/github.com/cilium/hive/cell/lifecycle.go @@ -1,17 +1,17 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Authors of Cilium -package hive +package cell import ( "context" + "errors" "fmt" + "log/slog" + "sync" "time" - "go.uber.org/multierr" - - "github.com/cilium/cilium/pkg/hive/internal" - "github.com/cilium/cilium/pkg/lock" + "github.com/cilium/hive/internal" ) // HookContext is a context passed to a lifecycle hook that is cancelled @@ -20,6 +20,13 @@ import ( // initialize) must abort any such operation if this context is cancelled. type HookContext context.Context +// HookInterface wraps the Start and Stop methods that can be appended +// to an application lifecycle. +type HookInterface interface { + Start(HookContext) error + Stop(HookContext) error +} + // Hook is a pair of start and stop callbacks. Both are optional. // They're paired up to make sure that on failed start all corresponding // stop hooks are executed. @@ -42,44 +49,53 @@ func (h Hook) Stop(ctx HookContext) error { return h.OnStop(ctx) } -type HookInterface interface { - // Start hook is called when the hive is started. - // Returning a non-nil error causes the start to abort and - // the stop hooks for already started cells to be called. - // - // The context is valid only for the duration of the start - // and is used to allow aborting of start hook on timeout. - Start(HookContext) error - - // Stop hook is called when the hive is stopped or start aborted. - // Returning a non-nil error does not abort stopping. The error - // is recorded and rest of the stop hooks are executed. 
- Stop(HookContext) error -} - // Lifecycle enables cells to register start and stop hooks, either // from a constructor or an invoke function. type Lifecycle interface { Append(HookInterface) + + Start(*slog.Logger, context.Context) error + Stop(*slog.Logger, context.Context) error + PrintHooks() } // DefaultLifecycle lifecycle implements a simple lifecycle management that conforms // to Lifecycle. It is exported for use in applications that have nested lifecycles // (e.g. operator). type DefaultLifecycle struct { - mu lock.Mutex - hooks []HookInterface + mu sync.Mutex + hooks []augmentedHook numStarted int + + LogThreshold time.Duration +} + +type augmentedHook struct { + HookInterface + moduleID FullModuleID +} + +func NewDefaultLifecycle(hooks []HookInterface, numStarted int, logThreshold time.Duration) *DefaultLifecycle { + h := make([]augmentedHook, 0, len(hooks)) + for _, hook := range hooks { + h = append(h, augmentedHook{hook, nil}) + } + return &DefaultLifecycle{ + mu: sync.Mutex{}, + hooks: h, + numStarted: numStarted, + LogThreshold: logThreshold, + } } func (lc *DefaultLifecycle) Append(hook HookInterface) { lc.mu.Lock() defer lc.mu.Unlock() - lc.hooks = append(lc.hooks, hook) + lc.hooks = append(lc.hooks, augmentedHook{hook, nil}) } -func (lc *DefaultLifecycle) Start(ctx context.Context) error { +func (lc *DefaultLifecycle) Start(log *slog.Logger, ctx context.Context) error { lc.mu.Lock() defer lc.mu.Unlock() @@ -89,7 +105,7 @@ func (lc *DefaultLifecycle) Start(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, hook := range lc.hooks { + for i, hook := range lc.hooks { fnName, exists := getHookFuncName(hook, true) if !exists { @@ -98,21 +114,32 @@ func (lc *DefaultLifecycle) Start(ctx context.Context) error { continue } - l := log.WithField("function", fnName) + l := log.With("function", fnName) + + // Do not attempt to start already started hooks. + if i < lc.numStarted { + l.Error("Hook appears to be running. 
Skipping") + continue + } + l.Debug("Executing start hook") t0 := time.Now() if err := hook.Start(ctx); err != nil { - l.WithError(err).Error("Start hook failed") + l.Error("Start hook failed", "error", err) return err } d := time.Since(t0) - l.WithField("duration", d).Info("Start hook executed") + if d > lc.LogThreshold { + l.Info("Start hook executed", "duration", d) + } else { + l.Debug("Start hook executed", "duration", d) + } lc.numStarted++ } return nil } -func (lc *DefaultLifecycle) Stop(ctx context.Context) error { +func (lc *DefaultLifecycle) Stop(log *slog.Logger, ctx context.Context) error { lc.mu.Lock() defer lc.mu.Unlock() @@ -122,7 +149,7 @@ func (lc *DefaultLifecycle) Stop(ctx context.Context) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - var errs []error + var errs error for ; lc.numStarted > 0; lc.numStarted-- { if ctx.Err() != nil { return ctx.Err() @@ -133,18 +160,22 @@ func (lc *DefaultLifecycle) Stop(ctx context.Context) error { if !exists { continue } - l := log.WithField("function", fnName) + l := log.With("function", fnName) l.Debug("Executing stop hook") t0 := time.Now() if err := hook.Stop(ctx); err != nil { - l.WithError(err).Error("Stop hook failed") - errs = append(errs, err) + l.Error("Stop hook failed", "error", err) + errs = errors.Join(errs, err) } else { d := time.Since(t0) - l.WithField("duration", d).Info("Stop hook executed") + if d > lc.LogThreshold { + l.Info("Stop hook executed", "duration", d) + } else { + l.Debug("Stop hook executed", "duration", d) + } } } - return multierr.Combine(errs...) + return errs } func (lc *DefaultLifecycle) PrintHooks() { @@ -153,27 +184,39 @@ func (lc *DefaultLifecycle) PrintHooks() { fmt.Printf("Start hooks:\n\n") for _, hook := range lc.hooks { - fnName, exists := getHookFuncName(hook, true) + fnName, exists := getHookFuncName(hook.HookInterface, true) if !exists { continue } - fmt.Printf(" • %s\n", fnName) + fmt.Printf(" • %s (%s)\n", fnName, hook.moduleID) } fmt.Printf("\nStop hooks:\n\n") for i := len(lc.hooks) - 1; i >= 0; i-- { hook := lc.hooks[i] - fnName, exists := getHookFuncName(hook, false) + fnName, exists := getHookFuncName(hook.HookInterface, false) if !exists { continue } - fmt.Printf(" • %s\n", fnName) + fmt.Printf(" • %s (%s)\n", fnName, hook.moduleID) } } +type augmentedLifecycle struct { + *DefaultLifecycle + moduleID FullModuleID +} + +func (lc augmentedLifecycle) Append(hook HookInterface) { + lc.mu.Lock() + defer lc.mu.Unlock() + + lc.hooks = append(lc.hooks, augmentedHook{hook, lc.moduleID}) +} + func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool) { // Ok, we need to get a bit fancy here as runtime.FuncForPC does - // not return what we want: we get "hive.Hook.Stop()" when we want + // not return what we want: we get "cell.Hook.Stop()" when we want // "*foo.Stop(). We do know the concrete type, and we do know // the method name, so we check here whether we're dealing with // "Hook" the struct, or an object implementing HookInterface. @@ -183,6 +226,12 @@ func getHookFuncName(hook HookInterface, start bool) (name string, hasHook bool) // and the type params would be missing, so instead we'll just use the // type name + method name. 
switch hook := hook.(type) { + case augmentedHook: + name, hasHook = getHookFuncName(hook.HookInterface, start) + if hasHook && len(hook.moduleID) > 0 { + name = name + " (" + hook.moduleID.String() + ")" + } + return case Hook: if start { if hook.OnStart == nil { diff --git a/vendor/github.com/cilium/hive/cell/module.go b/vendor/github.com/cilium/hive/cell/module.go new file mode 100644 index 0000000000..f2c546d219 --- /dev/null +++ b/vendor/github.com/cilium/hive/cell/module.go @@ -0,0 +1,206 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cell + +import ( + "fmt" + "log/slog" + "regexp" + "slices" + "strings" + + "go.uber.org/dig" +) + +// Module creates a scoped set of cells with a given identifier. +// +// The id and description will be included in the object dump (hive.PrintObjects). +// The id must be lower-case, at most 30 characters and only contain [a-z0-9-_]. +// The description can contain [a-zA-Z0-9_- ] and must be shorter than 80 characters. +// +// As the description will be shown alongside the id, it should not repeat the id, but +// rather expand on it, for example; +// +// endpoint-manager: Manages and provides access to endpoints +// ^- id ^- description +// +// Private constructors with a module (ProvidePrivate) are only accessible +// within this module and its sub-modules. +func Module(id, description string, cells ...Cell) Cell { + validateIDAndDescription(id, description) + return &module{id, description, cells} +} + +// ModuleID is the module identifier. Provided in the module's scope. +type ModuleID string + +// FullModuleID is the fully qualified module identifier, e.g. the +// concat of nested module ids, e.g. "agent.controlplane.endpoint-manager". +// Provided in the module's scope. +type FullModuleID []string + +func (f FullModuleID) String() string { + return strings.Join(f, ".") +} + +func (f FullModuleID) append(m ModuleID) FullModuleID { + return append(slices.Clone(f), string(m)) +} + +// RootLogger is the unscoped logger without any attrs added to it. +type RootLogger *slog.Logger + +// ModuleDecorator is the optional decorator function used for each module +// to provide or replace objects in each module's scope. +// Supplied with [hive.Options] field 'ModuleDecorators'. +// +// This can be used to provide module-specific instances of objects application +// wide, similar to how *slog.Logger is provided by default. +type ModuleDecorator any + +type ModuleDecorators []ModuleDecorator + +// ModulePrivateProvider is the optional private provide function used for each module +// to provide objects in each module's scope. +// Supplied with [hive.Options] field 'ModulePrivateProviders'. +// +// This is different from a [ModuleDecorator] in that this can be used to provide objects +// that do not yet exist in the object graph, whereas [ModuleDecorator] requires that the +// objects that are being decorated already exist. 
+ +type ModulePrivateProvider any + +type ModulePrivateProviders []ModulePrivateProvider + +var ( + idRegex = regexp.MustCompile(`^[a-z][a-z0-9_\-]{1,30}$`) + descriptionRegex = regexp.MustCompile(`^[a-zA-Z0-9_\- ]{1,80}$`) +) + +func validateIDAndDescription(id, description string) { + if !idRegex.MatchString(id) { + panic(fmt.Sprintf("Invalid hive.Module id: %q, expected to id match %s", id, idRegex)) + } + if !descriptionRegex.MatchString(description) { + panic(fmt.Sprintf("Invalid hive.Module description: %q, expected to match regex %q", description, descriptionRegex)) + } +} + +type module struct { + // id is the module identity. It is shown in object output and is used to derive + // the scoped logger. + id string + + // description is a human-readable short description for the module. Shown in object output + // alongside the identifier. + description string + + cells []Cell +} + +func (m *module) logger(moduleID FullModuleID, rootLog RootLogger) *slog.Logger { + return (*slog.Logger)(rootLog).With("module", moduleID.String()) +} + +func (m *module) moduleID() ModuleID { + return ModuleID(m.id) +} + +func (m *module) fullModuleID(parent FullModuleID) FullModuleID { + return parent.append(m.moduleID()) +} + +func (m *module) lifecycle(lc Lifecycle, fullID FullModuleID) Lifecycle { + switch lc := lc.(type) { + case *DefaultLifecycle: + return &augmentedLifecycle{ + lc, + fullID, + } + case *augmentedLifecycle: + return &augmentedLifecycle{ + lc.DefaultLifecycle, + fullID, + } + default: + return lc + } +} + +type moduleDecoratorParams struct { + In + ModuleDecorators ModuleDecorators +} + +func (m *module) moduleDecorators(scope *dig.Scope) error { + provide := func(p moduleDecoratorParams) error { + for _, d := range p.ModuleDecorators { + if err := scope.Decorate(d); err != nil { + return err + } + } + return nil + } + return scope.Invoke(provide) +} + +type modulePrivateProviderParams struct { + In + ModulePrivateProviders ModulePrivateProviders +} + +func (m *module) modulePrivateProviders(scope *dig.Scope) error { + provide := func(p modulePrivateProviderParams) error { + for _, d := range p.ModulePrivateProviders { + if err := scope.Provide(d); err != nil { + return err + } + } + return nil + } + return scope.Invoke(provide) +} + +func (m *module) Apply(c container, rc rootContainer) error { + scope := c.Scope(m.id) + + // Provide ModuleID and FullModuleID in the module's scope. 
+ if err := scope.Provide(m.moduleID); err != nil { + return err + } + if err := scope.Decorate(m.fullModuleID); err != nil { + return err + } + + if err := scope.Decorate(m.lifecycle); err != nil { + return err + } + + if err := scope.Decorate(m.logger); err != nil { + return err + } + + if err := m.moduleDecorators(scope); err != nil { + return err + } + + if err := m.modulePrivateProviders(scope); err != nil { + return err + } + + for _, cell := range m.cells { + if err := cell.Apply(scope, rc); err != nil { + return err + } + } + return nil +} + +func (m *module) Info(c container) Info { + n := NewInfoNode("Ⓜ️ " + m.id + " (" + m.description + ")") + for _, cell := range m.cells { + n.Add(cell.Info(c)) + } + return n +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go b/vendor/github.com/cilium/hive/cell/provide.go similarity index 67% rename from vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go rename to vendor/github.com/cilium/hive/cell/provide.go index 6b21cc3c00..7f2b3bc8a2 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/cell/provide.go +++ b/vendor/github.com/cilium/hive/cell/provide.go @@ -5,25 +5,42 @@ package cell import ( "fmt" - "sort" + "slices" "strings" + "sync" "go.uber.org/dig" - "github.com/cilium/cilium/pkg/hive/internal" + "github.com/cilium/hive/internal" ) // provider is a set of constructors type provider struct { - ctors []any - infos []dig.ProvideInfo - export bool + ctors []any + infosMu sync.Mutex + infos []dig.ProvideInfo + export bool } -func (p *provider) Apply(c container) error { - p.infos = make([]dig.ProvideInfo, len(p.ctors)) +func (p *provider) Apply(c container, rc rootContainer) error { + // Since the same Provide cell may be used multiple times + // in different hives we use a mutex to protect it and we + // fill the provide info only the first time. 
+ p.infosMu.Lock() + defer p.infosMu.Unlock() + + fillInfo := false + if p.infos == nil { + p.infos = make([]dig.ProvideInfo, len(p.ctors)) + fillInfo = true + } + for i, ctor := range p.ctors { - if err := c.Provide(ctor, dig.Export(p.export), dig.FillProvideInfo(&p.infos[i])); err != nil { + opts := []dig.ProvideOption{dig.Export(p.export)} + if fillInfo { + opts = append(opts, dig.FillProvideInfo(&p.infos[i])) + } + if err := c.Provide(ctor, opts...); err != nil { return err } } @@ -31,6 +48,9 @@ func (p *provider) Apply(c container) error { } func (p *provider) Info(container) Info { + p.infosMu.Lock() + defer p.infosMu.Unlock() + n := &InfoNode{} for i, ctor := range p.ctors { info := p.infos[i] @@ -42,15 +62,16 @@ func (p *provider) Info(container) Info { ctorNode := NewInfoNode(fmt.Sprintf("🚧%s %s", privateSymbol, internal.FuncNameAndLocation(ctor))) ctorNode.condensed = true - var ins, outs []string + ins := make([]string, 0, len(info.Inputs)) for _, input := range info.Inputs { - ins = append(ins, internal.TrimName(input.String())) + ins = append(ins, input.String()) } - sort.Strings(ins) + slices.Sort(ins) + outs := make([]string, 0, len(info.Outputs)) for _, output := range info.Outputs { - outs = append(outs, internal.TrimName(output.String())) + outs = append(outs, output.String()) } - sort.Strings(outs) + slices.Sort(outs) if len(ins) > 0 { ctorNode.AddLeaf("⇨ %s", strings.Join(ins, ", ")) } diff --git a/vendor/github.com/cilium/hive/cell/simple_health.go b/vendor/github.com/cilium/hive/cell/simple_health.go new file mode 100644 index 0000000000..60da0ef46d --- /dev/null +++ b/vendor/github.com/cilium/hive/cell/simple_health.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package cell + +import ( + "fmt" + "regexp" + "sync" + "time" + + "github.com/cilium/hive/script" +) + +type simpleHealthRoot struct { + sync.Mutex + all map[string]*SimpleHealth +} + +type SimpleHealth struct { + *simpleHealthRoot + + Scope string + Level Level + Status string + Error error +} + +// NewScope implements cell.Health. +func (h *SimpleHealth) NewScope(name string) Health { + h.Lock() + defer h.Unlock() + + h2 := &SimpleHealth{ + simpleHealthRoot: h.simpleHealthRoot, + Scope: h.Scope + "." + name, + } + h.all[name] = h2 + return h2 +} + +func (h *SimpleHealth) GetChild(fullName string) *SimpleHealth { + h.Lock() + defer h.Unlock() + + if child, ok := h.all[fullName]; ok { + return child + } + return nil +} + +// Degraded implements cell.Health. +func (h *SimpleHealth) Degraded(reason string, err error) { + h.Lock() + defer h.Unlock() + + h.Level = StatusDegraded + h.Status = reason + h.Error = err +} + +// OK implements cell.Health. +func (h *SimpleHealth) OK(status string) { + h.Lock() + defer h.Unlock() + + h.Level = StatusOK + h.Status = status + h.Error = nil +} + +// Stopped implements cell.Health. +func (h *SimpleHealth) Stopped(reason string) { + h.Lock() + defer h.Unlock() + + h.Level = StatusStopped + h.Status = reason + h.Error = nil +} + +func (h *SimpleHealth) Close() { + h.Lock() + defer h.Unlock() + + delete(h.all, h.Scope) +} + +func NewSimpleHealth() (Health, *SimpleHealth) { + h := &SimpleHealth{ + simpleHealthRoot: &simpleHealthRoot{ + all: make(map[string]*SimpleHealth), + }, + } + return h, h +} + +// SimpleHealthCmd for showing or checking the simple module health state. +// Not provided as hive.ScriptCmdOut due to cyclic import issues. 
To include +// provide with: hive.ScriptCmdOut("health", SimpleHealthCmd(simpleHealth))) +// +// Example: +// +// # show health +// health +// +// # grep health +// health 'my-module: level=OK' +func SimpleHealthCmd(h *SimpleHealth) script.Cmd { + return script.Command( + script.CmdUsage{ + Summary: "Show or grep simple health", + Args: "(pattern)", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + var re *regexp.Regexp + if len(args) == 1 { + re = regexp.MustCompile(args[0]) + } + for s.Context().Err() == nil { + h.Lock() + for name, h := range h.all { + var errStr string + if h.Error != nil { + errStr = h.Error.Error() + } + line := fmt.Sprintf("%s: level=%s message=%s error=%s", name, h.Level, h.Status, errStr) + if re != nil { + if re.Match([]byte(line)) { + h.Unlock() + s.Logf("matched: %s\n", line) + return nil, nil + } + } else { + fmt.Fprintln(s.LogWriter(), line) + } + } + h.Unlock() + if re == nil { + return nil, nil + } + time.Sleep(10 * time.Millisecond) + } + return nil, fmt.Errorf("no match for %s", re) + }, + ) +} + +var _ Health = &SimpleHealth{} + +var SimpleHealthCell = Provide(NewSimpleHealth) diff --git a/vendor/github.com/cilium/cilium/pkg/hive/command.go b/vendor/github.com/cilium/hive/command.go similarity index 80% rename from vendor/github.com/cilium/cilium/pkg/hive/command.go rename to vendor/github.com/cilium/hive/command.go index fbc74a2234..62ec26724c 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/command.go +++ b/vendor/github.com/cilium/hive/command.go @@ -4,10 +4,7 @@ package hive import ( - "github.com/sirupsen/logrus" "github.com/spf13/cobra" - - "github.com/cilium/cilium/pkg/logging" ) // Command constructs the cobra command for hive. The hive @@ -17,8 +14,6 @@ func (h *Hive) Command() *cobra.Command { Use: "hive", Short: "Inspect the hive", Run: func(cmd *cobra.Command, args []string) { - // Silence log messages from calling invokes and constructors. - logging.SetLogLevel(logrus.WarnLevel) h.PrintObjects() }, TraverseChildren: false, diff --git a/vendor/github.com/cilium/cilium/pkg/hive/doc.go b/vendor/github.com/cilium/hive/doc.go similarity index 100% rename from vendor/github.com/cilium/cilium/pkg/hive/doc.go rename to vendor/github.com/cilium/hive/doc.go diff --git a/vendor/github.com/cilium/cilium/pkg/hive/hive.go b/vendor/github.com/cilium/hive/hive.go similarity index 52% rename from vendor/github.com/cilium/cilium/pkg/hive/hive.go rename to vendor/github.com/cilium/hive/hive.go index df9e7426b2..7760ec8add 100644 --- a/vendor/github.com/cilium/cilium/pkg/hive/hive.go +++ b/vendor/github.com/cilium/hive/hive.go @@ -5,7 +5,9 @@ package hive import ( "context" + "errors" "fmt" + "log/slog" "os" "os/signal" "reflect" @@ -13,20 +15,71 @@ import ( "syscall" "time" - "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/spf13/viper" "go.uber.org/dig" - "go.uber.org/multierr" - "github.com/cilium/cilium/pkg/hive/cell" - "github.com/cilium/cilium/pkg/logging" - "github.com/cilium/cilium/pkg/logging/logfields" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/script" ) -var ( - log = logging.DefaultLogger.WithField(logfields.LogSubsys, "hive") -) +type Options struct { + // EnvPrefix is the prefix to use for environment variables, e.g. + // with prefix "CILIUM" the flag "foo" can be set with environment + // variable "CILIUM_FOO". + EnvPrefix string + + // ModuleDecorator is an optional set of decorator functions to use for each + // module. This can be used to provide module-scoped overrides to objects. 
+ // For example: + // + // opts.ModuleDecorators = cell.ModuleDecorators{ + // func(foo Foo, id cell.ModuleID) Foo { + // return foo.With("moduleID", id) + // }, + // } + // + // The above would give each cell within a module an augmented version of 'Foo'. + // The object that is being decorated (the return value) must already exist in + // the object graph. + ModuleDecorators cell.ModuleDecorators + + // ModulePrivateProvider is an optional set of private provide functions to + // use for each module. This can be used to provide module-scoped objects. + // For example: + // + // opts.ModulePrivateProviders = cell.ModulePrivateProviders{ + // func(id cell.ModuleID) Foo { + // return foo.New(id) + // }, + // } + // + ModulePrivateProviders cell.ModulePrivateProviders + + // DecodeHooks are optional additional decode hooks to use with cell.Config + // to decode a configuration flag into a config field. See existing hooks + // in [cell/config.go] for examples. + DecodeHooks cell.DecodeHooks + + StartTimeout time.Duration + StopTimeout time.Duration + + // LogThreshold is an optional threshold to reduce logging verbosity. + // When an Invoke or Lifecycle Start/Stop hook takes longer than this + // threshold, it will be logged at Info level. Otherwise it is logged + // at Debug level. + LogThreshold time.Duration +} + +func DefaultOptions() Options { + return Options{ + EnvPrefix: "", + ModuleDecorators: nil, + StartTimeout: defaultStartTimeout, + StopTimeout: defaultStopTimeout, + LogThreshold: defaultLogThreshold, + } +} const ( // defaultStartTimeout is the amount of time allotted for start hooks. After @@ -36,9 +89,7 @@ const ( // defaultStopTimeout is the amount of time allotted for stop hooks. defaultStopTimeout = time.Minute - // defaultEnvPrefix is the default prefix for environment variables, e.g. - // flag "foo" can be set with environment variable "CILIUM_FOO". - defaultEnvPrefix = "CILIUM_" + defaultLogThreshold = time.Duration(0) ) // Hive is a framework building modular applications. @@ -47,17 +98,16 @@ const ( // // See pkg/hive/example for a runnable example application. type Hive struct { - container *dig.Container - cells []cell.Cell - shutdown chan error - envPrefix string - startTimeout, stopTimeout time.Duration - flags *pflag.FlagSet - viper *viper.Viper - lifecycle *DefaultLifecycle - populated bool - invokes []func() error - configOverrides []any + opts Options + container *dig.Container + cells []cell.Cell + shutdown chan error + flags *pflag.FlagSet + viper *viper.Viper + lifecycle cell.Lifecycle + populated bool + invokes []func(*slog.Logger, time.Duration) error + configOverrides []any } // New returns a new hive that can be run, or inspected. @@ -70,53 +120,43 @@ type Hive struct { // flags. Likewise if configuration settings come from configuration files, then // the Viper() method can be used to populate the hive's viper instance. func New(cells ...cell.Cell) *Hive { + return NewWithOptions(DefaultOptions(), cells...) 
+} + +func NewWithOptions(opts Options, cells ...cell.Cell) *Hive { h := &Hive{ - container: dig.New(), - envPrefix: defaultEnvPrefix, - cells: cells, - viper: viper.New(), - startTimeout: defaultStartTimeout, - stopTimeout: defaultStopTimeout, - flags: pflag.NewFlagSet("", pflag.ContinueOnError), - lifecycle: &DefaultLifecycle{}, + opts: opts, + container: dig.New(dig.DeferAcyclicVerification()), + cells: cells, + viper: viper.New(), + flags: pflag.NewFlagSet("", pflag.ContinueOnError), + lifecycle: &cell.DefaultLifecycle{ + LogThreshold: opts.LogThreshold, + }, shutdown: make(chan error, 1), configOverrides: nil, } if err := h.provideDefaults(); err != nil { - log.WithError(err).Fatal("Failed to provide default objects") - } - - // Use a single health provider for all cells, which is used to create - // module scoped health reporters. - if err := h.container.Provide(func(lc Lifecycle) cell.Health { - hp := cell.NewHealthProvider() - lc.Append(Hook{ - OnStop: func(ctx HookContext) error { - return hp.Stop(ctx) - }, - }) - return hp - }); err != nil { - log.WithError(err).Fatal("Failed to provide health provider") + panic(fmt.Sprintf("Failed to provide defaults: %s", err)) } // Apply all cells to the container. This registers all constructors // and adds all config flags. Invokes are delayed until Start() is // called. for _, cell := range cells { - if err := cell.Apply(h.container); err != nil { - log.WithError(err).Fatal("Failed to apply cell") + if err := cell.Apply(h.container, h.container); err != nil { + panic(fmt.Sprintf("Failed to apply cell: %s", err)) } } // Bind the newly registered flags to viper. h.flags.VisitAll(func(f *pflag.Flag) { if err := h.viper.BindPFlag(f.Name, f); err != nil { - log.Fatalf("BindPFlag: %s", err) + panic(fmt.Sprintf("BindPFlag: %s", err)) } if err := h.viper.BindEnv(f.Name, h.getEnvName(f.Name)); err != nil { - log.Fatalf("BindEnv: %s", err) + panic(fmt.Sprintf("BindEnv: %s", err)) } }) @@ -132,7 +172,7 @@ func New(cells ...cell.Cell) *Hive { func (h *Hive) RegisterFlags(flags *pflag.FlagSet) { h.flags.VisitAll(func(f *pflag.Flag) { if flags.Lookup(f.Name) != nil { - log.Fatalf("Error registering flag: '%s' already registered", f.Name) + panic(fmt.Sprintf("Error registering flag: '%s' already registered", f.Name)) } flags.AddFlag(f) }) @@ -146,33 +186,32 @@ func (h *Hive) Viper() *viper.Viper { type defaults struct { dig.Out - Flags *pflag.FlagSet - Lifecycle Lifecycle - Logger logrus.FieldLogger - Shutdowner Shutdowner - InvokerList cell.InvokerList + Flags *pflag.FlagSet + Lifecycle cell.Lifecycle + Shutdowner Shutdowner + InvokerList cell.InvokerList + EmptyFullModuleID cell.FullModuleID + DecodeHooks cell.DecodeHooks + ModuleDecorators cell.ModuleDecorators + ModulePrivateProviders cell.ModulePrivateProviders } func (h *Hive) provideDefaults() error { + return h.container.Provide(func() defaults { return defaults{ - Flags: h.flags, - Lifecycle: h.lifecycle, - Logger: log, - Shutdowner: h, - InvokerList: h, + Flags: h.flags, + Lifecycle: h.lifecycle, + Shutdowner: h, + InvokerList: h, + EmptyFullModuleID: nil, + DecodeHooks: h.opts.DecodeHooks, + ModuleDecorators: h.opts.ModuleDecorators, + ModulePrivateProviders: h.opts.ModulePrivateProviders, } }) } -func (h *Hive) SetTimeouts(start, stop time.Duration) { - h.startTimeout, h.stopTimeout = start, stop -} - -func (h *Hive) SetEnvPrefix(prefix string) { - h.envPrefix = prefix -} - // AddConfigOverride appends a config override function to modify // a configuration after it has been parsed. 
// @@ -183,40 +222,36 @@ func AddConfigOverride[Cfg cell.Flagger](h *Hive, override func(*Cfg)) { // Run populates the cell configurations and runs the hive cells. // Interrupt signal or call to Shutdowner.Shutdown() will cause the hive to stop. -func (h *Hive) Run() error { - startCtx, cancel := context.WithTimeout(context.Background(), h.startTimeout) +func (h *Hive) Run(log *slog.Logger) error { + startCtx, cancel := context.WithTimeout(context.Background(), h.opts.StartTimeout) defer cancel() - var errors []error - - if err := h.Start(startCtx); err != nil { - errors = append(errors, fmt.Errorf("failed to start: %w", err)) + var errs error + if err := h.Start(log, startCtx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to start: %w", err)) } // If start was successful, wait for Shutdown() or interrupt. - if len(errors) == 0 { - shutdownErr := h.waitForSignalOrShutdown() - if shutdownErr != nil { - errors = append(errors, shutdownErr) - } + if errs == nil { + errs = errors.Join(errs, h.waitForSignalOrShutdown(log)) } - stopCtx, cancel := context.WithTimeout(context.Background(), h.stopTimeout) + stopCtx, cancel := context.WithTimeout(context.Background(), h.opts.StopTimeout) defer cancel() - if err := h.Stop(stopCtx); err != nil { - errors = append(errors, fmt.Errorf("failed to stop: %w", err)) + if err := h.Stop(log, stopCtx); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to stop: %w", err)) } - return multierr.Combine(errors...) + return errs } -func (h *Hive) waitForSignalOrShutdown() error { +func (h *Hive) waitForSignalOrShutdown(log *slog.Logger) error { signals := make(chan os.Signal, 1) defer signal.Stop(signals) signal.Notify(signals, os.Interrupt, syscall.SIGTERM) select { case sig := <-signals: - log.WithField("signal", sig).Info("Signal received") + log.Info("Signal received", "signal", sig) return nil case err := <-h.shutdown: return err @@ -225,7 +260,7 @@ func (h *Hive) waitForSignalOrShutdown() error { // Populate instantiates the hive. Use for testing that the hive can // be instantiated. -func (h *Hive) Populate() error { +func (h *Hive) Populate(log *slog.Logger) error { if h.populated { return nil } @@ -239,6 +274,16 @@ func (h *Hive) Populate() error { if err != nil { return err } + // Provide the user-provide logging infrastructure. This happens here so + // that the hive can be created prior to having to lock down the logging + // configuration. + err = h.container.Provide( + func() (*slog.Logger, cell.RootLogger) { + return log, cell.RootLogger(log) + }) + if err != nil { + return err + } // Provide config overriders if any for _, o := range h.configOverrides { @@ -269,39 +314,45 @@ func (h *Hive) Populate() error { // Execute the invoke functions to construct the objects. for _, invoke := range h.invokes { - if err := invoke(); err != nil { + if err := invoke(log, h.opts.LogThreshold); err != nil { return err } } return nil } -func (h *Hive) AppendInvoke(invoke func() error) { +func (h *Hive) AppendInvoke(invoke func(*slog.Logger, time.Duration) error) { h.invokes = append(h.invokes, invoke) } // Start starts the hive. The context allows cancelling the start. // If context is cancelled and the start hooks do not respect the cancellation // then after 5 more seconds the process will be terminated forcefully. 
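For orientation, here is a minimal sketch of how an application might construct and run a hive against the Options, NewWithOptions and Run API shown above. It is not part of the vendored diff; the "example" module, its start hook and the EXAMPLE_ prefix are made up for illustration.

package main

import (
	"fmt"
	"log/slog"

	"github.com/cilium/hive"
	"github.com/cilium/hive/cell"
)

func main() {
	opts := hive.DefaultOptions()
	opts.EnvPrefix = "EXAMPLE_" // flag "foo" becomes settable via EXAMPLE_FOO

	h := hive.NewWithOptions(
		opts,
		cell.Module(
			"example",
			"Example module",
			cell.Invoke(func(lc cell.Lifecycle) {
				lc.Append(cell.Hook{
					OnStart: func(cell.HookContext) error {
						fmt.Println("started")
						return nil
					},
				})
			}),
		),
	)

	// Run blocks until an interrupt signal or a Shutdowner.Shutdown() call.
	if err := h.Run(slog.Default()); err != nil {
		slog.Error("hive run failed", "error", err)
	}
}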
-func (h *Hive) Start(ctx context.Context) error { - if err := h.Populate(); err != nil { +func (h *Hive) Start(log *slog.Logger, ctx context.Context) error { + if err := h.Populate(log); err != nil { return err } defer close(h.fatalOnTimeout(ctx)) - log.Info("Starting") - - return h.lifecycle.Start(ctx) + log.Info("Starting hive") + start := time.Now() + err := h.lifecycle.Start(log, ctx) + if err == nil { + log.Info("Started", "duration", time.Since(start)) + } else { + log.Error("Start failed", "error", err, "duration", time.Since(start)) + } + return err } // Stop stops the hive. The context allows cancelling the stop. // If context is cancelled and the stop hooks do not respect the cancellation // then after 5 more seconds the process will be terminated forcefully. -func (h *Hive) Stop(ctx context.Context) error { +func (h *Hive) Stop(log *slog.Logger, ctx context.Context) error { defer close(h.fatalOnTimeout(ctx)) log.Info("Stopping") - return h.lifecycle.Stop(ctx) + return h.lifecycle.Stop(log, ctx) } func (h *Hive) fatalOnTimeout(ctx context.Context) chan struct{} { @@ -317,12 +368,10 @@ func (h *Hive) fatalOnTimeout(ctx context.Context) chan struct{} { // Context was cancelled. Give 5 more seconds and then // go fatal. - time.Sleep(5 * time.Second) - select { case <-terminated: - default: - log.Fatal("Start or stop failed to finish on time, aborting forcefully.") + case <-time.After(5 * time.Second): + panic("Start or stop failed to finish on time, aborting forcefully.") } }() return terminated @@ -344,8 +393,8 @@ func (h *Hive) Shutdown(opts ...ShutdownOption) { } func (h *Hive) PrintObjects() { - if err := h.Populate(); err != nil { - log.WithError(err).Fatal("Failed to populate object graph") + if err := h.Populate(slog.Default()); err != nil { + panic(fmt.Sprintf("Failed to populate object graph: %s", err)) } fmt.Printf("Cells:\n\n") @@ -358,12 +407,12 @@ func (h *Hive) PrintObjects() { } func (h *Hive) PrintDotGraph() { - if err := h.Populate(); err != nil { - log.WithError(err).Fatal("Failed to populate object graph") + if err := h.Populate(slog.Default()); err != nil { + panic(fmt.Sprintf("Failed to populate object graph: %s", err)) } if err := dig.Visualize(h.container, os.Stdout); err != nil { - log.WithError(err).Fatal("Failed to Visualize()") + panic(fmt.Sprintf("Failed to dig.Visualize(): %s", err)) } } @@ -371,5 +420,21 @@ func (h *Hive) PrintDotGraph() { func (h *Hive) getEnvName(option string) string { under := strings.Replace(option, "-", "_", -1) upper := strings.ToUpper(under) - return h.envPrefix + upper + return h.opts.EnvPrefix + upper +} + +func (h *Hive) ScriptCommands(log *slog.Logger) (map[string]script.Cmd, error) { + if err := h.Populate(log); err != nil { + return nil, fmt.Errorf("failed to populate object graph: %s", err) + } + m := map[string]script.Cmd{} + m["hive"] = hiveScriptCmd(h, log) + + // Gather the commands from the hive. 
+ h.container.Invoke(func(sc ScriptCmds) { + for name, cmd := range sc.Map() { + m[name] = cmd + } + }) + return m, nil } diff --git a/vendor/github.com/cilium/hive/internal/map_string.go b/vendor/github.com/cilium/hive/internal/map_string.go new file mode 100644 index 0000000000..b2fefa11ec --- /dev/null +++ b/vendor/github.com/cilium/hive/internal/map_string.go @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package internal + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strings" + "unicode" + + "github.com/spf13/cast" + "github.com/spf13/viper" +) + +const ( + comma = ',' + equal = '=' +) + +var keyValueRegex = regexp.MustCompile(`([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*[\w-:;,./@])?[\w-:;,./@]*,)*([\w-:;./@]+=([\w-:;,./@][\w-:;,./@ ]*)?[\w-:;./@]+)$`) + +// GetStringMapString contains one enhancement to support k1=v2,k2=v2 compared to original +// implementation of GetStringMapString function +// Related upstream issue https://github.com/spf13/viper/issues/911 +func GetStringMapString(vp *viper.Viper, key string) map[string]string { + v, _ := GetStringMapStringE(vp, key) + return v +} + +// GetStringMapStringE is same as GetStringMapString, but with error +func GetStringMapStringE(vp *viper.Viper, key string) (map[string]string, error) { + return ToStringMapStringE(vp.Get(key)) +} + +// ToStringMapStringE casts an interface to a map[string]string type. The underlying +// interface type might be a map or string. In the latter case, it is attempted to be +// json decoded, falling back to the k1=v2,k2=v2 format in case it doesn't look like json. +func ToStringMapStringE(data interface{}) (map[string]string, error) { + if data == nil { + return map[string]string{}, nil + } + + v, err := cast.ToStringMapStringE(data) + if err != nil { + var syntaxErr *json.SyntaxError + if !errors.As(err, &syntaxErr) { + return v, err + } + + switch s := data.(type) { + case string: + if len(s) == 0 { + return map[string]string{}, nil + } + + // if the input is starting with either '{' or '[', just preserve original json parsing error. + firstIndex := strings.IndexFunc(s, func(r rune) bool { + return !unicode.IsSpace(r) + }) + if firstIndex != -1 && (s[firstIndex] == '{' || s[firstIndex] == '[') { + return v, err + } + + if !isValidKeyValuePair(s) { + return map[string]string{}, fmt.Errorf("'%s' is not formatted as key=value,key1=value1", s) + } + + var v = map[string]string{} + kvs := splitKeyValue(s, comma, equal) + for _, kv := range kvs { + temp := strings.Split(kv, string(equal)) + if len(temp) != 2 { + return map[string]string{}, fmt.Errorf("'%s' in '%s' is not formatted as key=value,key1=value1", kv, s) + } + v[temp[0]] = temp[1] + } + return v, nil + } + } + return v, nil +} + +// isValidKeyValuePair returns true if the input is following key1=value1,key2=value2,...,keyN=valueN format. +func isValidKeyValuePair(str string) bool { + if len(str) == 0 { + return true + } + return len(keyValueRegex.ReplaceAllString(str, "")) == 0 +} + +// splitKeyValue is similar to strings.Split, but looks ahead to make sure +// that sep character is allowed in value component of key-value pair. +// +// Example: with the input "c6a.2xlarge=4,15,15,m4.xlarge=2,4,8", +// - strings.Split function will return []string{"c6a.2xlarge=4", "15", "15", "m4.xlarge=2", "4", "8"}. +// - splitKeyValue function will return []string{"c6a.2xlarge=4,15,15", "m4.xlarge=2,4,8"} instead. 
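As a quick illustration of the parsing behaviour described above, the following in-package test sketch (not part of the vendored file) asserts exactly what the doc comments of ToStringMapStringE and splitKeyValue promise.

package internal

import "testing"

func TestToStringMapStringESketch(t *testing.T) {
	// Plain key=value pairs.
	m, err := ToStringMapStringE("k1=v1,k2=v2")
	if err != nil || m["k1"] != "v1" || m["k2"] != "v2" {
		t.Fatalf("unexpected result: %v, %v", m, err)
	}

	// Commas inside a value stay attached to their key, as in the
	// splitKeyValue example above.
	m, err = ToStringMapStringE("c6a.2xlarge=4,15,15,m4.xlarge=2,4,8")
	if err != nil || m["c6a.2xlarge"] != "4,15,15" || m["m4.xlarge"] != "2,4,8" {
		t.Fatalf("unexpected result: %v, %v", m, err)
	}
}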
+func splitKeyValue(str string, sep rune, keyValueSep rune) []string { + var sepIndexes, kvValueSepIndexes []int + // find all indexes of separator character + for i := 0; i < len(str); i++ { + switch int32(str[i]) { + case sep: + sepIndexes = append(sepIndexes, i) + case keyValueSep: + kvValueSepIndexes = append(kvValueSepIndexes, i) + } + } + + // there's only a single key-value if there are no separators ("key=value") + // or a single key-value separator ("key=option1:value1,option2:value2") + if len(sepIndexes) == 0 || len(kvValueSepIndexes) == 1 { + return []string{str} + } + + if len(sepIndexes) == 1 { + index := sepIndexes[0] + return []string{str[:index], str[index+1:]} + } + + var res []string + var start = 0 + for i := 0; i < len(sepIndexes); i++ { + last := len(str) + if i < len(sepIndexes)-1 { + last = sepIndexes[i+1] + } + if strings.ContainsRune(str[sepIndexes[i]:last], keyValueSep) { + res = append(res, str[start:sepIndexes[i]]) + start = sepIndexes[i] + 1 + } + } + // append the remaining for last sep index + res = append(res, str[start:]) + return res +} diff --git a/vendor/github.com/cilium/hive/internal/reflect.go b/vendor/github.com/cilium/hive/internal/reflect.go new file mode 100644 index 0000000000..867810da59 --- /dev/null +++ b/vendor/github.com/cilium/hive/internal/reflect.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package internal + +import ( + "fmt" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" +) + +func PrettyType(x any) string { + return fmt.Sprintf("%T", x) +} + +func FuncNameAndLocation(fn any) string { + f := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()) + file, line := f.FileLine(f.Entry()) + name := f.Name() + name = strings.TrimSuffix(name, "-fm") + // Truncate the path in name as it'll be repeated in file path + if idx := strings.LastIndex(name, "/"); idx > 0 { + name = name[idx+1:] + } + if file != "" { + return fmt.Sprintf("%s (%s:%d)", name, usefulPathSegment(file), line) + } + return name +} + +// Purely a heuristic. +var commonRoots = map[string]struct{}{ + "pkg": {}, + "cmd": {}, +} + +func usefulPathSegment(file string) string { + p := filepath.Clean(file) + segs := strings.Split(p, string(os.PathSeparator)) + + for i := len(segs) - 1; i > 0; i-- { + if _, ok := commonRoots[segs[i]]; ok { + segs = segs[i:] + break + } + } + + // Truncate to at most 3 segments + if len(segs) > 3 { + segs = segs[len(segs)-4:] + segs[0] = "..." + } + + return filepath.Join(segs...) +} diff --git a/vendor/github.com/cilium/hive/job/job.go b/vendor/github.com/cilium/hive/job/job.go new file mode 100644 index 0000000000..1bcd751ce7 --- /dev/null +++ b/vendor/github.com/cilium/hive/job/job.go @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package job + +import ( + "context" + "crypto/sha256" + "fmt" + "log/slog" + "runtime/pprof" + "sync" + + "github.com/cilium/hive" + "github.com/cilium/hive/cell" +) + +// Cell provides job.Registry which constructs job.Group's. Job groups automate a lot of the logic involved with +// lifecycle management of goroutines within a Hive Cell. Providing a context that is canceled on shutdown and making +// sure multiple goroutines properly shutdown takes a lot of boilerplate. Job groups make it easy to queue, spawn, and +// collect jobs with minimal boilerplate. The registry maintains references to all groups which will allow us to add +// automatic metrics collection and/or status reporting in the future. 
+var Cell = cell.Module( + "jobs", + "Managed background goroutines and timers", + cell.Provide( + newRegistry, + ), +) + +// A Registry creates Groups, it maintains references to these groups for the purposes of collecting information +// centralized like metrics. +type Registry interface { + // NewGroup creates a new group of jobs which can be started and stopped together as part of the cells lifecycle. + // The provided scope is used to report health status of the jobs. A `cell.Scope` can be obtained via injection + // an object with the correct scope is provided by the closest `cell.Module`. + NewGroup(health cell.Health, opts ...groupOpt) Group +} + +type registry struct { + logger *slog.Logger + shutdowner hive.Shutdowner + + mu sync.Mutex + groups []Group +} + +func newRegistry( + logger *slog.Logger, + shutdowner hive.Shutdowner, +) Registry { + return ®istry{ + logger: logger, + shutdowner: shutdowner, + } +} + +// NewGroup creates a new Group with the given `opts` options, which allows you to customize the behavior for the +// group as a whole. For example by allowing you to add pprof labels to the group or by customizing the logger. +func (c *registry) NewGroup(health cell.Health, opts ...groupOpt) Group { + c.mu.Lock() + defer c.mu.Unlock() + + var options options + options.logger = c.logger + options.shutdowner = c.shutdowner + + for _, opt := range opts { + opt(&options) + } + + g := &group{ + options: options, + wg: &sync.WaitGroup{}, + health: health, + } + + c.groups = append(c.groups, g) + + return g +} + +// Group aims to streamline the management of work within a cell. Group implements cell.HookInterface and takes care +// of proper start and stop behavior as expected by hive. A group allows you to add multiple types of jobs which +// different kinds of logic. No matter the job type, the function provided to is always called with a context which +// is bound to the lifecycle of the cell. +type Group interface { + // Add append the job. If the group has not yet been started the job is queued, otherwise it is started + // immediately. + Add(...Job) + + // Scoped creates a scoped group, jobs added to this scoped group will appear as a sub-scope in the health reporter + Scoped(name string) ScopedGroup + + // HookInterface is implemented to Start and Stop the jobs in the group. + cell.HookInterface +} + +// Job in an interface that describes a unit of work which can be added to a Group. This interface contains unexported +// methods and thus can only be implemented by functions in this package such as OneShot, Timer, or Observer. +type Job interface { + start(ctx context.Context, wg *sync.WaitGroup, health cell.Health, options options) +} + +type group struct { + options options + + wg *sync.WaitGroup + + mu sync.Mutex + ctx context.Context + cancel context.CancelFunc + queuedJobs []Job + + health cell.Health +} + +type options struct { + pprofLabels pprof.LabelSet + logger *slog.Logger + shutdowner hive.Shutdowner + metrics Metrics +} + +type groupOpt func(o *options) + +// WithLogger replaces the default logger with the given logger, useful if you want to add certain fields to the logs +// created by the group/jobs. +func WithLogger(logger *slog.Logger) groupOpt { + return func(o *options) { + o.logger = logger + } +} + +// WithPprofLabels adds pprof labels which will be added to the goroutines spawned for the jobs and thus included in +// the pprof profiles. 
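To make the wiring concrete, here is a hypothetical cell (not part of the vendored code) that obtains the Registry, builds a group with the WithLogger option, queues a one-shot job and ties the group to the cell's lifecycle. It assumes job.Cell is included in the same hive so that job.Registry is provided.

package example

import (
	"context"
	"log/slog"

	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/job"
)

var ExampleCell = cell.Module(
	"example-jobs",
	"Example use of job groups",
	cell.Invoke(registerJobs),
)

func registerJobs(r job.Registry, health cell.Health, lc cell.Lifecycle, log *slog.Logger) {
	g := r.NewGroup(health, job.WithLogger(log))

	g.Add(job.OneShot("hello", func(ctx context.Context, h cell.Health) error {
		h.OK("hello said")
		return nil
	}))

	// Group implements cell.HookInterface, so appending it to the lifecycle
	// starts the queued jobs on Start and waits for them on Stop.
	lc.Append(g)
}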
+func WithPprofLabels(pprofLabels pprof.LabelSet) groupOpt { + return func(o *options) { + o.pprofLabels = pprofLabels + } +} + +func WithMetrics(metrics Metrics) groupOpt { + return func(o *options) { + o.metrics = metrics + } +} + +var _ cell.HookInterface = (*group)(nil) + +// Start implements the cell.HookInterface interface +func (jg *group) Start(_ cell.HookContext) error { + jg.mu.Lock() + defer jg.mu.Unlock() + + jg.ctx, jg.cancel = context.WithCancel(context.Background()) + + jg.wg.Add(len(jg.queuedJobs)) + for _, job := range jg.queuedJobs { + pprof.Do(jg.ctx, jg.options.pprofLabels, func(ctx context.Context) { + go job.start(ctx, jg.wg, jg.health, jg.options) + }) + } + // Nil the queue once we start so it can be GC'ed + jg.queuedJobs = nil + + return nil +} + +// Stop implements the cell.HookInterface interface +func (jg *group) Stop(stopCtx cell.HookContext) error { + jg.mu.Lock() + defer jg.mu.Unlock() + + done := make(chan struct{}) + go func() { + jg.wg.Wait() + close(done) + }() + + jg.cancel() + + select { + case <-stopCtx.Done(): + jg.options.logger.Error("Stop hook context expired before job group was done") + case <-done: + } + + return nil +} + +func (jg *group) Add(jobs ...Job) { + jg.add(jg.health, jobs...) +} + +func (jg *group) add(health cell.Health, jobs ...Job) { + jg.mu.Lock() + defer jg.mu.Unlock() + + // The context is only set once the group has been started. If we have not yet started, queue the jobs. + if jg.ctx == nil { + jg.queuedJobs = append(jg.queuedJobs, jobs...) + return + } + + for _, j := range jobs { + jg.wg.Add(1) + pprof.Do(jg.ctx, jg.options.pprofLabels, func(ctx context.Context) { + go j.start(ctx, jg.wg, health, jg.options) + }) + } +} + +// Scoped creates a scoped group, jobs added to this scoped group will appear as a sub-scope in the health reporter +func (jg *group) Scoped(name string) ScopedGroup { + return &scopedGroup{ + group: jg, + health: jg.health.NewScope(name), + } +} + +type ScopedGroup interface { + Add(jobs ...Job) +} + +type scopedGroup struct { + group *group + health cell.Health +} + +func (sg *scopedGroup) Add(jobs ...Job) { + sg.group.add(sg.health, jobs...) +} + +const maxNameLength = 100 + +func sanitizeName(name string) string { + mangled := false + newLength := min(maxNameLength, len(name)) + runes := make([]rune, 0, newLength) + for _, r := range name[:newLength] { + switch { + case r >= 'a' && r <= 'z': + fallthrough + case r >= 'A' && r <= 'Z': + fallthrough + case r >= '0' && r <= '9': + fallthrough + case r == '-' || r == '_': + runes = append(runes, r) + default: + // Skip invalid characters. + mangled = true + } + } + if mangled || len(name) > maxNameLength { + // Name was mangled or is too long, truncate and append hash. 
+ const hashLen = 10 + hash := fmt.Sprintf("%x", sha256.Sum256([]byte(name))) + newLen := min(maxNameLength-hashLen, len(runes)) + runes = runes[:newLen] + return string(runes) + "-" + hash[:hashLen] + } + return string(runes) +} diff --git a/vendor/github.com/cilium/hive/job/metrics.go b/vendor/github.com/cilium/hive/job/metrics.go new file mode 100644 index 0000000000..103de72947 --- /dev/null +++ b/vendor/github.com/cilium/hive/job/metrics.go @@ -0,0 +1,35 @@ +package job + +import "time" + +type Metrics interface { + JobError(name string, err error) + OneShotRunDuration(name string, duration time.Duration) + TimerRunDuration(name string, duration time.Duration) + TimerTriggerStats(name string, latency time.Duration, folds int) + ObserverRunDuration(name string, duration time.Duration) +} + +type NopMetrics struct{} + +// JobError implements Metrics. +func (NopMetrics) JobError(name string, err error) { +} + +// ObserverRunDuration implements Metrics. +func (NopMetrics) ObserverRunDuration(name string, duration time.Duration) { +} + +// OneShotRunDuration implements Metrics. +func (NopMetrics) OneShotRunDuration(name string, duration time.Duration) { +} + +// TimerRunDuration implements Metrics. +func (NopMetrics) TimerRunDuration(name string, duration time.Duration) { +} + +// TimerTriggerStats implements Metrics. +func (NopMetrics) TimerTriggerStats(name string, latency time.Duration, folds int) { +} + +var _ Metrics = NopMetrics{} diff --git a/vendor/github.com/cilium/hive/job/observer.go b/vendor/github.com/cilium/hive/job/observer.go new file mode 100644 index 0000000000..aa29f57048 --- /dev/null +++ b/vendor/github.com/cilium/hive/job/observer.go @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package job + +import ( + "context" + "errors" + "strconv" + "sync" + "time" + + "github.com/cilium/stream" + + "github.com/cilium/hive" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/internal" +) + +// Observer jobs invoke the given `fn` for each item observed on `observable`. +// The Observer name must match regex "^[a-zA-Z][a-zA-Z0-9_\-]{0,100}$". If the `observable` completes, the job stops. +// The context given to the observable is also canceled once the group stops. +func Observer[T any](name string, fn ObserverFunc[T], observable stream.Observable[T], opts ...observerOpt[T]) Job { + name = sanitizeName(name) + if fn == nil { + panic("`fn` must not be nil") + } + + job := &jobObserver[T]{ + name: name, + fn: fn, + observable: observable, + opts: opts, + } + + return job +} + +// ObserverFunc is the func type invoked by observer jobs. +// A ObserverFunc is expected to return as soon as ctx is canceled. 
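A hedged sketch (not part of the vendored code) of attaching an observer job to a group follows. The EndpointEvent type and the events observable are placeholders for whatever stream the application already has.

package example

import (
	"context"
	"log/slog"

	"github.com/cilium/hive/job"
	"github.com/cilium/stream"
)

// EndpointEvent is a placeholder event type for this sketch.
type EndpointEvent struct{ ID uint64 }

func registerObserver(g job.Group, events stream.Observable[EndpointEvent], log *slog.Logger) {
	g.Add(job.Observer("endpoint-events",
		func(ctx context.Context, ev EndpointEvent) error {
			// Handle one event; return promptly once ctx is canceled.
			log.Info("observed endpoint event", "id", ev.ID)
			return nil
		},
		events))
}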
+type ObserverFunc[T any] func(ctx context.Context, event T) error + +type observerOpt[T any] func(*jobObserver[T]) + +type jobObserver[T any] struct { + name string + fn ObserverFunc[T] + opts []observerOpt[T] + + health cell.Health + + observable stream.Observable[T] + + // If not nil, call the shutdowner on error + shutdown hive.Shutdowner +} + +func (jo *jobObserver[T]) start(ctx context.Context, wg *sync.WaitGroup, health cell.Health, options options) { + defer wg.Done() + + for _, opt := range jo.opts { + opt(jo) + } + + jo.health = health.NewScope("observer-job-" + jo.name) + reportTicker := time.NewTicker(10 * time.Second) + defer reportTicker.Stop() + + l := options.logger.With( + "name", jo.name, + "func", internal.FuncNameAndLocation(jo.fn)) + + l.Debug("Observer job started") + jo.health.OK("Primed") + var msgCount uint64 + + done := make(chan struct{}) + + var err error + jo.observable.Observe(ctx, func(t T) { + start := time.Now() + err := jo.fn(ctx, t) + duration := time.Since(start) + + if options.metrics != nil { + options.metrics.ObserverRunDuration(jo.name, duration) + } + + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + + jo.health.Degraded("observer job errored", err) + l.Error("Observer job errored", "error", err) + + if options.metrics != nil { + options.metrics.JobError(jo.name, err) + } + if jo.shutdown != nil { + jo.shutdown.Shutdown(hive.ShutdownWithError( + err, + )) + } + return + } + + msgCount++ + + // Don't report health for every event, only when we have not done so for a bit + select { + case <-reportTicker.C: + jo.health.OK("OK (" + duration.String() + ") [" + strconv.FormatUint(msgCount, 10) + "]") + default: + } + }, func(e error) { + err = e + close(done) + }) + + <-done + + jo.health.Stopped("observer job done") + if err != nil && !errors.Is(err, context.Canceled) { + l.Error("Observer job stopped with an error", "error", err) + } else { + l.Debug("Observer job stopped") + } +} diff --git a/vendor/github.com/cilium/hive/job/oneshot.go b/vendor/github.com/cilium/hive/job/oneshot.go new file mode 100644 index 0000000000..959bbe85bd --- /dev/null +++ b/vendor/github.com/cilium/hive/job/oneshot.go @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package job + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/cilium/hive" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/internal" +) + +// OneShot creates a "one shot" job which can be added to a Group. +// The OneShot job name must match regex "^[a-zA-Z][a-zA-Z0-9_\-]{0,100}$". The function passed is invoked once at startup. +// It can live for the entire lifetime of the group or exit early depending on its task. +// If it returns an error, it can optionally be retried if the WithRetry option. If retries are not configured or +// all retries failed as well, a shutdown of the hive can be triggered by specifying the WithShutdown option. +// +// The given function is expected to exit as soon as the context given to it expires, this is especially important for +// blocking or long running jobs. 
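As a sketch of the retry and shutdown behaviour described above (not part of the vendored code), the following adds a one-shot job that retries with the exponential backoff option defined just below and shuts the hive down if it never succeeds; syncInitialState stands in for the real work.

package example

import (
	"context"
	"time"

	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/job"
)

// syncInitialState is a placeholder for the real work of this sketch.
func syncInitialState(ctx context.Context) error { return nil }

func addInitialSync(g job.Group) {
	g.Add(job.OneShot(
		"initial-sync",
		func(ctx context.Context, health cell.Health) error {
			return syncInitialState(ctx)
		},
		// Retry up to 5 times with exponential backoff; if it still fails,
		// shut the hive down.
		job.WithRetry(5, &job.ExponentialBackoff{Min: 100 * time.Millisecond, Max: 5 * time.Second}),
		job.WithShutdown(),
	))
}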
+func OneShot(name string, fn OneShotFunc, opts ...jobOneShotOpt) Job { + name = sanitizeName(name) + if fn == nil { + panic("`fn` must not be nil") + } + + job := &jobOneShot{ + name: name, + fn: fn, + opts: opts, + } + + return job +} + +type jobOneShotOpt func(*jobOneShot) + +type RetryBackoff interface { + Wait() time.Duration +} + +type ConstantBackoff time.Duration + +func (d ConstantBackoff) Wait() time.Duration { + return time.Duration(d) +} + +type ExponentialBackoff struct { + Min time.Duration + Max time.Duration + current time.Duration +} + +func (e *ExponentialBackoff) Wait() time.Duration { + if e.current == 0 { + e.current = e.Min + } else { + e.current *= 2 + if e.current > e.Max { + e.current = e.Max + } + } + return e.current +} + +// WithRetry option configures a one shot job to retry `times` amount of times. On each retry attempt the +// rate limiter is waited upon before making another attempt. +// If `times` is <0, then the job is retried forever. +func WithRetry(times int, backoff RetryBackoff) jobOneShotOpt { + return func(jos *jobOneShot) { + jos.retry = times + jos.backoff = backoff + } +} + +// WithShutdown option configures a one shot job to shutdown the whole hive if the job returns an error. If the +// WithRetry option is also configured, all retries must be exhausted before we trigger the shutdown. +func WithShutdown() jobOneShotOpt { + return func(jos *jobOneShot) { + jos.shutdownOnError = true + } +} + +// OneShotFunc is the function type which is invoked by a one shot job. The given function is expected to exit as soon +// as the context given to it expires, this is especially important for blocking or long running jobs. +type OneShotFunc func(ctx context.Context, health cell.Health) error + +type jobOneShot struct { + name string + fn OneShotFunc + opts []jobOneShotOpt + + health cell.Health + + // If retry > 0, retry on error x times. 
+ retry int + backoff RetryBackoff + shutdownOnError bool +} + +func (jos *jobOneShot) start(ctx context.Context, wg *sync.WaitGroup, health cell.Health, options options) { + defer wg.Done() + + for _, opt := range jos.opts { + opt(jos) + } + + jos.health = health.NewScope("job-" + jos.name) + defer jos.health.Stopped("one-shot job done") + + l := options.logger.With( + "name", jos.name, + "func", internal.FuncNameAndLocation(jos.fn)) + + var err error + for i := 0; jos.retry < 0 || i <= jos.retry; i++ { + if i != 0 { + timeout := jos.backoff.Wait() + options.logger.Debug("Delaying retry attempt", + "backoff", timeout, + "retry-count", i, + ) + select { + case <-ctx.Done(): + return + case <-time.After(timeout): + } + } + + l.Debug("Starting one-shot job") + + jos.health.OK("Running") + start := time.Now() + err = jos.fn(ctx, jos.health) + + if options.metrics != nil { + duration := time.Since(start) + options.metrics.OneShotRunDuration(jos.name, duration) + } + + if err == nil { + return + } else if !errors.Is(err, context.Canceled) { + jos.health.Degraded("one-shot job errored", err) + l.Error("one-shot job errored", "error", err) + if options.metrics != nil { + options.metrics.JobError(jos.name, err) + } + } + } + + if options.shutdowner != nil && jos.shutdownOnError { + options.shutdowner.Shutdown(hive.ShutdownWithError(err)) + } +} diff --git a/vendor/github.com/cilium/hive/job/timer.go b/vendor/github.com/cilium/hive/job/timer.go new file mode 100644 index 0000000000..4fe37f0705 --- /dev/null +++ b/vendor/github.com/cilium/hive/job/timer.go @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package job + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/cilium/hive" + "github.com/cilium/hive/cell" + "github.com/cilium/hive/internal" +) + +// Timer creates a timer job which can be added to a Group. +// The Timer job name must match regex "^[a-zA-Z][a-zA-Z0-9_\-]{0,100}$". The function passed is invoked at the specified interval. +// Timer jobs are particularly useful to implement periodic syncs and cleanup actions. +// Timer jobs can optionally be triggered by an external Trigger with the WithTrigger option. +// This trigger can for example be passed between cells or between jobs in the same cell to allow for an additional +// invocation of the function. +// +// The interval between invocations is counted from the start of the last invocation. If the `fn` takes longer than the +// interval, its next invocation is not delayed. The `fn` is expected to stop as soon as the context passed to it +// expires. This is especially important for long running functions. The signal created by a Trigger is coalesced so +// multiple calls to trigger before the invocation takes place can result in just a single invocation. +func Timer(name string, fn TimerFunc, interval time.Duration, opts ...timerOpt) Job { + name = sanitizeName(name) + if fn == nil { + panic("`fn` must not be nil") + } + + job := &jobTimer{ + name: name, + fn: fn, + interval: interval, + opts: opts, + } + + return job +} + +// TimerFunc is the func type invoked by a timer job. A TimerFunc is expected to return as soon as the ctx expires. +type TimerFunc func(ctx context.Context) error + +type timerOpt func(*jobTimer) + +// Trigger which can be used to trigger a timer job, trigger events are coalesced. +type Trigger interface { + _trigger() + Trigger() +} + +// NewTrigger creates a new trigger, which can be used to trigger a timer job. 
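A sketch (not part of the vendored code) of a timer job that is driven both by an interval and by the trigger and debounce options defined just below; reconcile stands in for the periodic work.

package example

import (
	"context"
	"time"

	"github.com/cilium/hive/job"
)

// reconcile is a placeholder for the periodic work of this sketch.
func reconcile(ctx context.Context) error { return nil }

// addReconciler runs reconcile once a minute and additionally whenever the
// returned trigger is pulled; trigger requests within 100ms are folded into
// a single invocation by the debounce option.
func addReconciler(g job.Group) job.Trigger {
	trig := job.NewTrigger(job.WithDebounce(100 * time.Millisecond))
	g.Add(job.Timer("reconcile", reconcile, time.Minute, job.WithTrigger(trig)))
	return trig
}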
+func NewTrigger(opts ...triggerOpt) *trigger { + t := &trigger{ + c: make(chan struct{}, 1), + } + for _, opt := range opts { + opt(t) + } + return t +} + +// WithDebounce allows to specify an interval over with multiple trigger requests will be folded into one. +func WithDebounce(interval time.Duration) triggerOpt { + return func(t *trigger) { + t.debounce = interval + } +} + +type trigger struct { + debounce time.Duration + + mu sync.Mutex + c chan struct{} + lastTriggered time.Time + folds int + waitStart time.Time +} + +func (t *trigger) _trigger() {} + +func (t *trigger) Trigger() { + t.mu.Lock() + defer t.mu.Unlock() + + if t.folds == 0 { + t.waitStart = time.Now() + } + t.folds++ + + if t.debounce > 0 && time.Since(t.lastTriggered) < t.debounce { + return + } + + select { + case t.c <- struct{}{}: + default: + } +} + +func (t *trigger) markTriggered(name string, metrics Metrics) { + t.mu.Lock() + defer t.mu.Unlock() + + t.lastTriggered = time.Now() + if metrics != nil { + metrics.TimerTriggerStats(name, t.lastTriggered.Sub(t.waitStart), t.folds) + } + t.folds = 0 + + // discard a possibly enqueued trigger notification. + // This is needed when a notification is already enqueued in the channel (and thus has already passed the debounce check) + // but the fair scheduling receives from the ticker channel. + select { + case <-t.c: + default: + } +} + +type triggerOpt func(t *trigger) + +// WithTrigger option allows a user to specify a trigger, which if triggered will invoke the function of a timer +// before the configured interval has expired. +func WithTrigger(trig Trigger) timerOpt { + return func(jt *jobTimer) { + jt.trigger = trig.(*trigger) + } +} + +type jobTimer struct { + name string + fn TimerFunc + opts []timerOpt + + health cell.Health + + interval time.Duration + trigger *trigger + + // If not nil, call the shutdowner on error + shutdown hive.Shutdowner +} + +func (jt *jobTimer) start(ctx context.Context, wg *sync.WaitGroup, health cell.Health, options options) { + defer wg.Done() + + for _, opt := range jt.opts { + opt(jt) + } + + jt.health = health.NewScope("timer-job-" + jt.name) + + l := options.logger.With( + "name", jt.name, + "func", internal.FuncNameAndLocation(jt.fn)) + + var tickerChan <-chan time.Time + if jt.interval > 0 { + ticker := time.NewTicker(jt.interval) + defer ticker.Stop() + tickerChan = ticker.C + } + + var triggerChan chan struct{} + if jt.trigger != nil { + triggerChan = jt.trigger.c + } + + l.Debug("Starting timer job") + jt.health.OK("Primed") + + for { + select { + case <-ctx.Done(): + jt.health.Stopped("timer job context done") + return + case <-tickerChan: + case <-triggerChan: + } + + l.Debug("Timer job triggered") + + if jt.trigger != nil { + jt.trigger.markTriggered(jt.name, options.metrics) + } + + start := time.Now() + err := jt.fn(ctx) + duration := time.Since(start) + + if options.metrics != nil { + options.metrics.TimerRunDuration(jt.name, duration) + } + + if err == nil { + jt.health.OK("OK (" + duration.String() + ")") + l.Debug("Timer job finished") + } else if !errors.Is(err, context.Canceled) { + jt.health.Degraded("timer job errored", err) + l.Error("Timer job errored", "error", err) + + if options.metrics != nil { + options.metrics.JobError(jt.name, err) + } + if jt.shutdown != nil { + jt.shutdown.Shutdown(hive.ShutdownWithError(err)) + } + } + + // If we exited due to the ctx closing we do not guaranteed return. + // The select can pick the timer or trigger signals over ctx.Done due to fair scheduling, so this guarantees it. 
+ if ctx.Err() != nil { + return + } + } +} diff --git a/vendor/github.com/cilium/hive/script.go b/vendor/github.com/cilium/hive/script.go new file mode 100644 index 0000000000..bf3691a271 --- /dev/null +++ b/vendor/github.com/cilium/hive/script.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package hive + +import ( + "context" + "errors" + "fmt" + "io" + "log/slog" + "os" + "os/signal" + "time" + + "github.com/cilium/hive/cell" + "github.com/cilium/hive/script" + "golang.org/x/term" +) + +func NewScriptCmd(name string, cmd script.Cmd) ScriptCmdOut { + return ScriptCmdOut{ScriptCmd: ScriptCmd{name, cmd}} +} + +func NewScriptCmds(cmds map[string]script.Cmd) (out ScriptCmdsOut) { + out.ScriptCmds = make([]ScriptCmd, 0, len(cmds)) + for name, cmd := range cmds { + out.ScriptCmds = append(out.ScriptCmds, ScriptCmd{name, cmd}) + } + return out +} + +type ScriptCmd struct { + Name string + Cmd script.Cmd +} + +type ScriptCmds struct { + cell.In + + ScriptCmds []ScriptCmd `group:"script-commands"` +} + +func (sc ScriptCmds) Map() map[string]script.Cmd { + m := make(map[string]script.Cmd, len(sc.ScriptCmds)) + for _, c := range sc.ScriptCmds { + if c.Name != "" { + m[c.Name] = c.Cmd + } + } + return m +} + +type ScriptCmdOut struct { + cell.Out + + ScriptCmd ScriptCmd `group:"script-commands"` +} + +type ScriptCmdsOut struct { + cell.Out + + ScriptCmds []ScriptCmd `group:"script-commands,flatten"` +} + +func hiveScriptCmd(h *Hive, log *slog.Logger) script.Cmd { + const defaultTimeout = time.Minute + return script.Command( + script.CmdUsage{ + Summary: "manipulate the hive", + Args: "cmd args...", + }, + func(s *script.State, args ...string) (script.WaitFunc, error) { + if len(args) < 1 { + return nil, fmt.Errorf("hive cmd args...\n'cmd' is one of: start, stop, jobs") + } + switch args[0] { + case "start": + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + return nil, h.Start(log, ctx) + case "stop": + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + return nil, h.Stop(log, ctx) + } + return nil, fmt.Errorf("unknown hive command %q, expected one of: start, stop, jobs", args[0]) + }, + ) +} + +func RunRepl(h *Hive, in *os.File, out *os.File, prompt string) { + // Try to set the input into raw mode. 
+ restore, err := script.MakeRaw(int(in.Fd())) + defer restore() + + inout := struct { + io.Reader + io.Writer + }{in, out} + term := term.NewTerminal(inout, prompt) + log := slog.New(slog.NewTextHandler(term, nil)) + + cmds, err := h.ScriptCommands(log) + if err != nil { + log.Error("ScriptCommands()", "error", err) + return + } + for name, cmd := range script.DefaultCmds() { + cmds[name] = cmd + } + + e := script.Engine{ + Cmds: cmds, + Conds: nil, + } + + stop := make(chan struct{}) + defer close(stop) + + sigs := make(chan os.Signal, 1) + defer signal.Stop(sigs) + signal.Notify(sigs, os.Interrupt) + + newState := func() *script.State { + ctx, cancel := context.WithCancel(context.Background()) + s, err := script.NewState(ctx, "/tmp", nil) + if err != nil { + panic(err) + } + go func() { + select { + case <-stop: + cancel() + case <-sigs: + cancel() + } + }() + return s + } + + s := newState() + + for { + line, err := term.ReadLine() + if err != nil { + if errors.Is(err, io.EOF) { + return + } else { + panic(err) + } + } + + err = e.ExecuteLine(s, line, term) + if err != nil { + fmt.Fprintln(term, err.Error()) + } + + if s.Context().Err() != nil { + // Context was cancelled due to interrupt. Re-create the state + // to run more commands. + s = newState() + fmt.Fprintln(term, "^C (interrupted)") + } + } +} diff --git a/vendor/github.com/cilium/hive/script/LICENSE b/vendor/github.com/cilium/hive/script/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/cilium/hive/script/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cilium/hive/script/README.md b/vendor/github.com/cilium/hive/script/README.md new file mode 100644 index 0000000000..6b6da3a3fa --- /dev/null +++ b/vendor/github.com/cilium/hive/script/README.md @@ -0,0 +1,4 @@ +This is a fork of rsc.io/script (v0.0.2). It mostly adds support for interactive use to it. + +The makeraw* files are adapted from term_unix.go etc. files from x/term to enable interrupts. 
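Putting the script pieces together, here is a hypothetical main program (not part of the vendored code) that contributes one extra command through the "script-commands" value group consumed by Hive.ScriptCommands and then drops into the interactive shell; the command name, version string and prompt are arbitrary.

package main

import (
	"os"

	"github.com/cilium/hive"
	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/script"
)

// versionCmd provides a trivial script command into the "script-commands"
// value group.
func versionCmd() hive.ScriptCmdOut {
	return hive.NewScriptCmd("version", script.Command(
		script.CmdUsage{Summary: "print the example version"},
		func(s *script.State, args ...string) (script.WaitFunc, error) {
			s.Logf("v0.0.1\n")
			return nil, nil
		},
	))
}

func main() {
	h := hive.New(cell.Provide(versionCmd))

	// The REPL exposes the default commands, the built-in "hive" command and
	// anything provided into the script-commands group, e.g.:
	//
	//   example> version
	//   example> hive start
	hive.RunRepl(h, os.Stdin, os.Stdout, "example> ")
}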
+ diff --git a/vendor/github.com/cilium/hive/script/README.md.original b/vendor/github.com/cilium/hive/script/README.md.original new file mode 100644 index 0000000000..0716f680e3 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/README.md.original @@ -0,0 +1,11 @@ +This is a copy of cmd/go/internal/script. + +See and . +Posting it here makes it available for others to try +without us committing to officially supporting it. +We have been using it in the go command for many years now; +the code is quite stable. +Ironically, it has very few tests. + + is a port +of an earlier version of the go command script language. diff --git a/vendor/github.com/cilium/hive/script/cmds.go b/vendor/github.com/cilium/hive/script/cmds.go new file mode 100644 index 0000000000..4182d1b8a5 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds.go @@ -0,0 +1,1218 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/cilium/hive/script/internal/diff" + "github.com/spf13/pflag" + "golang.org/x/term" +) + +// DefaultCmds returns a set of broadly useful script commands. +// +// Run the 'help' command within a script engine to view a list of the available +// commands. +func DefaultCmds() map[string]Cmd { + return map[string]Cmd{ + "cat": Cat(), + "cd": Cd(), + "chmod": Chmod(), + "cmp": Cmp(), + "cmpenv": Cmpenv(), + "cp": Cp(), + "echo": Echo(), + "env": Env(), + "exec": Exec(func(cmd *exec.Cmd) error { return cmd.Process.Signal(os.Interrupt) }, 100*time.Millisecond), // arbitrary grace period + "exists": Exists(), + "grep": Grep(), + "help": Help(), + "mkdir": Mkdir(), + "mv": Mv(), + "rm": Rm(), + "replace": Replace(), + "sed": Sed(), + "sleep": Sleep(), + "stderr": Stderr(), + "stdout": Stdout(), + "stop": Stop(), + "symlink": Symlink(), + "wait": Wait(), + "break": Break(), + } +} + +// Command returns a new Cmd with a Usage method that returns a copy of the +// given CmdUsage and a Run method calls the given function. +func Command(usage CmdUsage, run func(*State, ...string) (WaitFunc, error)) Cmd { + return &funcCmd{ + usage: usage, + run: run, + } +} + +// A funcCmd implements Cmd using a function value. +type funcCmd struct { + usage CmdUsage + run func(*State, ...string) (WaitFunc, error) +} + +func (c *funcCmd) Run(s *State, args ...string) (WaitFunc, error) { + return c.run(s, args...) +} + +func (c *funcCmd) Usage() *CmdUsage { return &c.usage } + +// firstNonFlag returns a slice containing the index of the first argument in +// rawArgs that is not a flag, or nil if all arguments are flags. +func firstNonFlag(rawArgs ...string) []int { + for i, arg := range rawArgs { + if !strings.HasPrefix(arg, "-") { + return []int{i} + } + if arg == "--" { + return []int{i + 1} + } + } + return nil +} + +// Cat writes the concatenated contents of the named file(s) to the script's +// stdout buffer. 
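The same engine can also be driven non-interactively. Below is a hedged sketch (not part of the vendored code) that feeds a few arbitrary example lines through the State/Engine API used by the REPL above.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/cilium/hive/script"
)

func main() {
	e := script.Engine{Cmds: script.DefaultCmds(), Conds: nil}

	s, err := script.NewState(context.Background(), "/tmp", nil)
	if err != nil {
		panic(err)
	}

	for _, line := range []string{
		"echo hello world",
		"stdout hello", // match against the previous command's stdout buffer
	} {
		if err := e.ExecuteLine(s, line, os.Stdout); err != nil {
			fmt.Fprintln(os.Stderr, err)
		}
	}
}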
+func Cat() Cmd { + return Command( + CmdUsage{ + Summary: "concatenate files and print to the script's stdout buffer", + Args: "files...", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) == 0 { + return nil, ErrUsage + } + + paths := make([]string, 0, len(args)) + for _, arg := range args { + paths = append(paths, s.Path(arg)) + } + + var buf strings.Builder + errc := make(chan error, 1) + go func() { + for _, p := range paths { + b, err := os.ReadFile(p) + buf.Write(b) + if err != nil { + errc <- err + return + } + } + errc <- nil + }() + + wait := func(*State) (stdout, stderr string, err error) { + err = <-errc + return buf.String(), "", err + } + return wait, nil + }) +} + +// Cd changes the current working directory. +func Cd() Cmd { + return Command( + CmdUsage{ + Summary: "change the working directory", + Args: "dir", + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + return nil, s.Chdir(args[0]) + }) +} + +// Chmod changes the permissions of a file or a directory.. +func Chmod() Cmd { + return Command( + CmdUsage{ + Summary: "change file mode bits", + Args: "perm paths...", + Detail: []string{ + "Changes the permissions of the named files or directories to be equal to perm.", + "Only numerical permissions are supported.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + perm, err := strconv.ParseUint(args[0], 0, 32) + if err != nil || perm&uint64(fs.ModePerm) != perm { + return nil, fmt.Errorf("invalid mode: %s", args[0]) + } + + for _, arg := range args[1:] { + err := os.Chmod(s.Path(arg), fs.FileMode(perm)) + if err != nil { + return nil, err + } + } + return nil, nil + }) +} + +func compareFlags(fs *pflag.FlagSet) { + fs.BoolP("quiet", "q", false, "Suppress printing of diff") +} + +// Cmp compares the contents of two files, or the contents of either the +// "stdout" or "stderr" buffer and a file, returning a non-nil error if the +// contents differ. +func Cmp() Cmd { + return Command( + CmdUsage{ + Args: "file1 file2", + Summary: "compare files for differences", + Flags: compareFlags, + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical.", + "File1 can be 'stdout' or 'stderr' to compare the stdout or stderr buffer from the most recent command.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, false, args...) + }) +} + +// Cmpenv is like Compare, but also performs environment substitutions +// on the contents of both arguments. +func Cmpenv() Cmd { + return Command( + CmdUsage{ + Args: "file1 file2", + Flags: compareFlags, + Summary: "compare files for differences, with environment expansion", + Detail: []string{ + "By convention, file1 is the actual data and file2 is the expected data.", + "The command succeeds if the file contents are identical after substituting variables from the script environment.", + "File1 can be 'stdout' or 'stderr' to compare the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, doCompare(s, true, args...) 
+ }) +} + +func doCompare(s *State, env bool, args ...string) error { + quiet, err := s.Flags.GetBool("quiet") + if err != nil { + return err + } + if len(args) != 2 { + return ErrUsage + } + + name1, name2 := args[0], args[1] + var text1, text2 string + switch name1 { + case "stdout": + text1 = s.Stdout() + case "stderr": + text1 = s.Stderr() + default: + data, err := os.ReadFile(s.Path(name1)) + if err != nil { + return err + } + text1 = string(data) + } + + data, err := os.ReadFile(s.Path(name2)) + if err != nil { + return err + } + text2 = string(data) + + if env { + text1 = s.ExpandEnv(text1, false) + text2 = s.ExpandEnv(text2, false) + } + + if text1 != text2 { + if s.DoUpdate { + // Updates requested, store the file contents and + // ignore mismatches. + s.FileUpdates[name1] = text2 + s.FileUpdates[name2] = text1 + return nil + } + + if !quiet { + diffText := diff.Diff(name1, []byte(text1), name2, []byte(text2)) + s.Logf("%s\n", diffText) + } + return fmt.Errorf("%s and %s differ", name1, name2) + } + return nil +} + +// Cp copies one or more files to a new location. +func Cp() Cmd { + return Command( + CmdUsage{ + Summary: "copy files to a target file or directory", + Args: "src... dst", + Detail: []string{ + "src can include 'stdout' or 'stderr' to copy from the script's stdout or stderr buffer.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 2 { + return nil, ErrUsage + } + + dst := s.Path(args[len(args)-1]) + info, err := os.Stat(dst) + dstDir := err == nil && info.IsDir() + if len(args) > 2 && !dstDir { + return nil, &fs.PathError{Op: "cp", Path: dst, Err: errors.New("destination is not a directory")} + } + + for _, arg := range args[:len(args)-1] { + var ( + src string + data []byte + mode fs.FileMode + ) + switch arg { + case "stdout": + src = arg + data = []byte(s.Stdout()) + mode = 0666 + case "stderr": + src = arg + data = []byte(s.Stderr()) + mode = 0666 + default: + src = s.Path(arg) + info, err := os.Stat(src) + if err != nil { + return nil, err + } + mode = info.Mode() & 0777 + data, err = os.ReadFile(src) + if err != nil { + return nil, err + } + } + targ := dst + if dstDir { + targ = filepath.Join(dst, filepath.Base(src)) + } + err := os.WriteFile(targ, data, mode) + if err != nil { + return nil, err + } + } + + return nil, nil + }) +} + +// Echo writes its arguments to stdout, followed by a newline. +func Echo() Cmd { + return Command( + CmdUsage{ + Summary: "display a line of text", + Args: "string...", + }, + func(s *State, args ...string) (WaitFunc, error) { + var buf strings.Builder + for i, arg := range args { + if i > 0 { + buf.WriteString(" ") + } + buf.WriteString(arg) + } + buf.WriteString("\n") + out := buf.String() + + // Stuff the result into a callback to satisfy the OutputCommandFunc + // interface, even though it isn't really asynchronous even if run in the + // background. + // + // Nobody should be running 'echo' as a background command, but it's not worth + // defining yet another interface, and also doesn't seem worth shoehorning + // into a SimpleCommand the way we did with Wait. + return func(*State) (stdout, stderr string, err error) { + return out, "", nil + }, nil + }) +} + +// Env sets or logs the values of environment variables. +// +// With no arguments, Env reports all variables in the environment. +// "key=value" arguments set variables, and arguments without "=" +// cause the corresponding value to be printed to the stdout buffer. 
+func Env() Cmd { + return Command( + CmdUsage{ + Summary: "set or log the values of environment variables", + Args: "[key[=value]...]", + Detail: []string{ + "With no arguments, print the script environment to the log.", + "Otherwise, add the listed key=value pairs to the environment or print the listed keys.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + out := new(strings.Builder) + if len(args) == 0 { + for _, kv := range s.env { + fmt.Fprintf(out, "%s\n", kv) + } + } else { + for _, env := range args { + i := strings.Index(env, "=") + if i < 0 { + // Display value instead of setting it. + fmt.Fprintf(out, "%s=%s\n", env, s.envMap[env]) + continue + } + if err := s.Setenv(env[:i], env[i+1:]); err != nil { + return nil, err + } + } + } + var wait WaitFunc + if out.Len() > 0 || len(args) == 0 { + wait = func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + } + return wait, nil + }) +} + +// Exec runs an arbitrary executable as a subprocess. +// +// When the Script's context is canceled, Exec sends the interrupt signal, then +// waits for up to the given delay for the subprocess to flush output before +// terminating it with os.Kill. +func Exec(cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + return Command( + CmdUsage{ + Summary: "run an executable program with arguments", + Args: "program [args...]", + Detail: []string{ + "Note that 'exec' does not terminate the script (unlike Unix shells).", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + + // Use the script's PATH to look up the command (if it does not contain a separator) + // instead of the test process's PATH (see lookPath). + // Don't use filepath.Clean, since that changes "./foo" to "foo". + name := filepath.FromSlash(args[0]) + path := name + if !strings.Contains(name, string(filepath.Separator)) { + var err error + path, err = lookPath(s, name) + if err != nil { + return nil, err + } + } + + return startCommand(s, name, path, args[1:], cancel, waitDelay) + }) +} + +func startCommand(s *State, name, path string, args []string, cancel func(*exec.Cmd) error, waitDelay time.Duration) (WaitFunc, error) { + var ( + cmd *exec.Cmd + stdoutBuf, stderrBuf strings.Builder + ) + for { + cmd = exec.CommandContext(s.Context(), path, args...) + if cancel == nil { + cmd.Cancel = nil + } else { + cmd.Cancel = func() error { return cancel(cmd) } + } + cmd.WaitDelay = waitDelay + cmd.Args[0] = name + cmd.Dir = s.Getwd() + cmd.Env = s.env + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + err := cmd.Start() + if err == nil { + break + } + if isETXTBSY(err) { + // If the script (or its host process) just wrote the executable we're + // trying to run, a fork+exec in another thread may be holding open the FD + // that we used to write the executable (see https://go.dev/issue/22315). + // Since the descriptor should have CLOEXEC set, the problem should + // resolve as soon as the forked child reaches its exec call. + // Keep retrying until that happens. + } else { + return nil, err + } + } + + wait := func(s *State) (stdout, stderr string, err error) { + err = cmd.Wait() + return stdoutBuf.String(), stderrBuf.String(), err + } + return wait, nil +} + +// lookPath is (roughly) like exec.LookPath, but it uses the script's current +// PATH to find the executable. 
+func lookPath(s *State, command string) (string, error) { + var strEqual func(string, string) bool + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Using GOOS as a proxy for case-insensitive file system. + // TODO(bcmills): Remove this assumption. + strEqual = strings.EqualFold + } else { + strEqual = func(a, b string) bool { return a == b } + } + + var pathExt []string + var searchExt bool + var isExecutable func(os.FileInfo) bool + if runtime.GOOS == "windows" { + // Use the test process's PathExt instead of the script's. + // If PathExt is set in the command's environment, cmd.Start fails with + // "parameter is invalid". Not sure why. + // If the command already has an extension in PathExt (like "cmd.exe") + // don't search for other extensions (not "cmd.bat.exe"). + pathExt = strings.Split(os.Getenv("PathExt"), string(filepath.ListSeparator)) + searchExt = true + cmdExt := filepath.Ext(command) + for _, ext := range pathExt { + if strEqual(cmdExt, ext) { + searchExt = false + break + } + } + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() + } + } else { + isExecutable = func(fi os.FileInfo) bool { + return fi.Mode().IsRegular() && fi.Mode().Perm()&0111 != 0 + } + } + + pathEnv, _ := s.LookupEnv(pathEnvName()) + for _, dir := range strings.Split(pathEnv, string(filepath.ListSeparator)) { + if dir == "" { + continue + } + + // Determine whether dir needs a trailing path separator. + // Note: we avoid filepath.Join in this function because it cleans the + // result: we want to preserve the exact dir prefix from the environment. + sep := string(filepath.Separator) + if os.IsPathSeparator(dir[len(dir)-1]) { + sep = "" + } + + if searchExt { + ents, err := os.ReadDir(dir) + if err != nil { + continue + } + for _, ent := range ents { + for _, ext := range pathExt { + if !ent.IsDir() && strEqual(ent.Name(), command+ext) { + return dir + sep + ent.Name(), nil + } + } + } + } else { + path := dir + sep + command + if fi, err := os.Stat(path); err == nil && isExecutable(fi) { + return path, nil + } + } + } + return "", &exec.Error{Name: command, Err: exec.ErrNotFound} +} + +// pathEnvName returns the platform-specific variable used by os/exec.LookPath +// to look up executable names (either "PATH" or "path"). +// +// TODO(bcmills): Investigate whether we can instead use PATH uniformly and +// rewrite it to $path when executing subprocesses. +func pathEnvName() string { + switch runtime.GOOS { + case "plan9": + return "path" + default: + return "PATH" + } +} + +// Exists checks that the named file(s) exist. 
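+//
+// The -readonly flag additionally requires that each file not be writable, and
+// the -exec flag requires that it be executable (this check is skipped on
+// Windows).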
+func Exists() Cmd {
+	return Command(
+		CmdUsage{
+			Summary: "check that files exist",
+			Args:    "file...",
+			Flags: func(fs *pflag.FlagSet) {
+				fs.Bool("readonly", false, "File must not be writable")
+				fs.Bool("exec", false, "File must be executable")
+			},
+		},
+		func(s *State, args ...string) (WaitFunc, error) {
+			readonly, err := s.Flags.GetBool("readonly")
+			if err != nil {
+				return nil, err
+			}
+			exec, err := s.Flags.GetBool("exec")
+			if err != nil {
+				return nil, err
+			}
+
+			if len(args) == 0 {
+				return nil, ErrUsage
+			}
+
+			for _, file := range args {
+				file = s.Path(file)
+				info, err := os.Stat(file)
+				if err != nil {
+					return nil, err
+				}
+				if readonly && info.Mode()&0222 != 0 {
+					return nil, fmt.Errorf("%s exists but is writable", file)
+				}
+				if exec && runtime.GOOS != "windows" && info.Mode()&0111 == 0 {
+					return nil, fmt.Errorf("%s exists but is not executable", file)
+				}
+			}
+
+			return nil, nil
+		})
+}
+
+// Grep checks that file content matches a regexp.
+// Like stdout/stderr and unlike Unix grep, it accepts Go regexp syntax.
+//
+// Grep does not modify the State's stdout or stderr buffers.
+// (Its output goes to the script log, not stdout.)
+func Grep() Cmd {
+	return Command(
+		CmdUsage{
+			Summary: "find lines in a file that match a pattern",
+			Args:    "'pattern' file",
+			Flags:   matchFlags,
+			Detail: []string{
+				"The command succeeds if at least one match (or the exact count, if given) is found.",
+				"The -q flag suppresses printing of matches.",
+			},
+			RegexpArgs: firstNonFlag,
+		},
+		func(s *State, args ...string) (WaitFunc, error) {
+			return nil, match(s, args, "", "grep")
+		})
+}
+
+func matchFlags(fs *pflag.FlagSet) {
+	fs.Int("count", 0, "Exact count of matches")
+	fs.BoolP("quiet", "q", false, "Suppress printing of matches")
+}
+
+// match implements the Grep, Stdout, and Stderr commands.
+func match(s *State, args []string, text, name string) error {
+	n, err := s.Flags.GetInt("count")
+	if err != nil {
+		return err
+	}
+	quiet, err := s.Flags.GetBool("quiet")
+	if err != nil {
+		return err
+	}
+
+	isGrep := name == "grep"
+
+	wantArgs := 1
+	if isGrep {
+		wantArgs = 2
+	}
+	if len(args) != wantArgs {
+		return ErrUsage
+	}
+
+	pattern := `(?m)` + args[0]
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return err
+	}
+
+	if isGrep {
+		name = args[1] // for error messages
+		data, err := os.ReadFile(s.Path(args[1]))
+		if err != nil {
+			return err
+		}
+		text = string(data)
+	}
+
+	if n > 0 {
+		count := len(re.FindAllString(text, -1))
+		if count != n {
+			return fmt.Errorf("found %d matches for %#q in %s", count, pattern, name)
+		}
+		return nil
+	}
+
+	if !re.MatchString(text) {
+		return fmt.Errorf("no match for %#q in %s", pattern, name)
+	}
+
+	if !quiet {
+		// Print the lines containing the match.
+		loc := re.FindStringIndex(text)
+		for loc[0] > 0 && text[loc[0]-1] != '\n' {
+			loc[0]--
+		}
+		for loc[1] < len(text) && text[loc[1]] != '\n' {
+			loc[1]++
+		}
+		lines := strings.TrimSuffix(text[loc[0]:loc[1]], "\n")
+		s.Logf("matched: %s\n", lines)
+	}
+	return nil
+}
+
+// Help writes command documentation to the script log.
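+//
+// For example, 'help grep' logs the usage of commands matching "grep", and
+// 'help [root]' describes the root condition and reports whether it is active.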
+func Help() Cmd { + return Command( + CmdUsage{ + Summary: "log help text for commands and conditions", + Args: "[-v] (regexp)", + Detail: []string{ + "To display help for a specific condition, enclose it in brackets: 'help [amd64]'.", + "To display complete documentation when listing all commands, pass the -v flag.", + "Commands can be filtered with a regexp: 'help ^db'", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + if s.engine == nil { + return nil, errors.New("no engine configured") + } + + verbose := false + if len(args) > 0 { + verbose = true + if args[0] == "-v" { + args = args[1:] + } + } + + var cmds, conds []string + for _, arg := range args { + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + conds = append(conds, arg[1:len(arg)-1]) + } else { + cmds = append(cmds, arg) + } + } + + out := new(strings.Builder) + + if len(conds) > 0 || (len(args) == 0 && len(s.engine.Conds) > 0) { + if conds == nil { + out.WriteString("conditions:\n\n") + } + s.engine.ListConds(out, s, conds...) + } + + if len(cmds) > 0 || len(args) == 0 { + if len(args) == 0 { + out.WriteString("\ncommands:\n\n") + } + s.engine.ListCmds(out, verbose, strings.Join(cmds, " ")) + } + + wait := func(*State) (stdout, stderr string, err error) { + return out.String(), "", nil + } + return wait, nil + }) +} + +// Mkdir creates a directory and any needed parent directories. +func Mkdir() Cmd { + return Command( + CmdUsage{ + Summary: "create directories, if they do not already exist", + Args: "path...", + Detail: []string{ + "Unlike Unix mkdir, parent directories are always created if needed.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := os.MkdirAll(s.Path(arg), 0777); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// Mv renames an existing file or directory to a new path. +func Mv() Cmd { + return Command( + CmdUsage{ + Summary: "rename a file or directory to a new path", + Args: "old new", + Detail: []string{ + "OS-specific restrictions may apply when old and new are in different directories.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 2 { + return nil, ErrUsage + } + return nil, os.Rename(s.Path(args[0]), s.Path(args[1])) + }) +} + +// Program returns a new command that runs the named program, found from the +// host process's PATH (not looked up in the script's PATH). +func Program(name string, cancel func(*exec.Cmd) error, waitDelay time.Duration) Cmd { + var ( + shortName string + summary string + lookPathOnce sync.Once + path string + pathErr error + ) + if filepath.IsAbs(name) { + lookPathOnce.Do(func() { path = filepath.Clean(name) }) + shortName = strings.TrimSuffix(filepath.Base(path), ".exe") + summary = "run the '" + shortName + "' program provided by the script host" + } else { + shortName = name + summary = "run the '" + shortName + "' program from the script host's PATH" + } + + return Command( + CmdUsage{ + Summary: summary, + Args: "[args...]", + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + lookPathOnce.Do(func() { + path, pathErr = exec.LookPath(name) + }) + if pathErr != nil { + return nil, pathErr + } + return startCommand(s, shortName, path, args, cancel, waitDelay) + }) +} + +// Replace replaces all occurrences of a string in a file with another string. 
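+//
+// For example, 'replace foo bar config.txt' rewrites config.txt with every
+// occurrence of "foo" changed to "bar". Several old/new pairs may be given
+// before the file name.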
+func Replace() Cmd { + return Command( + CmdUsage{ + Summary: "replace strings in a file", + Args: "[old new]... file", + Detail: []string{ + "The 'old' and 'new' arguments are unquoted as if in quoted Go strings.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args)%2 != 1 { + return nil, ErrUsage + } + + oldNew := make([]string, 0, len(args)-1) + for _, arg := range args[:len(args)-1] { + s, err := strconv.Unquote(`"` + arg + `"`) + if err != nil { + return nil, err + } + oldNew = append(oldNew, s) + } + + r := strings.NewReplacer(oldNew...) + file := s.Path(args[len(args)-1]) + + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + replaced := r.Replace(string(data)) + + return nil, os.WriteFile(file, []byte(replaced), 0666) + }) +} + +// Sed implements a simple regexp replacement of text in a file. +func Sed() Cmd { + return Command( + CmdUsage{ + Summary: "substitute strings in a file with a regexp", + Args: "regexp replacement file", + Detail: []string{ + "A simple sed-like command for replacing text matching regular expressions", + "in a file.", + "", + "Implemented using regexp.ReplaceAll, see its docs for what is supported:", + "https://pkg.go.dev/regexp#Regexp.ReplaceAll", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 3 { + return nil, ErrUsage + } + + re, err := regexp.Compile(args[0]) + if err != nil { + return nil, err + } + replacement := args[1] + file := s.Path(args[2]) + + data, err := os.ReadFile(file) + if err != nil { + return nil, err + } + lines := strings.Split(string(data), "\n") + for i, line := range lines { + lines[i] = re.ReplaceAllString(line, replacement) + } + return nil, os.WriteFile(file, []byte(strings.Join(lines, "\n")), 0666) + }) +} + +// Rm removes a file or directory. +// +// If a directory, Rm also recursively removes that directory's +// contents. +func Rm() Cmd { + return Command( + CmdUsage{ + Summary: "remove a file or directory", + Args: "path...", + Detail: []string{ + "If the path is a directory, its contents are removed recursively.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) < 1 { + return nil, ErrUsage + } + for _, arg := range args { + if err := removeAll(s.Path(arg)); err != nil { + return nil, err + } + } + return nil, nil + }) +} + +// removeAll removes dir and all files and directories it contains. +// +// Unlike os.RemoveAll, removeAll attempts to make the directories writable if +// needed in order to remove their contents. +func removeAll(dir string) error { + // module cache has 0444 directories; + // make them writable in order to remove content. + filepath.WalkDir(dir, func(path string, info fs.DirEntry, err error) error { + // chmod not only directories, but also things that we couldn't even stat + // due to permission errors: they may also be unreadable directories. + if err != nil || info.IsDir() { + os.Chmod(path, 0777) + } + return nil + }) + return os.RemoveAll(dir) +} + +// Sleep sleeps for the given Go duration or until the script's context is +// cancelled, whichever happens first. 
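+//
+// For example, 'sleep 500ms' pauses the script for half a second (unless the
+// script's context is cancelled first).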
+func Sleep() Cmd { + return Command( + CmdUsage{ + Summary: "sleep for a specified duration", + Args: "duration", + Detail: []string{ + "The duration must be given as a Go time.Duration string.", + }, + Async: true, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 1 { + return nil, ErrUsage + } + + d, err := time.ParseDuration(args[0]) + if err != nil { + return nil, err + } + + timer := time.NewTimer(d) + wait := func(s *State) (stdout, stderr string, err error) { + ctx := s.Context() + select { + case <-ctx.Done(): + timer.Stop() + return "", "", ctx.Err() + case <-timer.C: + return "", "", nil + } + } + return wait, nil + }) +} + +// Stderr searches for a regular expression in the stderr buffer. +func Stderr() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stderr buffer that match a pattern", + Args: "'pattern'", + Flags: matchFlags, + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stderr(), "stderr") + }) +} + +// Stdout searches for a regular expression in the stdout buffer. +func Stdout() Cmd { + return Command( + CmdUsage{ + Summary: "find lines in the stdout buffer that match a pattern", + Args: "'pattern'", + Flags: matchFlags, + Detail: []string{ + "The command succeeds if at least one match (or the exact count, if given) is found.", + "The -q flag suppresses printing of matches.", + }, + RegexpArgs: firstNonFlag, + }, + func(s *State, args ...string) (WaitFunc, error) { + return nil, match(s, args, s.Stdout(), "stdout") + }) +} + +// Stop returns a sentinel error that causes script execution to halt +// and s.Execute to return with a nil error. +func Stop() Cmd { + return Command( + CmdUsage{ + Summary: "stop execution of the script", + Args: "[msg]", + Detail: []string{ + "The message is written to the script log, but no error is reported from the script engine.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 1 { + return nil, ErrUsage + } + // TODO(bcmills): The argument passed to stop seems redundant with comments. + // Either use it systematically or remove it. + if len(args) == 1 { + return nil, stopError{msg: args[0]} + } + return nil, stopError{} + }) +} + +// stopError is the sentinel error type returned by the Stop command. +type stopError struct { + msg string +} + +func (s stopError) Error() string { + if s.msg == "" { + return "stop" + } + return "stop: " + s.msg +} + +// Symlink creates a symbolic link. +func Symlink() Cmd { + return Command( + CmdUsage{ + Summary: "create a symlink", + Args: "path -> target", + Detail: []string{ + "Creates path as a symlink to target.", + "The '->' token (like in 'ls -l' output on Unix) is required.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) != 3 || args[1] != "->" { + return nil, ErrUsage + } + + // Note that the link target args[2] is not interpreted with s.Path: + // it will be interpreted relative to the directory file is in. + return nil, os.Symlink(filepath.FromSlash(args[2]), s.Path(args[0])) + }) +} + +// Wait waits for the completion of background commands. +// +// When Wait returns, the stdout and stderr buffers contain the concatenation of +// the background commands' respective outputs in the order in which those +// commands were started. 
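+//
+// A command is run in the background by ending its line with a trailing '&';
+// 'wait' then blocks until every such command has finished.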
+func Wait() Cmd { + return Command( + CmdUsage{ + Summary: "wait for completion of background commands", + Args: "", + Detail: []string{ + "Waits for all background commands to complete.", + "The output (and any error) from each command is printed to the log in the order in which the commands were started.", + "After the call to 'wait', the script's stdout and stderr buffers contain the concatenation of the background commands' outputs.", + }, + }, + func(s *State, args ...string) (WaitFunc, error) { + if len(args) > 0 { + return nil, ErrUsage + } + + var stdouts, stderrs []string + var errs []*CommandError + for _, bg := range s.background { + stdout, stderr, err := bg.wait(s) + + beforeArgs := "" + if len(bg.args) > 0 { + beforeArgs = " " + } + s.Logf("[background] %s%s%s\n", bg.name, beforeArgs, quoteArgs(bg.args)) + + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + stdouts = append(stdouts, stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + stderrs = append(stderrs, stderr) + } + if err != nil { + s.Logf("[%v]\n", err) + } + if cmdErr := checkStatus(bg.command, err); cmdErr != nil { + errs = append(errs, cmdErr.(*CommandError)) + } + } + + s.stdout = strings.Join(stdouts, "") + s.stderr = strings.Join(stderrs, "") + s.background = nil + if len(errs) > 0 { + return nil, waitError{errs: errs} + } + return nil, nil + }) +} + +// A waitError wraps one or more errors returned by background commands. +type waitError struct { + errs []*CommandError +} + +func (w waitError) Error() string { + b := new(strings.Builder) + for i, err := range w.errs { + if i != 0 { + b.WriteString("\n") + } + b.WriteString(err.Error()) + } + return b.String() +} + +func (w waitError) Unwrap() error { + if len(w.errs) == 1 { + return w.errs[0] + } + return nil +} + +func Break() Cmd { + return Command( + CmdUsage{ + Summary: "break into interactive prompt", + }, + func(s *State, args ...string) (WaitFunc, error) { + tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0) + if err != nil { + return nil, fmt.Errorf("open /dev/tty: %w", err) + } + defer tty.Close() + + prev, err := term.MakeRaw(int(tty.Fd())) + if err != nil { + return nil, fmt.Errorf("cannot set /dev/tty to raw mode") + } + defer term.Restore(int(tty.Fd()), prev) + + // Flush any pending logs + engine := s.engine + + term := term.NewTerminal(tty, "debug> ") + s.FlushLog() + fmt.Fprintf(term, "\nBreak! Control-d to continue.\n") + + for { + line, err := term.ReadLine() + if err != nil { + return nil, nil + } + err = engine.ExecuteLine(s, line, term) + if err != nil { + fmt.Fprintln(term, err.Error()) + } + } + }, + ) +} diff --git a/vendor/github.com/cilium/hive/script/cmds_other.go b/vendor/github.com/cilium/hive/script/cmds_other.go new file mode 100644 index 0000000000..847b225ae6 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds_other.go @@ -0,0 +1,11 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || windows) + +package script + +func isETXTBSY(err error) bool { + return false +} diff --git a/vendor/github.com/cilium/hive/script/cmds_posix.go b/vendor/github.com/cilium/hive/script/cmds_posix.go new file mode 100644 index 0000000000..2525f6e752 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/cmds_posix.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || windows + +package script + +import ( + "errors" + "syscall" +) + +func isETXTBSY(err error) bool { + return errors.Is(err, syscall.ETXTBSY) +} diff --git a/vendor/github.com/cilium/hive/script/conds.go b/vendor/github.com/cilium/hive/script/conds.go new file mode 100644 index 0000000000..ffe5e3f0db --- /dev/null +++ b/vendor/github.com/cilium/hive/script/conds.go @@ -0,0 +1,198 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "fmt" + "os" + "runtime" + "sync" +) + +// DefaultConds returns a set of broadly useful script conditions. +// +// Run the 'help' command within a script engine to view a list of the available +// conditions. +func DefaultConds() map[string]Cond { + conds := make(map[string]Cond) + + conds["GOOS"] = PrefixCondition( + "runtime.GOOS == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOOS { + return true, nil + } + return false, nil + }) + + conds["GOARCH"] = PrefixCondition( + "runtime.GOARCH == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.GOARCH { + return true, nil + } + return false, nil + }) + + conds["compiler"] = PrefixCondition( + "runtime.Compiler == ", + func(_ *State, suffix string) (bool, error) { + if suffix == runtime.Compiler { + return true, nil + } + switch suffix { + case "gc", "gccgo": + return false, nil + default: + return false, fmt.Errorf("unrecognized compiler %q", suffix) + } + }) + + conds["root"] = BoolCondition("os.Geteuid() == 0", os.Geteuid() == 0) + + return conds +} + +// Condition returns a Cond with the given summary and evaluation function. +func Condition(summary string, eval func(*State) (bool, error)) Cond { + return &funcCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type funcCond struct { + eval func(*State) (bool, error) + usage CondUsage +} + +func (c *funcCond) Usage() *CondUsage { return &c.usage } + +func (c *funcCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return c.eval(s) +} + +// PrefixCondition returns a Cond with the given summary and evaluation function. +func PrefixCondition(summary string, eval func(*State, string) (bool, error)) Cond { + return &prefixCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type prefixCond struct { + eval func(*State, string) (bool, error) + usage CondUsage +} + +func (c *prefixCond) Usage() *CondUsage { return &c.usage } + +func (c *prefixCond) Eval(s *State, suffix string) (bool, error) { + return c.eval(s, suffix) +} + +// BoolCondition returns a Cond with the given truth value and summary. +// The Cond rejects the use of condition suffixes. +func BoolCondition(summary string, v bool) Cond { + return &boolCond{v: v, usage: CondUsage{Summary: summary}} +} + +type boolCond struct { + v bool + usage CondUsage +} + +func (b *boolCond) Usage() *CondUsage { return &b.usage } + +func (b *boolCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + return b.v, nil +} + +// OnceCondition returns a Cond that calls eval the first time the condition is +// evaluated. Future calls reuse the same result. 
+// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. +func OnceCondition(summary string, eval func() (bool, error)) Cond { + return &onceCond{eval: eval, usage: CondUsage{Summary: summary}} +} + +type onceCond struct { + once sync.Once + v bool + err error + eval func() (bool, error) + usage CondUsage +} + +func (l *onceCond) Usage() *CondUsage { return &l.usage } + +func (l *onceCond) Eval(s *State, suffix string) (bool, error) { + if suffix != "" { + return false, ErrUsage + } + l.once.Do(func() { l.v, l.err = l.eval() }) + return l.v, l.err +} + +// CachedCondition is like Condition but only calls eval the first time the +// condition is evaluated for a given suffix. +// Future calls with the same suffix reuse the earlier result. +// +// The eval function is not passed a *State because the condition is cached +// across all execution states and must not vary by state. +func CachedCondition(summary string, eval func(string) (bool, error)) Cond { + return &cachedCond{eval: eval, usage: CondUsage{Summary: summary, Prefix: true}} +} + +type cachedCond struct { + m sync.Map + eval func(string) (bool, error) + usage CondUsage +} + +func (c *cachedCond) Usage() *CondUsage { return &c.usage } + +func (c *cachedCond) Eval(_ *State, suffix string) (bool, error) { + for { + var ready chan struct{} + + v, loaded := c.m.Load(suffix) + if !loaded { + ready = make(chan struct{}) + v, loaded = c.m.LoadOrStore(suffix, (<-chan struct{})(ready)) + + if !loaded { + inPanic := true + defer func() { + if inPanic { + c.m.Delete(suffix) + } + close(ready) + }() + + b, err := c.eval(suffix) + inPanic = false + + if err == nil { + c.m.Store(suffix, b) + return b, nil + } else { + c.m.Store(suffix, err) + return false, err + } + } + } + + switch v := v.(type) { + case bool: + return v, nil + case error: + return false, v + case <-chan struct{}: + <-v + } + } +} diff --git a/vendor/github.com/cilium/hive/script/engine.go b/vendor/github.com/cilium/hive/script/engine.go new file mode 100644 index 0000000000..ed4fa6fdf3 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/engine.go @@ -0,0 +1,997 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package script implements a small, customizable, platform-agnostic scripting +// language. +// +// Scripts are run by an [Engine] configured with a set of available commands +// and conditions that guard those commands. Each script has an associated +// working directory and environment, along with a buffer containing the stdout +// and stderr output of a prior command, tracked in a [State] that commands can +// inspect and modify. +// +// The default commands configured by [NewEngine] resemble a simplified Unix +// shell. +// +// # Script Language +// +// Each line of a script is parsed into a sequence of space-separated command +// words, with environment variable expansion within each word and # marking an +// end-of-line comment. Additional variables named ':' and '/' are expanded +// within script arguments (expanding to the value of os.PathListSeparator and +// os.PathSeparator respectively) but are not inherited in subprocess +// environments. +// +// Adding single quotes around text keeps spaces in that text from being treated +// as word separators and also disables environment variable expansion. 
+// Inside a single-quoted block of text, a repeated single quote indicates +// a literal single quote, as in: +// +// 'Don''t communicate by sharing memory.' +// +// A line beginning with # is a comment and conventionally explains what is +// being done or tested at the start of a new section of the script. +// +// Commands are executed one at a time, and errors are checked for each command; +// if any command fails unexpectedly, no subsequent commands in the script are +// executed. The command prefix ! indicates that the command on the rest of the +// line (typically go or a matching predicate) must fail instead of succeeding. +// The command prefix ? indicates that the command may or may not succeed, but +// the script should continue regardless. +// +// The command prefix [cond] indicates that the command on the rest of the line +// should only run when the condition is satisfied. +// +// A condition can be negated: [!root] means to run the rest of the line only if +// the user is not root. Multiple conditions may be given for a single command, +// for example, '[linux] [amd64] skip'. The command will run if all conditions +// are satisfied. +// +// Package script is particularly good for writing tests. +// Ironically, it has no tests. +package script + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "regexp" + "sort" + "strings" + "time" + + "github.com/spf13/pflag" +) + +// An Engine stores the configuration for executing a set of scripts. +// +// The same Engine may execute multiple scripts concurrently. +type Engine struct { + Cmds map[string]Cmd + Conds map[string]Cond + + // If Quiet is true, Execute deletes log prints from the previous + // section when starting a new section. + Quiet bool + + // RetryInterval for retrying commands marked with '*'. If zero, then + // the default retry interval is used. + RetryInterval time.Duration +} + +// NewEngine returns an Engine configured with a basic set of commands and conditions. +func NewEngine() *Engine { + return &Engine{ + Cmds: DefaultCmds(), + Conds: DefaultConds(), + RetryInterval: defaultRetryInterval, + } +} + +const defaultRetryInterval = 100 * time.Millisecond + +// A Cmd is a command that is available to a script. +type Cmd interface { + // Run begins running the command. + // + // If the command produces output or can be run in the background, run returns + // a WaitFunc that will be called to obtain the result of the command and + // update the engine's stdout and stderr buffers. + // + // Run itself and the returned WaitFunc may inspect and/or modify the State, + // but the State's methods must not be called concurrently after Run has + // returned. + // + // Run may retain and access the args slice until the WaitFunc has returned. + Run(s *State, args ...string) (WaitFunc, error) + + // Usage returns the usage for the command, which the caller must not modify. + Usage() *CmdUsage +} + +// A WaitFunc is a function called to retrieve the results of a Cmd. +type WaitFunc func(*State) (stdout, stderr string, err error) + +// A CmdUsage describes the usage of a Cmd, independent of its name +// (which can change based on its registration). +type CmdUsage struct { + Summary string // in the style of the Name section of a Unix 'man' page, omitting the name + + // synopsis of arguments, e.g. "files...". If [Flags] is provided then these will be prepended + // to [Args] in usage output. + Args string + + // flags for the command, optional. 
If provided, then args passed to the command will not include any
+	// of the flag arguments. The [pflag.FlagSet] is available to the command via [State.Flags].
+	Flags func(fs *pflag.FlagSet)
+
+	Detail []string // zero or more sentences in the style of the Description section of a Unix 'man' page
+
+	// If Async is true, the Cmd is meaningful to run in the background, and its
+	// Run method must return either a non-nil WaitFunc or a non-nil error.
+	Async bool
+
+	// RegexpArgs reports which arguments, if any, should be treated as regular
+	// expressions. It takes as input the raw, unexpanded arguments and returns
+	// the list of argument indices that will be interpreted as regular
+	// expressions.
+	//
+	// If RegexpArgs is nil, all arguments are assumed not to be regular
+	// expressions.
+	RegexpArgs func(rawArgs ...string) []int
+}
+
+// A Cond is a condition deciding whether a command should be run.
+type Cond interface {
+	// Eval reports whether the condition applies to the given State.
+	//
+	// If the condition's usage reports that it is a prefix,
+	// the condition must be used with a suffix.
+	// Otherwise, the passed-in suffix argument is always the empty string.
+	Eval(s *State, suffix string) (bool, error)
+
+	// Usage returns the usage for the condition, which the caller must not modify.
+	Usage() *CondUsage
+}
+
+// A CondUsage describes the usage of a Cond, independent of its name
+// (which can change based on its registration).
+type CondUsage struct {
+	Summary string // a single-line summary of when the condition is true
+
+	// If Prefix is true, the condition is a prefix and requires a
+	// colon-separated suffix (like "[GOOS:linux]" for the "GOOS" condition).
+	// The suffix may be the empty string (like "[prefix:]").
+	Prefix bool
+}
+
+// Execute reads and executes script, writing the output to log.
+//
+// Execute stops and returns an error at the first command that does not succeed.
+// The returned error's text begins with "file:line: ".
+//
+// If the script runs to completion or ends by a 'stop' command,
+// Execute returns nil.
+//
+// Execute does not stop background commands started by the script
+// before returning. To stop those, use [State.CloseAndWait] or the
+// [Wait] command.
+func (e *Engine) Execute(s *State, file string, script *bufio.Reader, log io.Writer) (err error) {
+	defer func(prev *Engine) { s.engine = prev }(s.engine)
+	s.engine = e
+	defer func(prev io.Writer) { s.logOut = prev }(s.logOut)
+	s.logOut = log
+
+	retryInterval := e.RetryInterval
+	if retryInterval == 0 {
+		retryInterval = defaultRetryInterval
+	}
+
+	var (
+		sectionStart time.Time
+		sectionCmds  []*command
+	)
+	// endSection flushes the logs for the current section from s.log to log.
+	// ok indicates whether all commands in the section succeeded.
+	endSection := func(ok bool) error {
+		var err error
+		if sectionStart.IsZero() {
+			// We didn't write a section header or record a timestamp, so just dump the
+			// whole log without those.
+			if s.log.Len() > 0 {
+				err = s.FlushLog()
+			}
+		} else if s.log.Len() == 0 {
+			// Adding elapsed time for doing nothing is meaningless, so don't.
+			_, err = io.WriteString(log, "\n")
+		} else {
+			// Insert elapsed time for section at the end of the section's comment.
+ _, err = fmt.Fprintf(log, " (%.3fs)\n", time.Since(sectionStart).Seconds()) + + if err == nil && (!ok || !e.Quiet) { + err = s.FlushLog() + } else { + s.log.Reset() + } + } + + sectionStart = time.Time{} + sectionCmds = nil + return err + } + + var lineno int + lineErr := func(err error) error { + if errors.As(err, new(*CommandError)) { + return err + } + return fmt.Errorf("%s:%d: %w", file, lineno, err) + } + + // In case of failure or panic, flush any pending logs for the section. + defer func() { + if sErr := endSection(false); sErr != nil && err == nil { + err = lineErr(sErr) + } + }() + + for { + if err := s.ctx.Err(); err != nil { + // This error wasn't produced by any particular command, + // so don't wrap it in a CommandError. + return lineErr(err) + } + + line, err := script.ReadString('\n') + if err == io.EOF { + if line == "" { + break // Reached the end of the script. + } + // If the script doesn't end in a newline, interpret the final line. + } else if err != nil { + return lineErr(err) + } + line = strings.TrimSuffix(line, "\n") + lineno++ + + // The comment character "#" at the start of the line delimits a section of + // the script. + if strings.HasPrefix(line, "#") { + // If there was a previous section, the fact that we are starting a new + // one implies the success of the previous one. + // + // At the start of the script, the state may also contain accumulated logs + // from commands executed on the State outside of the engine in order to + // set it up; flush those logs too. + if err := endSection(true); err != nil { + return lineErr(err) + } + + // Log the section start without a newline so that we can add + // a timestamp for the section when it ends. + _, err = fmt.Fprintf(log, "%s", line) + sectionStart = time.Now() + if err != nil { + return lineErr(err) + } + continue + } + + cmd, err := parse(file, lineno, line) + if cmd == nil && err == nil { + continue // Ignore blank lines. + } + sectionCmds = append(sectionCmds, cmd) + + s.Logf("> %s\n", line) + if err != nil { + return lineErr(err) + } + + // Evaluate condition guards. + ok, err := e.conditionsActive(s, cmd.conds) + if err != nil { + return lineErr(err) + } + if !ok { + s.Logf("[condition not met]\n") + continue + } + + impl := e.Cmds[cmd.name] + + // Expand variables in arguments. + var regexpArgs []int + if impl != nil { + usage := impl.Usage() + if usage.RegexpArgs != nil { + // First join rawArgs without expansion to pass to RegexpArgs. + rawArgs := make([]string, 0, len(cmd.rawArgs)) + for _, frags := range cmd.rawArgs { + var b strings.Builder + for _, frag := range frags { + b.WriteString(frag.s) + } + rawArgs = append(rawArgs, b.String()) + } + regexpArgs = usage.RegexpArgs(rawArgs...) + } + } + cmd.origArgs = expandArgs(s, cmd.rawArgs, regexpArgs) + cmd.args = cmd.origArgs + + // Run the command. + err = e.runCommand(s, cmd, impl) + if err != nil { + if cmd.want == successRetryOnFailure || cmd.want == failureRetryOnSuccess { + // Command wants retries. 
Retry the whole section + numRetries := 0 + for err != nil { + s.FlushLog() + select { + case <-s.Context().Done(): + return lineErr(s.Context().Err()) + case <-time.After(retryInterval): + } + s.Logf("(command %q failed, retrying...)\n", line) + numRetries++ + for _, cmd := range sectionCmds { + impl := e.Cmds[cmd.name] + if err = e.runCommand(s, cmd, impl); err != nil { + break + } + } + } + s.Logf("(command %q succeeded after %d retries)\n", line, numRetries) + } else { + if stop := (stopError{}); errors.As(err, &stop) { + // Since the 'stop' command halts execution of the entire script, + // log its message separately from the section in which it appears. + err = endSection(true) + s.Logf("%v\n", stop) + if err == nil { + return nil + } + } + return lineErr(err) + } + } + } + + if err := endSection(true); err != nil { + return lineErr(err) + } + return nil +} + +func (e *Engine) ExecuteLine(s *State, line string, log io.Writer) (err error) { + defer func(prev *Engine) { s.engine = prev }(s.engine) + s.engine = e + defer func(prev io.Writer) { s.logOut = prev }(s.logOut) + s.logOut = log + defer s.FlushLog() + + cmd, err := parse("", 0, line) + if cmd == nil && err == nil { + return nil + } + if err != nil { + return err + } + + // Evaluate condition guards. + ok, err := e.conditionsActive(s, cmd.conds) + if err != nil { + return err + } + if !ok { + s.Logf("[condition not met]\n") + return + } + + impl := e.Cmds[cmd.name] + + // Expand variables in arguments. + var regexpArgs []int + if impl != nil { + usage := impl.Usage() + if usage.RegexpArgs != nil { + // First join rawArgs without expansion to pass to RegexpArgs. + rawArgs := make([]string, 0, len(cmd.rawArgs)) + for _, frags := range cmd.rawArgs { + var b strings.Builder + for _, frag := range frags { + b.WriteString(frag.s) + } + rawArgs = append(rawArgs, b.String()) + } + regexpArgs = usage.RegexpArgs(rawArgs...) + } + } + cmd.args = expandArgs(s, cmd.rawArgs, regexpArgs) + cmd.origArgs = cmd.args + + // Run the command. + err = e.runCommand(s, cmd, impl) + if err != nil { + if stop := (stopError{}); errors.As(err, &stop) { + // Since the 'stop' command halts execution of the entire script, + // log its message separately from the section in which it appears. + s.Logf("%v\n", stop) + if err == nil { + return nil + } + } + return err + } + return nil +} + +// A command is a complete command parsed from a script. +type command struct { + file string + line int + want expectedStatus + conds []condition // all must be satisfied + name string // the name of the command; must be non-empty + rawArgs [][]argFragment + origArgs []string // original arguments before pflag parsing + args []string // shell-expanded arguments following name + background bool // command should run in background (ends with a trailing &) +} + +// An expectedStatus describes the expected outcome of a command. +// Script execution halts when a command does not match its expected status. +type expectedStatus string + +const ( + success expectedStatus = "" + failure expectedStatus = "!" + successOrFailure expectedStatus = "?" + successRetryOnFailure expectedStatus = "*" + failureRetryOnSuccess expectedStatus = "!*" +) + +type argFragment struct { + s string + quoted bool // if true, disable variable expansion for this fragment +} + +type condition struct { + want bool + tag string +} + +const argSepChars = " \t\r\n#" + +// parse parses a single line as a list of space-separated arguments. +// subject to environment variable expansion (but not resplitting). 
+// Single quotes around text disable splitting and expansion. +// To embed a single quote, double it: +// +// 'Don''t communicate by sharing memory.' +func parse(filename string, lineno int, line string) (cmd *command, err error) { + cmd = &command{file: filename, line: lineno} + var ( + rawArg []argFragment // text fragments of current arg so far (need to add line[start:i]) + start = -1 // if >= 0, position where current arg text chunk starts + quoted = false // currently processing quoted text + ) + + flushArg := func() error { + if len(rawArg) == 0 { + return nil // Nothing to flush. + } + defer func() { rawArg = nil }() + + if cmd.name == "" && len(rawArg) == 1 && !rawArg[0].quoted { + arg := rawArg[0].s + + // Command prefix ! means negate the expectations about this command: + // go command should fail, match should not be found, etc. + // Prefix ? means allow either success or failure. + // Prefix * means to retry the command until success (or context cancelled) + // Prefix !* means to retry the command until failure (or context cancelled) + switch want := expectedStatus(arg); want { + case failure, successOrFailure, successRetryOnFailure, failureRetryOnSuccess: + if cmd.want != "" { + return errors.New("duplicated '!', '?', '*' or '!*' token") + } + cmd.want = want + return nil + } + + // Command prefix [cond] means only run this command if cond is satisfied. + if strings.HasPrefix(arg, "[") && strings.HasSuffix(arg, "]") { + want := true + arg = strings.TrimSpace(arg[1 : len(arg)-1]) + if strings.HasPrefix(arg, "!") { + want = false + arg = strings.TrimSpace(arg[1:]) + } + if arg == "" { + return errors.New("empty condition") + } + cmd.conds = append(cmd.conds, condition{want: want, tag: arg}) + return nil + } + + if arg == "" { + return errors.New("empty command") + } + cmd.name = arg + return nil + } + + cmd.rawArgs = append(cmd.rawArgs, rawArg) + return nil + } + + for i := 0; ; i++ { + if !quoted && (i >= len(line) || strings.ContainsRune(argSepChars, rune(line[i]))) { + // Found arg-separating space. + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + start = -1 + } + if err := flushArg(); err != nil { + return nil, err + } + if i >= len(line) || line[i] == '#' { + break + } + continue + } + if i >= len(line) { + return nil, errors.New("unterminated quoted argument") + } + if line[i] == '\'' { + if !quoted { + // starting a quoted chunk + if start >= 0 { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: false}) + } + start = i + 1 + quoted = true + continue + } + // 'foo''bar' means foo'bar, like in rc shell and Pascal. + if i+1 < len(line) && line[i+1] == '\'' { + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + i++ // skip over second ' before next iteration + continue + } + // ending a quoted chunk + rawArg = append(rawArg, argFragment{s: line[start:i], quoted: true}) + start = i + 1 + quoted = false + continue + } + // found character worth saving; make sure we're saving + if start < 0 { + start = i + } + } + + if cmd.name == "" { + if cmd.want != "" || len(cmd.conds) > 0 || len(cmd.rawArgs) > 0 || cmd.background { + // The line contains a command prefix or suffix, but no actual command. + return nil, errors.New("missing command") + } + + // The line is blank, or contains only a comment. 
+ return nil, nil + } + + if n := len(cmd.rawArgs); n > 0 { + last := cmd.rawArgs[n-1] + if len(last) == 1 && !last[0].quoted && last[0].s == "&" { + cmd.background = true + cmd.rawArgs = cmd.rawArgs[:n-1] + } + } + return cmd, nil +} + +// expandArgs expands the shell variables in rawArgs and joins them to form the +// final arguments to pass to a command. +func expandArgs(s *State, rawArgs [][]argFragment, regexpArgs []int) []string { + args := make([]string, 0, len(rawArgs)) + for i, frags := range rawArgs { + isRegexp := false + for _, j := range regexpArgs { + if i == j { + isRegexp = true + break + } + } + + var b strings.Builder + for _, frag := range frags { + if frag.quoted { + b.WriteString(frag.s) + } else { + b.WriteString(s.ExpandEnv(frag.s, isRegexp)) + } + } + args = append(args, b.String()) + } + return args +} + +// quoteArgs returns a string that parse would parse as args when passed to a command. +// +// TODO(bcmills): This function should have a fuzz test. +func quoteArgs(args []string) string { + var b strings.Builder + for i, arg := range args { + if i > 0 { + b.WriteString(" ") + } + if strings.ContainsAny(arg, "'"+argSepChars) { + // Quote the argument to a form that would be parsed as a single argument. + b.WriteString("'") + b.WriteString(strings.ReplaceAll(arg, "'", "''")) + b.WriteString("'") + } else { + b.WriteString(arg) + } + } + return b.String() +} + +func (e *Engine) conditionsActive(s *State, conds []condition) (bool, error) { + for _, cond := range conds { + var impl Cond + prefix, suffix, ok := strings.Cut(cond.tag, ":") + if ok { + impl = e.Conds[prefix] + if impl == nil { + return false, fmt.Errorf("unknown condition prefix %q", prefix) + } + if !impl.Usage().Prefix { + return false, fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + } else { + impl = e.Conds[cond.tag] + if impl == nil { + return false, fmt.Errorf("unknown condition %q", cond.tag) + } + if impl.Usage().Prefix { + return false, fmt.Errorf("condition %q requires a suffix", cond.tag) + } + } + active, err := impl.Eval(s, suffix) + + if err != nil { + return false, fmt.Errorf("evaluating condition %q: %w", cond.tag, err) + } + if active != cond.want { + return false, nil + } + } + + return true, nil +} + +func (e *Engine) runCommand(s *State, cmd *command, impl Cmd) error { + if impl == nil { + return cmdError(cmd, errors.New("unknown command")) + } + + usage := impl.Usage() + + async := usage.Async + if cmd.background && !async { + return cmdError(cmd, errors.New("command cannot be run in background")) + } + + // Register and parse the flags. We do this regardless of whether [usage.Flags] + // is set in order to handle -h/--help. + fs := pflag.NewFlagSet(cmd.name, pflag.ContinueOnError) + fs.Usage = func() {} // Don't automatically handle error + fs.SetOutput(s.logOut) + if usage.Flags != nil { + usage.Flags(fs) + } + if err := fs.Parse(cmd.origArgs); err != nil { + if errors.Is(err, pflag.ErrHelp) { + out := new(strings.Builder) + err = e.ListCmds(out, true, "^"+cmd.name+"$") + s.stdout = out.String() + if s.stdout != "" { + s.Logf("[stdout]\n%s", s.stdout) + } + return err + } + if usage.Flags != nil { + return cmdError(cmd, err) + } + + // [usage.Flags] wasn't given, so ignore the parsing errors as the + // command might do its own argument parsing. + } else { + cmd.args = fs.Args() + s.Flags = fs + } + + wait, runErr := impl.Run(s, cmd.args...) 
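+	// A nil WaitFunc means the command finished during Run itself; commands
+	// marked Async must instead return a WaitFunc (or an error) so that they
+	// can be run in the background.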
+ if wait == nil { + if async && runErr == nil { + return cmdError(cmd, errors.New("internal error: async command returned a nil WaitFunc")) + } + return checkStatus(cmd, runErr) + } + if runErr != nil { + return cmdError(cmd, errors.New("internal error: command returned both an error and a WaitFunc")) + } + + if cmd.background { + s.background = append(s.background, backgroundCmd{ + command: cmd, + wait: wait, + }) + // Clear stdout and stderr, since they no longer correspond to the last + // command executed. + s.stdout = "" + s.stderr = "" + return nil + } + + stdout, stderr, waitErr := wait(s) + s.stdout = stdout + s.stderr = stderr + if stdout != "" { + s.Logf("[stdout]\n%s", stdout) + } + if stderr != "" { + s.Logf("[stderr]\n%s", stderr) + } + if cmdErr := checkStatus(cmd, waitErr); cmdErr != nil { + return cmdErr + } + if waitErr != nil { + // waitErr was expected (by cmd.want), so log it instead of returning it. + s.Logf("[%v]\n", waitErr) + } + return nil +} + +func checkStatus(cmd *command, err error) error { + if err == nil { + if cmd.want == failure || cmd.want == failureRetryOnSuccess { + return cmdError(cmd, ErrUnexpectedSuccess) + } + return nil + } + + if s := (stopError{}); errors.As(err, &s) { + // This error originated in the Stop command. + // Propagate it as-is. + return cmdError(cmd, err) + } + + if w := (waitError{}); errors.As(err, &w) { + // This error was surfaced from a background process by a call to Wait. + // Add a call frame for Wait itself, but ignore its "want" field. + // (Wait itself cannot fail to wait on commands or else it would leak + // processes and/or goroutines — so a negative assertion for it would be at + // best ambiguous.) + return cmdError(cmd, err) + } + + if cmd.want == success || cmd.want == successRetryOnFailure { + return cmdError(cmd, err) + } + + if (cmd.want == failure || cmd.want == failureRetryOnSuccess) && (errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) { + // The command was terminated because the script is no longer interested in + // its output, so we don't know what it would have done had it run to + // completion — for all we know, it could have exited without error if it + // ran just a smidge faster. + return cmdError(cmd, err) + } + + return nil +} + +// ListCmds prints to w a list of the named commands, +// annotating each with its arguments and a short usage summary. +// If verbose is true, ListCmds prints full details for each command. 
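+// Only commands whose names match the regular expression regexMatch are
+// listed; an empty regexMatch lists every registered command.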
+func (e *Engine) ListCmds(w io.Writer, verbose bool, regexMatch string) error { + var re *regexp.Regexp + if regexMatch != "" { + var err error + re, err = regexp.Compile(regexMatch) + if err != nil { + return err + } + } + names := make([]string, 0, len(e.Cmds)) + for name := range e.Cmds { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + if re != nil && !re.MatchString(name) { + continue + } + cmd, ok := e.Cmds[name] + if !ok { + _, err := fmt.Fprintf(w, "command %q is not registered\n", name) + if err != nil { + return err + } + return nil + } + + usage := cmd.Usage() + + suffix := "" + if usage.Async { + suffix = " [&]" + } + + flagArgs := "" + flagUsages := "" + if usage.Flags != nil { + fs := pflag.NewFlagSet(name, pflag.ContinueOnError) + fs.SetOutput(w) + usage.Flags(fs) + flagUsages = fs.FlagUsages() + shortArgs := []string{} + longArgs := []string{} + fs.VisitAll(func(flag *pflag.Flag) { + if flag.Shorthand != "" { + shortArgs = append(shortArgs, flag.Shorthand) + } + switch flag.Value.Type() { + case "bool": + longArgs = append(longArgs, fmt.Sprintf("[--%s]", flag.Name)) + default: + longArgs = append(longArgs, fmt.Sprintf("[--%s=%s]", flag.Name, flag.Value.Type())) + } + }) + if len(shortArgs) > 0 { + flagArgs = fmt.Sprintf("[-%s] ", strings.Join(shortArgs, "")) + } + flagArgs += strings.Join(longArgs, " ") + " " + } + + _, err := fmt.Fprintf(w, "%s %s%s\n\t%s\n", name, flagArgs+usage.Args, suffix, usage.Summary) + if err != nil { + return err + } + + if verbose { + if len(usage.Detail) > 0 { + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + + for _, line := range usage.Detail { + if err := wrapLine(w, line, 60, "\t"); err != nil { + return err + } + } + } + if flagUsages != "" { + if _, err := io.WriteString(w, "\n"); err != nil { + return err + } + lines := strings.Split(flagUsages, "\n") + if len(lines) > 0 { + if _, err := fmt.Fprintf(w, "\tFlags:\n"); err != nil { + return err + } + for _, line := range lines { + if _, err := fmt.Fprintf(w, "\t%s\n", line); err != nil { + return err + } + } + } + } + } + } + + return nil +} + +func wrapLine(w io.Writer, line string, cols int, indent string) error { + line = strings.TrimLeft(line, " ") + for len(line) > cols { + bestSpace := -1 + for i, r := range line { + if r == ' ' { + if i <= cols || bestSpace < 0 { + bestSpace = i + } + if i > cols { + break + } + } + } + if bestSpace < 0 { + break + } + + if _, err := fmt.Fprintf(w, "%s%s\n", indent, line[:bestSpace]); err != nil { + return err + } + line = line[bestSpace+1:] + } + + _, err := fmt.Fprintf(w, "%s%s\n", indent, line) + return err +} + +// ListConds prints to w a list of conditions, one per line, +// annotating each with a description and whether the condition +// is true in the state s (if s is non-nil). +// +// Each of the tag arguments should be a condition string of +// the form "name" or "name:suffix". If no tags are passed as +// arguments, ListConds lists all conditions registered in +// the engine e. 
+func (e *Engine) ListConds(w io.Writer, s *State, tags ...string) error { + if tags == nil { + tags = make([]string, 0, len(e.Conds)) + for name := range e.Conds { + tags = append(tags, name) + } + sort.Strings(tags) + } + + for _, tag := range tags { + if prefix, suffix, ok := strings.Cut(tag, ":"); ok { + cond := e.Conds[prefix] + if cond == nil { + return fmt.Errorf("unknown condition prefix %q", prefix) + } + usage := cond.Usage() + if !usage.Prefix { + return fmt.Errorf("condition %q cannot be used with a suffix", prefix) + } + + activeStr := "" + if s != nil { + if active, _ := cond.Eval(s, suffix); active { + activeStr = " (active)" + } + } + _, err := fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + if err != nil { + return err + } + continue + } + + cond := e.Conds[tag] + if cond == nil { + return fmt.Errorf("unknown condition %q", tag) + } + var err error + usage := cond.Usage() + if usage.Prefix { + _, err = fmt.Fprintf(w, "[%s:*]\n\t%s\n", tag, usage.Summary) + } else { + activeStr := "" + if s != nil { + if ok, _ := cond.Eval(s, ""); ok { + activeStr = " (active)" + } + } + _, err = fmt.Fprintf(w, "[%s]%s\n\t%s\n", tag, activeStr, usage.Summary) + } + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/cilium/hive/script/errors.go b/vendor/github.com/cilium/hive/script/errors.go new file mode 100644 index 0000000000..7f43e72888 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/errors.go @@ -0,0 +1,64 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "errors" + "fmt" +) + +// ErrUnexpectedSuccess indicates that a script command that was expected to +// fail (as indicated by a "!" prefix) instead completed successfully. +var ErrUnexpectedSuccess = errors.New("unexpected success") + +// A CommandError describes an error resulting from attempting to execute a +// specific command. +type CommandError struct { + File string + Line int + Op string + Args []string + Err error +} + +func cmdError(cmd *command, err error) *CommandError { + return &CommandError{ + File: cmd.file, + Line: cmd.line, + Op: cmd.name, + Args: cmd.args, + Err: err, + } +} + +func (e *CommandError) Error() string { + if len(e.Args) == 0 { + return fmt.Sprintf("%s:%d: %s: %v", e.File, e.Line, e.Op, e.Err) + } + return fmt.Sprintf("%s:%d: %s %s: %v", e.File, e.Line, e.Op, quoteArgs(e.Args), e.Err) +} + +func (e *CommandError) Unwrap() error { return e.Err } + +// A UsageError reports the valid arguments for a command. +// +// It may be returned in response to invalid arguments. +type UsageError struct { + Name string + Command Cmd +} + +func (e *UsageError) Error() string { + usage := e.Command.Usage() + suffix := "" + if usage.Async { + suffix = " [&]" + } + return fmt.Sprintf("usage: %s %s%s", e.Name, usage.Args, suffix) +} + +// ErrUsage may be returned by a Command to indicate that it was called with +// invalid arguments; its Usage method may be called to obtain details. +var ErrUsage = errors.New("invalid usage") diff --git a/vendor/github.com/cilium/hive/script/internal/diff/diff.go b/vendor/github.com/cilium/hive/script/internal/diff/diff.go new file mode 100644 index 0000000000..0aeeb75eb0 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/internal/diff/diff.go @@ -0,0 +1,261 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "fmt" + "sort" + "strings" +) + +// A pair is a pair of values tracked for both the x and y side of a diff. +// It is typically a pair of line indexes. +type pair struct{ x, y int } + +// Diff returns an anchored diff of the two texts old and new +// in the “unified diff” format. If old and new are identical, +// Diff returns a nil slice (no output). +// +// Unix diff implementations typically look for a diff with +// the smallest number of lines inserted and removed, +// which can in the worst case take time quadratic in the +// number of lines in the texts. As a result, many implementations +// either can be made to run for a long time or cut off the search +// after a predetermined amount of work. +// +// In contrast, this implementation looks for a diff with the +// smallest number of “unique” lines inserted and removed, +// where unique means a line that appears just once in both old and new. +// We call this an “anchored diff” because the unique lines anchor +// the chosen matching regions. An anchored diff is usually clearer +// than a standard diff, because the algorithm does not try to +// reuse unrelated blank lines or closing braces. +// The algorithm also guarantees to run in O(n log n) time +// instead of the standard O(n²) time. +// +// Some systems call this approach a “patience diff,” named for +// the “patience sorting” algorithm, itself named for a solitaire card game. +// We avoid that name for two reasons. First, the name has been used +// for a few different variants of the algorithm, so it is imprecise. +// Second, the name is frequently interpreted as meaning that you have +// to wait longer (to be patient) for the diff, meaning that it is a slower algorithm, +// when in fact the algorithm is faster than the standard one. +func Diff(oldName string, old []byte, newName string, new []byte) []byte { + if bytes.Equal(old, new) { + return nil + } + x := lines(old) + y := lines(new) + + // Print diff header. + var out bytes.Buffer + fmt.Fprintf(&out, "diff %s %s\n", oldName, newName) + fmt.Fprintf(&out, "--- %s\n", oldName) + fmt.Fprintf(&out, "+++ %s\n", newName) + + // Loop over matches to consider, + // expanding each match to include surrounding lines, + // and then printing diff chunks. + // To avoid setup/teardown cases outside the loop, + // tgs returns a leading {0,0} and trailing {len(x), len(y)} pair + // in the sequence of matches. + var ( + done pair // printed up to x[:done.x] and y[:done.y] + chunk pair // start lines of current chunk + count pair // number of lines from each side in current chunk + ctext []string // lines for current chunk + ) + for _, m := range tgs(x, y) { + if m.x < done.x { + // Already handled scanning forward from earlier match. + continue + } + + // Expand matching lines as far possible, + // establishing that x[start.x:end.x] == y[start.y:end.y]. + // Note that on the first (or last) iteration we may (or definitely do) + // have an empty match: start.x==end.x and start.y==end.y. + start := m + for start.x > done.x && start.y > done.y && x[start.x-1] == y[start.y-1] { + start.x-- + start.y-- + } + end := m + for end.x < len(x) && end.y < len(y) && x[end.x] == y[end.y] { + end.x++ + end.y++ + } + + // Emit the mismatched lines before start into this chunk. + // (No effect on first sentinel iteration, when start = {0,0}.) 
+ for _, s := range x[done.x:start.x] { + ctext = append(ctext, "-"+s) + count.x++ + } + for _, s := range y[done.y:start.y] { + ctext = append(ctext, "+"+s) + count.y++ + } + + // If we're not at EOF and have too few common lines, + // the chunk includes all the common lines and continues. + const C = 3 // number of context lines + if (end.x < len(x) || end.y < len(y)) && + (end.x-start.x < C || (len(ctext) > 0 && end.x-start.x < 2*C)) { + for _, s := range x[start.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + continue + } + + // End chunk with common lines for context. + if len(ctext) > 0 { + n := end.x - start.x + if n > C { + n = C + } + for _, s := range x[start.x : start.x+n] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = pair{start.x + n, start.y + n} + + // Format and emit chunk. + // Convert line numbers to 1-indexed. + // Special case: empty file shows up as 0,0 not 1,0. + if count.x > 0 { + chunk.x++ + } + if count.y > 0 { + chunk.y++ + } + fmt.Fprintf(&out, "@@ -%d,%d +%d,%d @@\n", chunk.x, count.x, chunk.y, count.y) + for _, s := range ctext { + out.WriteString(s) + } + count.x = 0 + count.y = 0 + ctext = ctext[:0] + } + + // If we reached EOF, we're done. + if end.x >= len(x) && end.y >= len(y) { + break + } + + // Otherwise start a new chunk. + chunk = pair{end.x - C, end.y - C} + for _, s := range x[chunk.x:end.x] { + ctext = append(ctext, " "+s) + count.x++ + count.y++ + } + done = end + } + + return out.Bytes() +} + +// lines returns the lines in the file x, including newlines. +// If the file does not end in a newline, one is supplied +// along with a warning about the missing newline. +func lines(x []byte) []string { + l := strings.SplitAfter(string(x), "\n") + if l[len(l)-1] == "" { + l = l[:len(l)-1] + } else { + // Treat last line as having a message about the missing newline attached, + // using the same text as BSD/GNU diff (including the leading backslash). + l[len(l)-1] += "\n\\ No newline at end of file\n" + } + return l +} + +// tgs returns the pairs of indexes of the longest common subsequence +// of unique lines in x and y, where a unique line is one that appears +// once in x and once in y. +// +// The longest common subsequence algorithm is as described in +// Thomas G. Szymanski, “A Special Case of the Maximal Common +// Subsequence Problem,” Princeton TR #170 (January 1975), +// available at https://research.swtch.com/tgs170.pdf. +func tgs(x, y []string) []pair { + // Count the number of times each string appears in a and b. + // We only care about 0, 1, many, counted as 0, -1, -2 + // for the x side and 0, -4, -8 for the y side. + // Using negative numbers now lets us distinguish positive line numbers later. + m := make(map[string]int) + for _, s := range x { + if c := m[s]; c > -2 { + m[s] = c - 1 + } + } + for _, s := range y { + if c := m[s]; c > -8 { + m[s] = c - 4 + } + } + + // Now unique strings can be identified by m[s] = -1+-4. + // + // Gather the indexes of those strings in x and y, building: + // xi[i] = increasing indexes of unique strings in x. + // yi[i] = increasing indexes of unique strings in y. + // inv[i] = index j such that x[xi[i]] = y[yi[j]]. + var xi, yi, inv []int + for i, s := range y { + if m[s] == -1+-4 { + m[s] = len(yi) + yi = append(yi, i) + } + } + for i, s := range x { + if j, ok := m[s]; ok && j >= 0 { + xi = append(xi, i) + inv = append(inv, j) + } + } + + // Apply Algorithm A from Szymanski's paper. + // In those terms, A = J = inv and B = [0, n). 
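+	// Concretely, the code below first runs a patience-style longest
+	// increasing subsequence computation over J: T[k] holds the smallest
+	// tail value of any increasing subsequence of length k+1 found so far,
+	// and L[i] records the length of the longest increasing subsequence
+	// ending at J[i]. For example, J = [2, 0, 1] yields L = [1, 1, 2] and
+	// k = 2, picking the increasing run J[1], J[2] = 0, 1.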
+ // We add sentinel pairs {0,0}, and {len(x),len(y)} + // to the returned sequence, to help the processing loop. + J := inv + n := len(xi) + T := make([]int, n) + L := make([]int, n) + for i := range T { + T[i] = n + 1 + } + for i := 0; i < n; i++ { + k := sort.Search(n, func(k int) bool { + return T[k] >= J[i] + }) + T[k] = J[i] + L[i] = k + 1 + } + k := 0 + for _, v := range L { + if k < v { + k = v + } + } + seq := make([]pair, 2+k) + seq[1+k] = pair{len(x), len(y)} // sentinel at end + lastj := n + for i := n - 1; i >= 0; i-- { + if L[i] == k && J[i] < lastj { + seq[k] = pair{xi[i], yi[J[i]]} + k-- + } + } + seq[0] = pair{0, 0} // sentinel at start + return seq +} diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix.go b/vendor/github.com/cilium/hive/script/makeraw_unix.go new file mode 100644 index 0000000000..3fffbc74dc --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || aix || linux || solaris || zos + +package script + +import ( + "golang.org/x/sys/unix" +) + +// MakeRaw sets the terminal to raw mode, but with interrupt signals enabled. +func MakeRaw(fd int) (restore func(), err error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := *termios + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.IEXTEN + termios.Lflag |= unix.ISIG // Enable interrupt signals + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return func() { + unix.IoctlSetTermios(fd, ioctlWriteTermios, &oldState) + }, nil +} diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go b/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go new file mode 100644 index 0000000000..064368d40a --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix_bsd.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd + +package script + +import ( + "golang.org/x/sys/unix" +) + +const ( + ioctlReadTermios = unix.TIOCGETA + ioctlWriteTermios = unix.TIOCSETA +) diff --git a/vendor/github.com/cilium/hive/script/makeraw_unix_other.go b/vendor/github.com/cilium/hive/script/makeraw_unix_other.go new file mode 100644 index 0000000000..84449a5f56 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unix_other.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build aix || linux || solaris || zos + +package script + +import "golang.org/x/sys/unix" + +const ( + ioctlReadTermios = unix.TCGETS + ioctlWriteTermios = unix.TCSETS +) diff --git a/vendor/github.com/cilium/hive/script/makeraw_unsupported.go b/vendor/github.com/cilium/hive/script/makeraw_unsupported.go new file mode 100644 index 0000000000..fe88ed87b2 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/makeraw_unsupported.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !darwin && !linux + +package script + +import ( + "fmt" + "runtime" +) + +func MakeRaw(fd int) (restore func(), err error) { + return func() {}, fmt.Errorf("MakeRaw: not supported on %s", runtime.GOOS) +} diff --git a/vendor/github.com/cilium/hive/script/state.go b/vendor/github.com/cilium/hive/script/state.go new file mode 100644 index 0000000000..29c3287f95 --- /dev/null +++ b/vendor/github.com/cilium/hive/script/state.go @@ -0,0 +1,249 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package script + +import ( + "bytes" + "context" + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/spf13/pflag" + "golang.org/x/tools/txtar" +) + +// A State encapsulates the current state of a running script engine, +// including the script environment and any running background commands. +type State struct { + engine *Engine // the engine currently executing the script, if any + + ctx context.Context + cancel context.CancelFunc + log bytes.Buffer + logOut io.Writer + + workdir string // initial working directory + pwd string // current working directory during execution + env []string // environment list (for os/exec) + envMap map[string]string // environment mapping (matches env) + stdout string // standard output from last 'go' command; for 'stdout' command + stderr string // standard error from last 'go' command; for 'stderr' command + + Flags *pflag.FlagSet + DoUpdate bool + FileUpdates map[string]string + + background []backgroundCmd +} + +type backgroundCmd struct { + *command + wait WaitFunc +} + +// NewState returns a new State permanently associated with ctx, with its +// initial working directory in workdir and its initial environment set to +// initialEnv (or os.Environ(), if initialEnv is nil). +// +// The new State also contains pseudo-environment-variables for +// ${/} and ${:} (for the platform's path and list separators respectively), +// but does not pass those to subprocesses. +func NewState(ctx context.Context, workdir string, initialEnv []string) (*State, error) { + absWork, err := filepath.Abs(workdir) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + + // Make a fresh copy of the env slice to avoid aliasing bugs if we ever + // start modifying it in place; this also establishes the invariant that + // s.env contains no duplicates. + env := cleanEnv(initialEnv, absWork) + + envMap := make(map[string]string, len(env)) + + // Add entries for ${:} and ${/} to make it easier to write platform-independent + // paths in scripts. 
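+	// For example, a script path written as "dir${/}file" expands to
+	// "dir/file" on Unix-like systems and to "dir\file" on Windows.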
+ envMap["/"] = string(os.PathSeparator) + envMap[":"] = string(os.PathListSeparator) + + for _, kv := range env { + if k, v, ok := strings.Cut(kv, "="); ok { + envMap[k] = v + } + } + + s := &State{ + ctx: ctx, + cancel: cancel, + workdir: absWork, + pwd: absWork, + env: env, + envMap: envMap, + FileUpdates: make(map[string]string), + } + s.Setenv("PWD", absWork) + return s, nil +} + +// CloseAndWait cancels the State's Context and waits for any background commands to +// finish. If any remaining background command ended in an unexpected state, +// Close returns a non-nil error. +func (s *State) CloseAndWait(log io.Writer) error { + s.cancel() + wait, err := Wait().Run(s) + if wait != nil { + panic("script: internal error: Wait unexpectedly returns its own WaitFunc") + } + defer func(prev io.Writer) { s.logOut = prev }(s.logOut) + s.logOut = log + if flushErr := s.FlushLog(); err == nil { + err = flushErr + } + return err +} + +// Chdir changes the State's working directory to the given path. +func (s *State) Chdir(path string) error { + dir := s.Path(path) + if _, err := os.Stat(dir); err != nil { + return &fs.PathError{Op: "Chdir", Path: dir, Err: err} + } + s.pwd = dir + s.Setenv("PWD", dir) + return nil +} + +// Context returns the Context with which the State was created. +func (s *State) Context() context.Context { + return s.ctx +} + +// Environ returns a copy of the current script environment, +// in the form "key=value". +func (s *State) Environ() []string { + return append([]string(nil), s.env...) +} + +// ExpandEnv replaces ${var} or $var in the string according to the values of +// the environment variables in s. References to undefined variables are +// replaced by the empty string. +func (s *State) ExpandEnv(str string, inRegexp bool) string { + return os.Expand(str, func(key string) string { + e := s.envMap[key] + if inRegexp { + // Quote to literal strings: we want paths like C:\work\go1.4 to remain + // paths rather than regular expressions. + e = regexp.QuoteMeta(e) + } + return e + }) +} + +// ExtractFiles extracts the files in ar to the state's current directory, +// expanding any environment variables within each name. +// +// The files must reside within the working directory with which the State was +// originally created. +func (s *State) ExtractFiles(ar *txtar.Archive) error { + wd := s.workdir + + // Add trailing separator to terminate wd. + // This prevents extracting to outside paths which prefix wd, + // e.g. extracting to /home/foobar when wd is /home/foo + if wd == "" { + panic("s.workdir is unexpectedly empty") + } + if !os.IsPathSeparator(wd[len(wd)-1]) { + wd += string(filepath.Separator) + } + + for _, f := range ar.Files { + name := s.Path(s.ExpandEnv(f.Name, false)) + + if !strings.HasPrefix(name, wd) { + return fmt.Errorf("file %#q is outside working directory", f.Name) + } + + if err := os.MkdirAll(filepath.Dir(name), 0777); err != nil { + return err + } + if err := os.WriteFile(name, f.Data, 0666); err != nil { + return err + } + } + + return nil +} + +// Getwd returns the directory in which to run the next script command. +func (s *State) Getwd() string { return s.pwd } + +// Logf writes output to the script's log without updating its stdout or stderr +// buffers. (The output log functions as a kind of meta-stderr.) +func (s *State) Logf(format string, args ...any) { + fmt.Fprintf(&s.log, format, args...) 
+} + +func (s *State) LogWriter() io.Writer { + return &s.log +} + +// FlushLog writes out the contents of the script's log and clears the buffer. +func (s *State) FlushLog() error { + _, err := s.logOut.Write(s.log.Bytes()) + s.log.Reset() + return err +} + +// LookupEnv retrieves the value of the environment variable in s named by the key. +func (s *State) LookupEnv(key string) (string, bool) { + v, ok := s.envMap[key] + return v, ok +} + +// Path returns the absolute path in the host operating system for a +// script-based (generally slash-separated and relative) path. +func (s *State) Path(path string) string { + if filepath.IsAbs(path) { + return filepath.Clean(path) + } + return filepath.Join(s.pwd, path) +} + +// Setenv sets the value of the environment variable in s named by the key. +func (s *State) Setenv(key, value string) error { + s.env = cleanEnv(append(s.env, key+"="+value), s.pwd) + s.envMap[key] = value + return nil +} + +// Stdout returns the stdout output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stdout() string { return s.stdout } + +// Stderr returns the stderr output of the last command run, +// or the empty string if no command has been run. +func (s *State) Stderr() string { return s.stderr } + +// cleanEnv returns a copy of env with any duplicates removed in favor of +// later values and any required system variables defined. +// +// If env is nil, cleanEnv copies the environment from os.Environ(). +func cleanEnv(env []string, pwd string) []string { + // There are some funky edge-cases in this logic, especially on Windows (with + // case-insensitive environment variables and variables with keys like "=C:"). + // Rather than duplicating exec.dedupEnv here, cheat and use exec.Cmd directly. + cmd := &exec.Cmd{Env: env} + cmd.Dir = pwd + return cmd.Environ() +} diff --git a/vendor/github.com/cilium/cilium/pkg/hive/shutdowner.go b/vendor/github.com/cilium/hive/shutdowner.go similarity index 100% rename from vendor/github.com/cilium/cilium/pkg/hive/shutdowner.go rename to vendor/github.com/cilium/hive/shutdowner.go diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/github.com/cilium/statedb/LICENSE similarity index 99% rename from vendor/google.golang.org/appengine/LICENSE rename to vendor/github.com/cilium/statedb/LICENSE index d645695673..a2e486a803 100644 --- a/vendor/google.golang.org/appengine/LICENSE +++ b/vendor/github.com/cilium/statedb/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} Authors of Cilium Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -200,3 +199,4 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ diff --git a/vendor/github.com/cilium/statedb/index/bool.go b/vendor/github.com/cilium/statedb/index/bool.go new file mode 100644 index 0000000000..8e37371542 --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/bool.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import "strconv" + +var ( + trueKey = []byte{'T'} + falseKey = []byte{'F'} +) + +func Bool(b bool) Key { + if b { + return trueKey + } + return falseKey +} + +func BoolString(s string) (Key, error) { + b, err := strconv.ParseBool(s) + return Bool(b), err +} diff --git a/vendor/github.com/cilium/statedb/index/int.go b/vendor/github.com/cilium/statedb/index/int.go new file mode 100644 index 0000000000..caf26d8a88 --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/int.go @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import ( + "encoding/binary" + "strconv" +) + +// The indexing functions on integers should use big-endian encoding. +// This allows prefix searching on integers as the most significant +// byte is first. +// For example to find 16-bit key larger than 260 (0x0104) from 3 (0x0003) +// and 270 (0x0109) +// 00 (3) < 01 (260) => skip, +// 01 (270) >= 01 (260) => 09 > 04 => found! + +func Int(n int) Key { + return Int32(int32(n)) +} + +func IntString(s string) (Key, error) { + return Int32String(s) +} + +func Int64(n int64) Key { + return Uint64(uint64(n)) +} + +func Int64String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return Key{}, err + } + return Uint64(uint64(n)), nil +} + +func Int32(n int32) Key { + return Uint32(uint32(n)) +} + +func Int32String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return Key{}, err + } + return Uint32(uint32(n)), nil +} + +func Int16(n int16) Key { + return Uint16(uint16(n)) +} + +func Int16String(s string) (Key, error) { + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + return Key{}, err + } + return Uint16(uint16(n)), nil +} + +func Uint64(n uint64) Key { + return binary.BigEndian.AppendUint64(nil, n) +} + +func Uint64String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return Key{}, err + } + return Uint64(n), nil +} + +func Uint32(n uint32) Key { + return binary.BigEndian.AppendUint32(nil, n) +} + +func Uint32String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return Key{}, err + } + return Uint32(uint32(n)), nil +} + +func Uint16(n uint16) Key { + return binary.BigEndian.AppendUint16(nil, n) +} + +func Uint16String(s string) (Key, error) { + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + return Key{}, err + } + return Uint16(uint16(n)), nil +} diff --git a/vendor/github.com/cilium/statedb/index/keyset.go b/vendor/github.com/cilium/statedb/index/keyset.go new file mode 100644 index 0000000000..b9dd90f62b --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/keyset.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import ( + "bytes" +) + +// Key is a byte slice describing a key used in an index by statedb. 
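+//
+// Keys are compared bytewise, so the fixed-width big-endian integer
+// encodings in this package (e.g. Uint16(260), which yields Key{0x01, 0x04})
+// also order numerically.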
+type Key []byte + +func (k Key) Equal(k2 Key) bool { + return bytes.Equal(k, k2) +} + +type KeySet struct { + head Key + tail []Key +} + +func (ks KeySet) First() Key { + return ks.head +} + +func (ks KeySet) Foreach(fn func(Key)) { + if ks.head == nil { + return + } + fn(ks.head) + for _, k := range ks.tail { + fn(k) + } +} + +func (ks KeySet) Exists(k Key) bool { + if ks.head.Equal(k) { + return true + } + for _, k2 := range ks.tail { + if k2.Equal(k) { + return true + } + } + return false +} + +func NewKeySet(keys ...Key) KeySet { + if len(keys) == 0 { + return KeySet{} + } + return KeySet{keys[0], keys[1:]} +} diff --git a/vendor/github.com/cilium/statedb/index/map.go b/vendor/github.com/cilium/statedb/index/map.go new file mode 100644 index 0000000000..8ea6548aeb --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/map.go @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +func StringMap[V any](m map[string]V) KeySet { + keys := make([]Key, 0, len(m)) + for k := range m { + keys = append(keys, String(k)) + } + return NewKeySet(keys...) +} diff --git a/vendor/github.com/cilium/statedb/index/netip.go b/vendor/github.com/cilium/statedb/index/netip.go new file mode 100644 index 0000000000..7a95f63469 --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/netip.go @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import ( + "bytes" + "net" + "net/netip" +) + +func NetIP(ip net.IP) Key { + // Use the 16-byte form to have a constant-size key. + return bytes.Clone(ip.To16()) +} + +func NetIPAddr(addr netip.Addr) Key { + // Use the 16-byte form to have a constant-size key. + buf := addr.As16() + return buf[:] +} + +func NetIPAddrString(s string) (Key, error) { + addr, err := netip.ParseAddr(s) + if err != nil { + return Key{}, err + } + return NetIPAddr(addr), nil +} + +func NetIPPrefix(prefix netip.Prefix) Key { + // Use the 16-byte form plus bits to have a constant-size key. + addrBytes := prefix.Addr().As16() + return append(addrBytes[:], uint8(prefix.Bits())) +} + +func NetIPPrefixString(s string) (Key, error) { + prefix, err := netip.ParsePrefix(s) + if err != nil { + return Key{}, err + } + return NetIPPrefix(prefix), nil +} diff --git a/vendor/github.com/cilium/statedb/index/seq.go b/vendor/github.com/cilium/statedb/index/seq.go new file mode 100644 index 0000000000..d7063ba2f5 --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/seq.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import "iter" + +// Seq creates a KeySet from an iter.Seq[T] with the given indexing function. +// Example usage: +// +// var strings iter.Seq[string] +// keys := Seq[string](index.String, strings) +func Seq[T any]( + toKey func(T) Key, + seq iter.Seq[T], +) KeySet { + keys := []Key{} + for v := range seq { + keys = append(keys, toKey(v)) + } + return NewKeySet(keys...) +} + +// Seq2 creates a KeySet from an iter.Seq2[A,B] with the given indexing function. +// Example usage: +// +// var seq iter.Seq2[string, int] +// keys := Seq2(index.String, seq) +func Seq2[A, B any]( + toKey func(A) Key, + seq iter.Seq2[A, B], +) KeySet { + keys := []Key{} + for a := range seq { + keys = append(keys, toKey(a)) + } + return NewKeySet(keys...) 
+} diff --git a/vendor/github.com/cilium/statedb/index/set.go b/vendor/github.com/cilium/statedb/index/set.go new file mode 100644 index 0000000000..ae90bab82c --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/set.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import "github.com/cilium/statedb/part" + +// Set creates a KeySet from a part.Set. +func Set[T any](s part.Set[T]) KeySet { + toBytes := s.ToBytesFunc() + switch s.Len() { + case 0: + return NewKeySet() + case 1: + for v := range s.All() { + return NewKeySet(toBytes(v)) + } + panic("BUG: Set.Len() == 1, but ranging returned nothing") + default: + keys := make([]Key, 0, s.Len()) + for v := range s.All() { + keys = append(keys, toBytes(v)) + } + return NewKeySet(keys...) + } +} diff --git a/vendor/github.com/cilium/statedb/index/string.go b/vendor/github.com/cilium/statedb/index/string.go new file mode 100644 index 0000000000..99430bc3bd --- /dev/null +++ b/vendor/github.com/cilium/statedb/index/string.go @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package index + +import ( + "fmt" + "iter" +) + +func String(s string) Key { + return []byte(s) +} + +func FromString(s string) (Key, error) { + return String(s), nil +} + +func Stringer[T fmt.Stringer](s T) Key { + return String(s.String()) +} + +func StringSlice(ss []string) KeySet { + keys := make([]Key, 0, len(ss)) + for _, s := range ss { + keys = append(keys, String(s)) + } + return NewKeySet(keys...) +} + +func StringerSlice[T fmt.Stringer](ss []T) KeySet { + keys := make([]Key, 0, len(ss)) + for _, s := range ss { + keys = append(keys, Stringer(s)) + } + return NewKeySet(keys...) +} + +func StringerSeq[T fmt.Stringer](seq iter.Seq[T]) KeySet { + return Seq[T](Stringer, seq) +} + +func StringerSeq2[A fmt.Stringer, B any](seq iter.Seq2[A, B]) KeySet { + return Seq2[A, B](Stringer, seq) +} diff --git a/vendor/github.com/cilium/statedb/part/cache.go b/vendor/github.com/cilium/statedb/part/cache.go new file mode 100644 index 0000000000..16425f8953 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/cache.go @@ -0,0 +1,37 @@ +package part + +import "unsafe" + +const nodeMutatedSize = 32 // must be power-of-two + +type nodeMutated[T any] struct { + ptrs [nodeMutatedSize]*header[T] + used bool +} + +func (p *nodeMutated[T]) put(ptr *header[T]) { + ptrInt := uintptr(unsafe.Pointer(ptr)) + p.ptrs[slot(ptrInt)] = ptr + p.used = true +} + +func (p *nodeMutated[T]) exists(ptr *header[T]) bool { + ptrInt := uintptr(unsafe.Pointer(ptr)) + return p.ptrs[slot(ptrInt)] == ptr +} + +func slot(p uintptr) int { + var slot uint8 + // use some relevant bits from the pointer + slot = slot + uint8(p>>4) + slot = slot + uint8(p>>12) + slot = slot + uint8(p>>20) + return int(slot & (nodeMutatedSize - 1)) +} + +func (p *nodeMutated[T]) clear() { + if p.used { + clear(p.ptrs[:]) + } + p.used = false +} diff --git a/vendor/github.com/cilium/statedb/part/iterator.go b/vendor/github.com/cilium/statedb/part/iterator.go new file mode 100644 index 0000000000..3292844711 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/iterator.go @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "bytes" + "slices" + "sort" +) + +// Iterator for key and value pairs where value is of type T +type Iterator[T any] struct { + next [][]*header[T] // sets of edges to explore +} + +// Clone returns a copy of the iterator, allowing 
restarting +// the iterator from scratch. +func (it *Iterator[T]) Clone() *Iterator[T] { + // Since the iterator does not mutate the edge array elements themselves ([]*header[T]) + // it is enough to do a shallow clone here. + return &Iterator[T]{slices.Clone(it.next)} +} + +// Next returns the next key, value and true if the value exists, +// otherwise it returns false. +func (it *Iterator[T]) Next() (key []byte, value T, ok bool) { + for len(it.next) > 0 { + // Pop the next set of edges to explore + edges := it.next[len(it.next)-1] + for len(edges) > 0 && edges[0] == nil { + // Node256 may have nil children, so jump over them. + edges = edges[1:] + } + it.next = it.next[:len(it.next)-1] + + if len(edges) == 0 { + continue + } else if len(edges) > 1 { + // More edges remain to be explored, add them back. + it.next = append(it.next, edges[1:]) + } + + // Follow the smallest edge and add its children to the queue. + node := edges[0] + + if node.size() > 0 { + it.next = append(it.next, node.children()) + } + if leaf := node.getLeaf(); leaf != nil { + key = leaf.key + value = leaf.value + ok = true + return + } + } + return +} + +func newIterator[T any](start *header[T]) *Iterator[T] { + if start == nil { + return &Iterator[T]{nil} + } + return &Iterator[T]{[][]*header[T]{{start}}} +} + +func prefixSearch[T any](root *header[T], key []byte) (*Iterator[T], <-chan struct{}) { + this := root + var watch <-chan struct{} + for { + if !this.isLeaf() && this.watch != nil { + // Leaf watch channels only close when the leaf is manipulated, + // thus we only return non-leaf watch channels. + watch = this.watch + } + + switch { + case bytes.Equal(key, this.prefix[:min(len(key), len(this.prefix))]): + return newIterator(this), watch + + case bytes.HasPrefix(key, this.prefix): + key = key[len(this.prefix):] + if len(key) == 0 { + return newIterator(this), this.watch + } + + default: + return newIterator[T](nil), root.watch + } + + this = this.find(key[0]) + if this == nil { + return newIterator[T](nil), root.watch + } + } +} + +func traverseToMin[T any](n *header[T], edges [][]*header[T]) [][]*header[T] { + if leaf := n.getLeaf(); leaf != nil { + return append(edges, []*header[T]{n}) + } + children := n.children() + + // Find the first non-nil child + for len(children) > 0 && children[0] == nil { + children = children[1:] + } + + if len(children) > 0 { + // Add the larger children. + if len(children) > 1 { + edges = append(edges, children[1:]) + } + // Recurse into the smallest child + return traverseToMin(children[0], edges) + } + return edges +} + +func lowerbound[T any](start *header[T], key []byte) *Iterator[T] { + // The starting edges to explore. This contains all larger nodes encountered + // on the path to the node larger or equal to the key. + edges := [][]*header[T]{} + this := start +loop: + for { + switch bytes.Compare(this.prefix, key[:min(len(key), len(this.prefix))]) { + case -1: + // Prefix is smaller, stop here and return an iterator for + // the larger nodes in the parent's. + break loop + + case 0: + if len(this.prefix) == len(key) { + // Exact match. + edges = append(edges, []*header[T]{this}) + break loop + } + + // Prefix matches the beginning of the key, but more + // remains of the key. Drop the matching part and keep + // going further. + key = key[len(this.prefix):] + + if this.kind() == nodeKind256 { + children := this.node256().children[:] + idx := int(key[0]) + this = children[idx] + + // Add all larger children and recurse further. 
+ children = children[idx+1:] + for len(children) > 0 && children[0] == nil { + children = children[1:] + } + edges = append(edges, children) + + if this == nil { + break loop + } + } else { + children := this.children() + + // Find the smallest child that is equal or larger than the lower bound + idx := sort.Search(len(children), func(i int) bool { + return children[i].prefix[0] >= key[0] + }) + if idx >= this.size() { + break loop + } + // Add all larger children and recurse further. + if len(children) > idx+1 { + edges = append(edges, children[idx+1:]) + } + this = children[idx] + } + + case 1: + // Prefix bigger than lowerbound, go to smallest node and stop. + edges = traverseToMin(this, edges) + break loop + } + } + + if len(edges) > 0 { + return &Iterator[T]{edges} + } + return &Iterator[T]{nil} +} diff --git a/vendor/github.com/cilium/statedb/part/map.go b/vendor/github.com/cilium/statedb/part/map.go new file mode 100644 index 0000000000..f3d045598b --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/map.go @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "bytes" + "encoding/json" + "fmt" + "iter" + "reflect" + + "gopkg.in/yaml.v3" +) + +// Map of key-value pairs. The zero value is ready for use, provided +// that the key type has been registered with RegisterKeyType. +// +// Map is a typed wrapper around Tree[T] for working with +// keys that are not []byte. +type Map[K, V any] struct { + bytesFromKey func(K) []byte + tree *Tree[mapKVPair[K, V]] +} + +type mapKVPair[K, V any] struct { + Key K `json:"k" yaml:"k"` + Value V `json:"v" yaml:"v"` +} + +// FromMap copies values from the hash map into the given Map. +// This is not implemented as a method on Map[K,V] as hash maps require the +// comparable constraint and we do not need to limit Map[K, V] to that. +func FromMap[K comparable, V any](m Map[K, V], hm map[K]V) Map[K, V] { + m.ensureTree() + txn := m.tree.Txn() + for k, v := range hm { + txn.Insert(m.bytesFromKey(k), mapKVPair[K, V]{k, v}) + } + m.tree = txn.CommitOnly() + return m +} + +// ensureTree checks that the tree is not nil and allocates it if +// it is. The whole nil tree thing is to make sure that creating +// an empty map does not allocate anything. +func (m *Map[K, V]) ensureTree() { + if m.tree == nil { + m.tree = New[mapKVPair[K, V]](RootOnlyWatch) + } + m.bytesFromKey = lookupKeyType[K]() +} + +// Get a value from the map by its key. +func (m Map[K, V]) Get(key K) (value V, found bool) { + if m.tree == nil { + return + } + kv, _, found := m.tree.Get(m.bytesFromKey(key)) + return kv.Value, found +} + +// Set a value. Returns a new map with the value set. +// Original map is unchanged. +func (m Map[K, V]) Set(key K, value V) Map[K, V] { + m.ensureTree() + txn := m.tree.Txn() + txn.Insert(m.bytesFromKey(key), mapKVPair[K, V]{key, value}) + m.tree = txn.CommitOnly() + return m +} + +// Delete a value from the map. Returns a new map +// without the element pointed to by the key (if found). +func (m Map[K, V]) Delete(key K) Map[K, V] { + if m.tree != nil { + txn := m.tree.Txn() + txn.Delete(m.bytesFromKey(key)) + // Map is a struct passed by value, so we can modify + // it without changing the caller's view of it. 
+ m.tree = txn.CommitOnly() + } + return m +} + +func toSeq2[K, V any](iter *Iterator[mapKVPair[K, V]]) iter.Seq2[K, V] { + return func(yield func(K, V) bool) { + if iter == nil { + return + } + iter = iter.Clone() + for _, kv, ok := iter.Next(); ok; _, kv, ok = iter.Next() { + if !yield(kv.Key, kv.Value) { + break + } + } + } +} + +// LowerBound iterates over all keys in order with value equal +// to or greater than [from]. +func (m Map[K, V]) LowerBound(from K) iter.Seq2[K, V] { + if m.tree == nil { + return toSeq2[K, V](nil) + } + return toSeq2(m.tree.LowerBound(m.bytesFromKey(from))) +} + +// Prefix iterates in order over all keys that start with +// the given prefix. +func (m Map[K, V]) Prefix(prefix K) iter.Seq2[K, V] { + if m.tree == nil { + return toSeq2[K, V](nil) + } + iter, _ := m.tree.Prefix(m.bytesFromKey(prefix)) + return toSeq2(iter) +} + +// All iterates every key-value in the map in order. +// The order is in bytewise order of the byte slice +// returned by bytesFromKey. +func (m Map[K, V]) All() iter.Seq2[K, V] { + if m.tree == nil { + return toSeq2[K, V](nil) + } + return toSeq2(m.tree.Iterator()) +} + +// EqualKeys returns true if both maps contain the same keys. +func (m Map[K, V]) EqualKeys(other Map[K, V]) bool { + switch { + case m.tree == nil && other.tree == nil: + return true + case m.Len() != other.Len(): + return false + default: + iter1 := m.tree.Iterator() + iter2 := other.tree.Iterator() + for { + k1, _, ok := iter1.Next() + if !ok { + break + } + k2, _, _ := iter2.Next() + // Equal lengths, no need to check 'ok' for 'iter2'. + if !bytes.Equal(k1, k2) { + return false + } + } + return true + } +} + +// SlowEqual returns true if the two maps contain the same keys and values. +// Value comparison is implemented with reflect.DeepEqual which makes this +// slow and mostly useful for testing. +func (m Map[K, V]) SlowEqual(other Map[K, V]) bool { + switch { + case m.tree == nil && other.tree == nil: + return true + case m.Len() != other.Len(): + return false + default: + iter1 := m.tree.Iterator() + iter2 := other.tree.Iterator() + for { + k1, v1, ok := iter1.Next() + if !ok { + break + } + k2, v2, _ := iter2.Next() + // Equal lengths, no need to check 'ok' for 'iter2'. + if !bytes.Equal(k1, k2) || !reflect.DeepEqual(v1, v2) { + return false + } + } + return true + } +} + +// Len returns the number of elements in the map. 
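+// It is O(1): the count is maintained on the underlying tree rather than
+// computed by iterating.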
+func (m Map[K, V]) Len() int { + if m.tree == nil { + return 0 + } + return m.tree.size +} + +func (m Map[K, V]) MarshalJSON() ([]byte, error) { + if m.tree == nil { + return []byte("[]"), nil + } + + var b bytes.Buffer + b.WriteRune('[') + iter := m.tree.Iterator() + _, kv, ok := iter.Next() + for ok { + bs, err := json.Marshal(kv) + if err != nil { + return nil, err + } + b.Write(bs) + _, kv, ok = iter.Next() + if ok { + b.WriteRune(',') + } + } + b.WriteRune(']') + return b.Bytes(), nil +} + +func (m *Map[K, V]) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + if d, ok := t.(json.Delim); !ok || d != '[' { + return fmt.Errorf("%T.UnmarshalJSON: expected '[' got %v", m, t) + } + m.ensureTree() + txn := m.tree.Txn() + for dec.More() { + var kv mapKVPair[K, V] + err := dec.Decode(&kv) + if err != nil { + return err + } + txn.Insert(m.bytesFromKey(kv.Key), mapKVPair[K, V]{kv.Key, kv.Value}) + } + + t, err = dec.Token() + if err != nil { + return err + } + if d, ok := t.(json.Delim); !ok || d != ']' { + return fmt.Errorf("%T.UnmarshalJSON: expected ']' got %v", m, t) + } + m.tree = txn.CommitOnly() + return nil +} + +func (m Map[K, V]) MarshalYAML() (any, error) { + kvs := make([]mapKVPair[K, V], 0, m.Len()) + if m.tree != nil { + iter := m.tree.Iterator() + for _, kv, ok := iter.Next(); ok; _, kv, ok = iter.Next() { + kvs = append(kvs, kv) + } + } + return kvs, nil +} + +func (m *Map[K, V]) UnmarshalYAML(value *yaml.Node) error { + if value.Kind != yaml.SequenceNode { + return fmt.Errorf("%T.UnmarshalYAML: expected sequence", m) + } + m.ensureTree() + txn := m.tree.Txn() + for _, e := range value.Content { + var kv mapKVPair[K, V] + if err := e.Decode(&kv); err != nil { + return err + } + txn.Insert(m.bytesFromKey(kv.Key), mapKVPair[K, V]{kv.Key, kv.Value}) + } + m.tree = txn.CommitOnly() + return nil +} diff --git a/vendor/github.com/cilium/statedb/part/node.go b/vendor/github.com/cilium/statedb/part/node.go new file mode 100644 index 0000000000..1f89c9af55 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/node.go @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "bytes" + "fmt" + "sort" + "strings" + "unsafe" +) + +type nodeKind uint8 + +const ( + nodeKindUnknown = iota + nodeKindLeaf + nodeKind4 + nodeKind16 + nodeKind48 + nodeKind256 +) + +// header is the common header shared by all node kinds. 
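+// Its flags field packs the node kind into the top 4 bits and the child
+// count into the low 9 bits; for example a node4 (kind 2) holding 3
+// children has flags 0x2003.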
+type header[T any] struct { + flags uint16 // kind(4b) | unused(3b) | size(9b) + prefix []byte // the compressed prefix, [0] is the key + watch chan struct{} // watch channel that is closed when this node mutates +} + +const kindMask = uint16(0b1111_000_00000000_0) + +func (n *header[T]) kind() nodeKind { + return nodeKind(n.flags >> 12) +} + +func (n *header[T]) setKind(k nodeKind) { + n.flags = (n.flags & ^kindMask) | (uint16(k&0b1111) << 12) +} + +const sizeMask = uint16(0b0000_000_1111_1111_1) + +func (n *header[T]) cap() int { + switch n.kind() { + case nodeKindLeaf: + return 0 + case nodeKind4: + return 4 + case nodeKind16: + return 16 + case nodeKind48: + return 48 + case nodeKind256: + return 256 + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) isLeaf() bool { + return n.kind() == nodeKindLeaf +} + +func (n *header[T]) getLeaf() *leaf[T] { + switch n.kind() { + case nodeKindLeaf: + return (*leaf[T])(unsafe.Pointer(n)) + case nodeKind4: + return n.node4().leaf + case nodeKind16: + return n.node16().leaf + case nodeKind48: + return n.node48().leaf + case nodeKind256: + return n.node256().leaf + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) setLeaf(l *leaf[T]) { + switch n.kind() { + case nodeKindLeaf: + panic("cannot setLeaf on a leaf[T]") + case nodeKind4: + n.node4().leaf = l + case nodeKind16: + n.node16().leaf = l + case nodeKind48: + n.node48().leaf = l + case nodeKind256: + n.node256().leaf = l + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) size() int { + return int(n.flags & sizeMask) +} + +func (n *header[T]) setSize(size int) { + n.flags = (n.flags & ^sizeMask) | uint16(size)&sizeMask +} + +func (n *header[T]) self() *header[T] { + return n +} + +func (n *header[T]) node4() *node4[T] { + return (*node4[T])(unsafe.Pointer(n)) +} + +func (n *header[T]) node16() *node16[T] { + return (*node16[T])(unsafe.Pointer(n)) +} + +func (n *header[T]) node48() *node48[T] { + return (*node48[T])(unsafe.Pointer(n)) +} + +func (n *header[T]) node256() *node256[T] { + return (*node256[T])(unsafe.Pointer(n)) +} + +// clone returns a shallow clone of the node. +// We are working on the assumption here that only +// value-types are mutated in the returned clone. 
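+// If watch is true the clone receives a fresh watch channel; otherwise the
+// clone's watch is left nil.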
+func (n *header[T]) clone(watch bool) *header[T] { + var nCopy *header[T] + switch n.kind() { + case nodeKindLeaf: + l := *n.getLeaf() + nCopy = (&l).self() + case nodeKind4: + n4 := *n.node4() + nCopy = (&n4).self() + case nodeKind16: + n16 := *n.node16() + nCopy = (&n16).self() + case nodeKind48: + n48 := *n.node48() + nCopy = (&n48).self() + case nodeKind256: + nCopy256 := *n.node256() + nCopy = (&nCopy256).self() + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } + if watch { + nCopy.watch = make(chan struct{}) + } else { + nCopy.watch = nil + } + return nCopy +} + +func (n *header[T]) promote(watch bool) *header[T] { + switch n.kind() { + case nodeKindLeaf: + node4 := &node4[T]{} + node4.prefix = n.prefix + node4.leaf = n.getLeaf() + node4.setKind(nodeKind4) + if watch { + node4.watch = make(chan struct{}) + } + return node4.self() + case nodeKind4: + node4 := n.node4() + node16 := &node16[T]{header: *n} + node16.setKind(nodeKind16) + node16.leaf = n.getLeaf() + size := node4.size() + copy(node16.children[:], node4.children[:size]) + copy(node16.keys[:], node4.keys[:size]) + if watch { + node16.watch = make(chan struct{}) + } + return node16.self() + case nodeKind16: + node16 := n.node16() + node48 := &node48[T]{header: *n} + node48.setKind(nodeKind48) + node48.leaf = n.getLeaf() + copy(node48.children[:], node16.children[:node16.size()]) + for i, k := range node16.keys[:node16.size()] { + node48.index[k] = int8(i) + } + if watch { + node48.watch = make(chan struct{}) + } + return node48.self() + case nodeKind48: + node48 := n.node48() + node256 := &node256[T]{header: *n} + node256.setKind(nodeKind256) + node256.leaf = n.getLeaf() + + // Since Node256 has children indexed directly, iterate over the children + // to assign them to the right index. 
+ for _, child := range node48.children[:node48.size()] { + node256.children[child.prefix[0]] = child + } + if watch { + node256.watch = make(chan struct{}) + } + return node256.self() + case nodeKind256: + panic("BUG: should not need to promote node256") + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) printTree(level int) { + fmt.Print(strings.Repeat(" ", level)) + + var children []*header[T] + switch n.kind() { + case nodeKindLeaf: + fmt.Printf("leaf[%x]:", n.prefix) + case nodeKind4: + fmt.Printf("node4[%x]:", n.prefix) + children = n.node4().children[:n.size()] + case nodeKind16: + fmt.Printf("node16[%x]:", n.prefix) + children = n.node16().children[:n.size()] + case nodeKind48: + fmt.Printf("node48[%x]:", n.prefix) + children = n.node48().children[:n.size()] + case nodeKind256: + fmt.Printf("node256[%x]:", n.prefix) + children = n.node256().children[:] + default: + panic("unknown node kind") + } + if leaf := n.getLeaf(); leaf != nil { + fmt.Printf(" %x -> %v (L:%p W:%p)", leaf.key, leaf.value, leaf, leaf.watch) + } + fmt.Printf(" (N:%p, W:%p)\n", n, n.watch) + + for _, child := range children { + if child != nil { + child.printTree(level + 1) + } + } +} + +func (n *header[T]) children() []*header[T] { + switch n.kind() { + case nodeKindLeaf: + return nil + case nodeKind4: + return n.node4().children[0:n.size():4] + case nodeKind16: + return n.node16().children[0:n.size():16] + case nodeKind48: + return n.node48().children[0:n.size():48] + case nodeKind256: + return n.node256().children[:] + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) findIndex(key byte) (*header[T], int) { + switch n.kind() { + case nodeKindLeaf: + return nil, 0 + case nodeKind4: + n4 := n.node4() + size := n4.size() + for i := 0; i < int(size); i++ { + if n4.keys[i] == key { + return n4.children[i], i + } else if n4.keys[i] > key { + return nil, i + } + } + return nil, size + case nodeKind16: + n16 := n.node16() + size := n16.size() + for i := 0; i < int(size); i++ { + if n16.keys[i] == key { + return n16.children[i], i + } else if n16.keys[i] > key { + return nil, i + } + } + return nil, size + case nodeKind48: + children := n.children() + idx := sort.Search(len(children), func(i int) bool { + return children[i].prefix[0] >= key + }) + if idx >= n.size() || children[idx].prefix[0] != key { + // No node found, return nil and the index into + // which it should go. 
+ return nil, idx + } + return children[idx], idx + case nodeKind256: + return n.node256().children[key], int(key) + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) find(key byte) *header[T] { + switch n.kind() { + case nodeKindLeaf: + return nil + case nodeKind4: + n4 := n.node4() + size := n4.size() + for i := 0; i < int(size); i++ { + if n4.keys[i] == key { + return n4.children[i] + } else if n4.keys[i] > key { + return nil + } + } + return nil + case nodeKind16: + n16 := n.node16() + size := n16.size() + for i := 0; i < int(size); i++ { + if n16.keys[i] == key { + return n16.children[i] + } else if n16.keys[i] > key { + return nil + } + } + return nil + case nodeKind48: + n48 := n.node48() + idx := n48.index[key] + if idx < 0 { + return nil + } + return n48.children[idx] + case nodeKind256: + return n.node256().children[key] + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } +} + +func (n *header[T]) insert(idx int, child *header[T]) { + size := n.size() + newSize := size + 1 + switch n.kind() { + case nodeKind4: + n4 := n.node4() + // Shift to make room + copy(n4.children[idx+1:newSize], n4.children[idx:newSize]) + copy(n4.keys[idx+1:newSize], n4.keys[idx:newSize]) + n4.children[idx] = child + n4.keys[idx] = child.prefix[0] + case nodeKind16: + n16 := n.node16() + // Shift to make room + copy(n16.children[idx+1:newSize], n16.children[idx:newSize]) + copy(n16.keys[idx+1:newSize], n16.keys[idx:newSize]) + n16.children[idx] = child + n16.keys[idx] = child.prefix[0] + case nodeKind48: + // Shift to make room + n48 := n.node48() + for i := size - 1; i >= idx; i-- { + c := n48.children[i] + n48.index[c.prefix[0]] = int8(i + 1) + n48.children[i+1] = c + } + n48.children[idx] = child + n48.index[child.prefix[0]] = int8(idx) + case nodeKind256: + n.node256().children[child.prefix[0]] = child + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } + n.setSize(size + 1) +} + +func (n *header[T]) remove(idx int) { + newSize := n.size() - 1 + switch n.kind() { + case nodeKind4: + size := n.size() + n4 := n.node4() + copy(n4.keys[idx:size], n4.keys[idx+1:size]) + copy(n4.children[idx:size], n4.children[idx+1:size]) + n4.children[newSize] = nil + n4.keys[newSize] = 255 + case nodeKind16: + size := n.size() + n16 := n.node16() + copy(n16.keys[idx:size], n16.keys[idx+1:size]) + copy(n16.children[idx:size], n16.children[idx+1:size]) + n16.children[newSize] = nil + n16.keys[newSize] = 255 + case nodeKind48: + children := n.children() + key := children[idx].prefix[0] + n48 := n.node48() + for i := idx; i < newSize; i++ { + child := children[i+1] + children[i] = child + n48.index[child.prefix[0]] = int8(i) + } + n48.index[key] = -1 + children[newSize] = nil + case nodeKind256: + n.node256().children[idx] = nil + default: + panic(fmt.Sprintf("unknown node kind: %x", n.kind())) + } + n.setSize(newSize) +} + +type leaf[T any] struct { + header[T] + key []byte + value T +} + +func newLeaf[T any](o *options, prefix, key []byte, value T) *leaf[T] { + leaf := &leaf[T]{key: key, value: value} + leaf.prefix = prefix + leaf.setKind(nodeKindLeaf) + + if !o.rootOnlyWatch { + leaf.watch = make(chan struct{}) + } + + return leaf +} + +type node4[T any] struct { + header[T] + keys [4]byte + children [4]*header[T] + leaf *leaf[T] // non-nil if this node contains a value +} + +type node16[T any] struct { + header[T] + keys [16]byte + children [16]*header[T] + leaf *leaf[T] // non-nil if this node contains a value +} + +type node48[T any] 
struct { + header[T] + index [256]int8 + children [48]*header[T] + leaf *leaf[T] // non-nil if this node contains a value +} + +type node256[T any] struct { + header[T] + children [256]*header[T] + leaf *leaf[T] // non-nil if this node contains a value +} + +func newNode4[T any]() *header[T] { + n := &node4[T]{header: header[T]{watch: make(chan struct{})}} + n.setKind(nodeKind4) + return n.self() +} + +func search[T any](root *header[T], key []byte) (value T, watch <-chan struct{}, ok bool) { + this := root + for { + watch = this.watch + + // Consume the prefix + if !bytes.HasPrefix(key, this.prefix) { + return + } + key = key[len(this.prefix):] + + if len(key) == 0 { + if leaf := this.getLeaf(); leaf != nil { + value = leaf.value + watch = leaf.watch + ok = true + } + return + } + + this = this.find(key[0]) + if this == nil { + return + } + } +} + +func commonPrefix(a, b []byte) []byte { + n := min(len(a), len(b)) + for i := 0; i < n; i++ { + if a[i] != b[i] { + return a[:i] + } + } + return a[:n] +} diff --git a/vendor/github.com/cilium/statedb/part/ops.go b/vendor/github.com/cilium/statedb/part/ops.go new file mode 100644 index 0000000000..3787bfca36 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/ops.go @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +// Ops is the common operations that can be performed with a Tree +// or Txn. +type Ops[T any] interface { + // Len returns the number of objects in the tree. + Len() int + + // Get fetches the value associated with the given key. + // Returns the value, a watch channel (which is closed on + // modification to the key) and boolean which is true if + // value was found. + Get(key []byte) (T, <-chan struct{}, bool) + + // Prefix returns an iterator for all objects that starts with the + // given prefix, and a channel that closes when any objects matching + // the given prefix are upserted or deleted. + Prefix(key []byte) (*Iterator[T], <-chan struct{}) + + // LowerBound returns an iterator for all objects that have a + // key equal or higher than the given 'key'. + LowerBound(key []byte) *Iterator[T] + + // RootWatch returns a watch channel for the root of the tree. + // Since this is the channel associated with the root, this closes + // when there are any changes to the tree. + RootWatch() <-chan struct{} + + // Iterator returns an iterator for all objects. + Iterator() *Iterator[T] + + // PrintTree to the standard output. For debugging. + PrintTree() +} + +var ( + _ Ops[int] = &Tree[int]{} + _ Ops[int] = &Txn[int]{} +) diff --git a/vendor/github.com/cilium/statedb/part/registry.go b/vendor/github.com/cilium/statedb/part/registry.go new file mode 100644 index 0000000000..d54245bc54 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/registry.go @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sync" + "unicode/utf8" +) + +// keyTypeRegistry is a registry of functions to convert to/from keys (of type K). +// This mechanism enables use of zero value and JSON marshalling and unmarshalling +// with Map and Set. +var keyTypeRegistry sync.Map // map[reflect.Type]func(K) []byte + +// RegisterKeyType registers a new key type to be used with the Map and Set types. +// Intended to be called from init() functions. +// For Set-only usage only the [bytesFromKey] function is needed. 
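+//
+// For example (an illustrative custom key type; the common built-in types
+// are registered in init below):
+//
+//	type ID uint32
+//
+//	func init() {
+//		RegisterKeyType[ID](func(id ID) []byte {
+//			return binary.BigEndian.AppendUint32(nil, uint32(id))
+//		})
+//	}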
+func RegisterKeyType[K any](bytesFromKey func(K) []byte) { + keyType := reflect.TypeFor[K]() + keyTypeRegistry.Store( + keyType, + bytesFromKey, + ) +} + +func lookupKeyType[K any]() func(K) []byte { + keyType := reflect.TypeFor[K]() + funcAny, ok := keyTypeRegistry.Load(keyType) + if !ok { + panic(fmt.Sprintf("Key type %q not registered with part.RegisterMapKeyType()", keyType)) + } + return funcAny.(func(K) []byte) +} + +func init() { + // Register common key types. + RegisterKeyType[string](func(s string) []byte { return []byte(s) }) + RegisterKeyType[[]byte](func(b []byte) []byte { return b }) + RegisterKeyType[byte](func(b byte) []byte { return []byte{b} }) + RegisterKeyType[rune](func(r rune) []byte { return utf8.AppendRune(nil, r) }) + RegisterKeyType[complex128](func(c complex128) []byte { + buf := make([]byte, 0, 16) + buf = binary.BigEndian.AppendUint64(buf, math.Float64bits(real(c))) + buf = binary.BigEndian.AppendUint64(buf, math.Float64bits(imag(c))) + return buf + }) + RegisterKeyType[float64](func(x float64) []byte { return binary.BigEndian.AppendUint64(nil, math.Float64bits(x)) }) + RegisterKeyType[float32](func(x float32) []byte { return binary.BigEndian.AppendUint32(nil, math.Float32bits(x)) }) + RegisterKeyType[uint64](func(x uint64) []byte { return binary.BigEndian.AppendUint64(nil, x) }) + RegisterKeyType[uint32](func(x uint32) []byte { return binary.BigEndian.AppendUint32(nil, x) }) + RegisterKeyType[uint16](func(x uint16) []byte { return binary.BigEndian.AppendUint16(nil, x) }) + RegisterKeyType[int64](func(x int64) []byte { return binary.BigEndian.AppendUint64(nil, uint64(x)) }) + RegisterKeyType[int32](func(x int32) []byte { return binary.BigEndian.AppendUint32(nil, uint32(x)) }) + RegisterKeyType[int16](func(x int16) []byte { return binary.BigEndian.AppendUint16(nil, uint16(x)) }) + RegisterKeyType[int](func(x int) []byte { return binary.BigEndian.AppendUint64(nil, uint64(x)) }) + + var ( + trueBytes = []byte{'T'} + falseBytes = []byte{'F'} + ) + RegisterKeyType[bool](func(b bool) []byte { + if b { + return trueBytes + } else { + return falseBytes + } + }) + +} diff --git a/vendor/github.com/cilium/statedb/part/set.go b/vendor/github.com/cilium/statedb/part/set.go new file mode 100644 index 0000000000..89a91f0a67 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/set.go @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "bytes" + "encoding/json" + "fmt" + "iter" + "slices" + + "gopkg.in/yaml.v3" +) + +// Set is a persistent (immutable) set of values. A Set can be +// defined for any type for which a byte slice key can be derived. +// +// A zero value Set[T] can be used provided that the conversion +// function for T have been registered with RegisterKeyType. +// For Set-only use only [bytesFromKey] needs to be defined. +type Set[T any] struct { + toBytes func(T) []byte + tree *Tree[T] +} + +// NewSet creates a new set of T. +// The value type T must be registered with RegisterKeyType. +func NewSet[T any](values ...T) Set[T] { + s := Set[T]{tree: New[T](RootOnlyWatch)} + s.toBytes = lookupKeyType[T]() + if len(values) > 0 { + txn := s.tree.Txn() + for _, v := range values { + txn.Insert(s.toBytes(v), v) + } + s.tree = txn.CommitOnly() + } + return s +} + +// Set a value. Returns a new set. Original is unchanged. 
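+//
+// For example (sketch):
+//
+//	s1 := NewSet("a")
+//	s2 := s1.Set("b")
+//	// s1 still contains only "a"; s2 contains "a" and "b".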
+func (s Set[T]) Set(v T) Set[T] { + if s.tree == nil { + return NewSet(v) + } + txn := s.tree.Txn() + txn.Insert(s.toBytes(v), v) + s.tree = txn.CommitOnly() // As Set is passed by value we can just modify it. + return s +} + +// Delete returns a new set without the value. The original +// set is unchanged. +func (s Set[T]) Delete(v T) Set[T] { + if s.tree == nil { + return s + } + txn := s.tree.Txn() + txn.Delete(s.toBytes(v)) + s.tree = txn.CommitOnly() + return s +} + +// Has returns true if the set has the value. +func (s Set[T]) Has(v T) bool { + if s.tree == nil { + return false + } + _, _, found := s.tree.Get(s.toBytes(v)) + return found +} + +// All returns an iterator for all values. +func (s Set[T]) All() iter.Seq[T] { + if s.tree == nil { + return toSeq[T](nil) + } + return toSeq(s.tree.Iterator()) +} + +// Union returns a set that is the union of the values +// in the input sets. +func (s Set[T]) Union(s2 Set[T]) Set[T] { + if s2.tree == nil { + return s + } + if s.tree == nil { + return s2 + } + txn := s.tree.Txn() + iter := s2.tree.Iterator() + for k, v, ok := iter.Next(); ok; k, v, ok = iter.Next() { + txn.Insert(k, v) + } + s.tree = txn.CommitOnly() + return s +} + +// Difference returns a set with values that only +// appear in the first set. +func (s Set[T]) Difference(s2 Set[T]) Set[T] { + if s.tree == nil || s2.tree == nil { + return s + } + + txn := s.tree.Txn() + iter := s2.tree.Iterator() + for k, _, ok := iter.Next(); ok; k, _, ok = iter.Next() { + txn.Delete(k) + } + s.tree = txn.CommitOnly() + return s +} + +// Len returns the number of values in the set. +func (s Set[T]) Len() int { + if s.tree == nil { + return 0 + } + return s.tree.size +} + +// Equal returns true if the two sets contain the equal keys. +func (s Set[T]) Equal(other Set[T]) bool { + switch { + case s.tree == nil && other.tree == nil: + return true + case s.Len() != other.Len(): + return false + default: + iter1 := s.tree.Iterator() + iter2 := other.tree.Iterator() + for { + k1, _, ok := iter1.Next() + if !ok { + break + } + k2, _, _ := iter2.Next() + // Equal lengths, no need to check 'ok' for 'iter2'. + if !bytes.Equal(k1, k2) { + return false + } + } + return true + } +} + +// ToBytesFunc returns the function to extract the key from +// the element type. Useful for utilities that are interested +// in the key. 
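+// For example, the index package's Set helper uses it to turn each element
+// into an index key.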
+func (s Set[T]) ToBytesFunc() func(T) []byte { + return s.toBytes +} + +func (s Set[T]) MarshalJSON() ([]byte, error) { + if s.tree == nil { + return []byte("[]"), nil + } + var b bytes.Buffer + b.WriteRune('[') + iter := s.tree.Iterator() + _, v, ok := iter.Next() + for ok { + bs, err := json.Marshal(v) + if err != nil { + return nil, err + } + b.Write(bs) + _, v, ok = iter.Next() + if ok { + b.WriteRune(',') + } + } + b.WriteRune(']') + return b.Bytes(), nil +} + +func (s *Set[T]) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + if d, ok := t.(json.Delim); !ok || d != '[' { + return fmt.Errorf("%T.UnmarshalJSON: expected '[' got %v", s, t) + } + + if s.tree == nil { + *s = NewSet[T]() + } + txn := s.tree.Txn() + + for dec.More() { + var x T + err := dec.Decode(&x) + if err != nil { + return err + } + txn.Insert(s.toBytes(x), x) + } + s.tree = txn.CommitOnly() + + t, err = dec.Token() + if err != nil { + return err + } + if d, ok := t.(json.Delim); !ok || d != ']' { + return fmt.Errorf("%T.UnmarshalJSON: expected ']' got %v", s, t) + } + return nil +} + +func (s Set[T]) MarshalYAML() (any, error) { + // TODO: Once yaml.v3 supports iter.Seq, drop the Collect(). + return slices.Collect(s.All()), nil +} + +func (s *Set[T]) UnmarshalYAML(value *yaml.Node) error { + if value.Kind != yaml.SequenceNode { + return fmt.Errorf("%T.UnmarshalYAML: expected sequence", s) + } + + if s.tree == nil { + *s = NewSet[T]() + } + txn := s.tree.Txn() + + for _, e := range value.Content { + var v T + if err := e.Decode(&v); err != nil { + return err + } + txn.Insert(s.toBytes(v), v) + } + s.tree = txn.CommitOnly() + return nil +} + +func toSeq[T any](iter *Iterator[T]) iter.Seq[T] { + return func(yield func(T) bool) { + if iter == nil { + return + } + iter = iter.Clone() + for _, x, ok := iter.Next(); ok; _, x, ok = iter.Next() { + if !yield(x) { + break + } + } + } +} diff --git a/vendor/github.com/cilium/statedb/part/tree.go b/vendor/github.com/cilium/statedb/part/tree.go new file mode 100644 index 0000000000..8e542a7331 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/tree.go @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +// Tree is a persistent (immutable) adaptive radix tree. It supports +// map-like operations on values keyed by []byte and additionally +// prefix searching and lower bound searching. Each node in the tree +// has an associated channel that is closed when that node is mutated. +// This allows watching any part of the tree (any prefix) for changes. +type Tree[T any] struct { + opts *options + root *header[T] + size int // the number of objects in the tree +} + +// New constructs a new tree. +func New[T any](opts ...Option) *Tree[T] { + var o options + for _, opt := range opts { + opt(&o) + } + return &Tree[T]{ + root: newNode4[T](), + size: 0, + opts: &o, + } +} + +type Option func(*options) + +// RootOnlyWatch sets the tree to only have a watch channel on the root +// node. This improves the speed at the cost of having a much more coarse +// grained notifications. +func RootOnlyWatch(o *options) { o.rootOnlyWatch = true } + +// Txn constructs a new transaction against the tree. Transactions +// enable efficient large mutations of the tree by caching cloned +// nodes. 
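+//
+// For example (an illustrative sketch):
+//
+//	tree := New[int]()
+//	txn := tree.Txn()
+//	txn.Insert([]byte("a"), 1)
+//	txn.Insert([]byte("b"), 2)
+//	tree = txn.Commit() // the returned tree has both keys; the original is unchanged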
+func (t *Tree[T]) Txn() *Txn[T] { + txn := &Txn[T]{ + Tree: *t, + watches: make(map[chan struct{}]struct{}), + } + return txn +} + +// Len returns the number of objects in the tree. +func (t *Tree[T]) Len() int { + return t.size +} + +// Get fetches the value associated with the given key. +// Returns the value, a watch channel (which is closed on +// modification to the key) and boolean which is true if +// value was found. +func (t *Tree[T]) Get(key []byte) (T, <-chan struct{}, bool) { + value, watch, ok := search(t.root, key) + if t.opts.rootOnlyWatch { + watch = t.root.watch + } + return value, watch, ok +} + +// Prefix returns an iterator for all objects that starts with the +// given prefix, and a channel that closes when any objects matching +// the given prefix are upserted or deleted. +func (t *Tree[T]) Prefix(prefix []byte) (*Iterator[T], <-chan struct{}) { + iter, watch := prefixSearch(t.root, prefix) + if t.opts.rootOnlyWatch { + watch = t.root.watch + } + return iter, watch +} + +// RootWatch returns a watch channel for the root of the tree. +// Since this is the channel associated with the root, this closes +// when there are any changes to the tree. +func (t *Tree[T]) RootWatch() <-chan struct{} { + return t.root.watch +} + +// LowerBound returns an iterator for all keys that have a value +// equal to or higher than 'key'. +func (t *Tree[T]) LowerBound(key []byte) *Iterator[T] { + return lowerbound(t.root, key) +} + +// Insert inserts the key into the tree with the given value. +// Returns the old value if it exists and a new tree. +func (t *Tree[T]) Insert(key []byte, value T) (old T, hadOld bool, tree *Tree[T]) { + txn := t.Txn() + old, hadOld = txn.Insert(key, value) + tree = txn.Commit() + return +} + +// Modify a value in the tree. If the key does not exist the modify +// function is called with the zero value for T. It is up to the +// caller to not mutate the value in-place and to return a clone. +// Returns the old value if it exists. +func (t *Tree[T]) Modify(key []byte, mod func(T) T) (old T, hadOld bool, tree *Tree[T]) { + txn := t.Txn() + old, hadOld = txn.Modify(key, mod) + tree = txn.Commit() + return +} + +// Delete the given key from the tree. +// Returns the old value if it exists and the new tree. +func (t *Tree[T]) Delete(key []byte) (old T, hadOld bool, tree *Tree[T]) { + txn := t.Txn() + old, hadOld = txn.Delete(key) + tree = txn.Commit() + return +} + +// Iterator returns an iterator for all objects. +func (t *Tree[T]) Iterator() *Iterator[T] { + return newIterator[T](t.root) +} + +// PrintTree to the standard output. For debugging. +func (t *Tree[T]) PrintTree() { + t.root.printTree(0) +} + +type options struct { + rootOnlyWatch bool +} diff --git a/vendor/github.com/cilium/statedb/part/txn.go b/vendor/github.com/cilium/statedb/part/txn.go new file mode 100644 index 0000000000..943ab23aa5 --- /dev/null +++ b/vendor/github.com/cilium/statedb/part/txn.go @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package part + +import ( + "bytes" +) + +// Txn is a transaction against a tree. It allows doing efficient +// modifications to a tree by caching and reusing cloned nodes. +type Txn[T any] struct { + // tree is the tree being modified + Tree[T] + + // mutated is the set of nodes mutated in this transaction + // that we can keep mutating without cloning them again. + // It is cleared if the transaction is cloned or iterated + // upon. 
+ mutated nodeMutated[T] + + // watches contains the channels of cloned nodes that should be closed + // when transaction is committed. + watches map[chan struct{}]struct{} + + // deleteParentsCache keeps the last allocated slice to avoid + // reallocating it on every deletion. + deleteParentsCache []deleteParent[T] +} + +// Len returns the number of objects in the tree. +func (txn *Txn[T]) Len() int { + return txn.size +} + +// Clone returns a clone of the transaction. The clone is unaffected +// by any future changes done with the original transaction. +func (txn *Txn[T]) Clone() *Txn[T] { + // Clear the mutated nodes so that the returned clone won't be changed by + // further modifications in this transaction. + txn.mutated.clear() + return &Txn[T]{ + Tree: txn.Tree, + watches: map[chan struct{}]struct{}{}, + deleteParentsCache: nil, + } +} + +// Insert or update the tree with the given key and value. +// Returns the old value if it exists. +func (txn *Txn[T]) Insert(key []byte, value T) (old T, hadOld bool) { + old, hadOld, txn.root = txn.insert(txn.root, key, value) + if !hadOld { + txn.size++ + } + return +} + +// Modify a value in the tree. If the key does not exist the modify +// function is called with the zero value for T. It is up to the +// caller to not mutate the value in-place and to return a clone. +// Returns the old value if it exists. +func (txn *Txn[T]) Modify(key []byte, mod func(T) T) (old T, hadOld bool) { + old, hadOld, txn.root = txn.modify(txn.root, key, mod) + if !hadOld { + txn.size++ + } + return +} + +// Delete the given key from the tree. +// Returns the old value if it exists. +func (txn *Txn[T]) Delete(key []byte) (old T, hadOld bool) { + old, hadOld, txn.root = txn.delete(txn.root, key) + if hadOld { + txn.size-- + } + return +} + +// RootWatch returns a watch channel for the root of the tree. +// Since this is the channel associated with the root, this closes +// when there are any changes to the tree. +func (txn *Txn[T]) RootWatch() <-chan struct{} { + return txn.root.watch +} + +// Get fetches the value associated with the given key. +// Returns the value, a watch channel (which is closed on +// modification to the key) and boolean which is true if +// value was found. +func (txn *Txn[T]) Get(key []byte) (T, <-chan struct{}, bool) { + value, watch, ok := search(txn.root, key) + if txn.opts.rootOnlyWatch { + watch = txn.root.watch + } + return value, watch, ok +} + +// Prefix returns an iterator for all objects that starts with the +// given prefix, and a channel that closes when any objects matching +// the given prefix are upserted or deleted. +func (txn *Txn[T]) Prefix(key []byte) (*Iterator[T], <-chan struct{}) { + txn.mutated.clear() + iter, watch := prefixSearch(txn.root, key) + if txn.opts.rootOnlyWatch { + watch = txn.root.watch + } + return iter, watch +} + +// LowerBound returns an iterator for all objects that have a +// key equal or higher than the given 'key'. +func (txn *Txn[T]) LowerBound(key []byte) *Iterator[T] { + txn.mutated.clear() + return lowerbound(txn.root, key) +} + +// Iterator returns an iterator for all objects. +func (txn *Txn[T]) Iterator() *Iterator[T] { + txn.mutated.clear() + return newIterator[T](txn.root) +} + +// Commit the transaction and produce the new tree. +func (txn *Txn[T]) Commit() *Tree[T] { + txn.mutated.clear() + for ch := range txn.watches { + close(ch) + } + txn.watches = nil + return &Tree[T]{txn.opts, txn.root, txn.size} +} + +// CommitOnly the transaction, but do not close the +// watch channels. 
Returns the new tree. +// To close the watch channels call Notify(). +func (txn *Txn[T]) CommitOnly() *Tree[T] { + txn.mutated.clear() + return &Tree[T]{txn.opts, txn.root, txn.size} +} + +// Notify closes the watch channels of nodes that were +// mutated as part of this transaction. +func (txn *Txn[T]) Notify() { + for ch := range txn.watches { + close(ch) + } + txn.watches = nil +} + +// PrintTree to the standard output. For debugging. +func (txn *Txn[T]) PrintTree() { + txn.root.printTree(0) +} + +func (txn *Txn[T]) cloneNode(n *header[T]) *header[T] { + if txn.mutated.exists(n) { + return n + } + if n.watch != nil { + txn.watches[n.watch] = struct{}{} + } + n = n.clone(!txn.opts.rootOnlyWatch || n == txn.root) + txn.mutated.put(n) + return n +} + +func (txn *Txn[T]) insert(root *header[T], key []byte, value T) (oldValue T, hadOld bool, newRoot *header[T]) { + return txn.modify(root, key, func(_ T) T { return value }) +} + +func (txn *Txn[T]) modify(root *header[T], key []byte, mod func(T) T) (oldValue T, hadOld bool, newRoot *header[T]) { + fullKey := key + + this := root + thisp := &newRoot + + // Try to insert the key into the tree. If we find a free slot into which to insert + // it, we do it and return. If an existing node exists where the key should go, then + // we stop. 'this' points to that node, and 'thisp' to its memory location. It has + // not been cloned. + for { + if this.isLeaf() { + // We've reached a leaf node, cannot go further. + break + } + + if !bytes.HasPrefix(key, this.prefix) { + break + } + + // Prefix matched. Consume it and go further. + key = key[len(this.prefix):] + if len(key) == 0 { + // Our key matches this node. + break + } + + child, idx := this.findIndex(key[0]) + if child == nil { + // We've found a free slot where to insert the key. + if this.size()+1 > this.cap() { + // Node too small, promote it to the next size. + if this.watch != nil { + txn.watches[this.watch] = struct{}{} + } + this = this.promote(!txn.opts.rootOnlyWatch || this == newRoot) + txn.mutated.put(this) + } else { + // Node is big enough, clone it so we can mutate it + this = txn.cloneNode(this) + } + var zero T + this.insert(idx, newLeaf(txn.opts, key, fullKey, mod(zero)).self()) + *thisp = this + return + } + + // Clone the parent so we can modify it + this = txn.cloneNode(this) + *thisp = this + // And recurse into the child + thisp = &this.children()[idx] + this = *thisp + } + + // A node exists where we wanted to insert the key. + // 'this' points to it, and 'thisp' is its memory location. The parents + // have been cloned. + switch { + case this.isLeaf(): + common := commonPrefix(key, this.prefix) + if len(common) == len(this.prefix) && len(common) == len(key) { + // Exact match, clone and update the value. + oldValue = this.getLeaf().value + hadOld = true + this = txn.cloneNode(this) + *thisp = this + this.getLeaf().value = mod(oldValue) + } else { + // Partially matching prefix. + newNode := &node4[T]{ + header: header[T]{prefix: common}, + } + newNode.setKind(nodeKind4) + + // Make a shallow copy of the leaf. But keep its watch channel + // intact since we're only manipulating its prefix. + oldLeafCopy := *this.getLeaf() + oldLeaf := &oldLeafCopy + oldLeaf.prefix = oldLeaf.prefix[len(common):] + key = key[len(common):] + var zero T + newLeaf := newLeaf(txn.opts, key, fullKey, mod(zero)) + + // Insert the two leaves into the node we created. 
If one has + // a key that is a subset of the other, then we can insert them + // as a leaf of the node4, otherwise they become children. + switch { + case len(oldLeaf.prefix) == 0: + oldLeaf.prefix = common + newNode.setLeaf(oldLeaf) + newNode.children[0] = newLeaf.self() + newNode.keys[0] = newLeaf.prefix[0] + newNode.setSize(1) + + case len(key) == 0: + newLeaf.prefix = common + newNode.setLeaf(newLeaf) + newNode.children[0] = oldLeaf.self() + newNode.keys[0] = oldLeaf.prefix[0] + newNode.setSize(1) + + case oldLeaf.prefix[0] < key[0]: + newNode.children[0] = oldLeaf.self() + newNode.keys[0] = oldLeaf.prefix[0] + newNode.children[1] = newLeaf.self() + newNode.keys[1] = key[0] + newNode.setSize(2) + + default: + newNode.children[0] = newLeaf.self() + newNode.keys[0] = key[0] + newNode.children[1] = oldLeaf.self() + newNode.keys[1] = oldLeaf.prefix[0] + newNode.setSize(2) + } + *thisp = newNode.self() + } + case len(key) == 0: + // Exact match, but not a leaf node + this = txn.cloneNode(this) + *thisp = this + if leaf := this.getLeaf(); leaf != nil { + // Replace the existing leaf + oldValue = leaf.value + hadOld = true + leaf = txn.cloneNode(leaf.self()).getLeaf() + leaf.value = mod(oldValue) + this.setLeaf(leaf) + } else { + // Set the leaf + var zero T + this.setLeaf(newLeaf(txn.opts, this.prefix, fullKey, mod(zero))) + } + + default: + // Partially matching prefix, non-leaf node. + common := commonPrefix(key, this.prefix) + + this = txn.cloneNode(this) + *thisp = this + this.prefix = this.prefix[len(common):] + key = key[len(common):] + + var zero T + newLeaf := newLeaf(txn.opts, key, fullKey, mod(zero)) + newNode := &node4[T]{ + header: header[T]{prefix: common}, + } + newNode.setKind(nodeKind4) + + switch { + case len(key) == 0: + newLeaf.prefix = common + newNode.setLeaf(newLeaf) + newNode.children[0] = this + newNode.keys[0] = this.prefix[0] + newNode.setSize(1) + + case this.prefix[0] < key[0]: + newNode.children[0] = this + newNode.keys[0] = this.prefix[0] + newNode.children[1] = newLeaf.self() + newNode.keys[1] = key[0] + newNode.setSize(2) + default: + newNode.children[0] = newLeaf.self() + newNode.keys[0] = key[0] + newNode.children[1] = this + newNode.keys[1] = this.prefix[0] + newNode.setSize(2) + } + *thisp = newNode.self() + } + return +} + +// deleteParent tracks a node on the path to the target node that is being +// deleted. +type deleteParent[T any] struct { + node *header[T] + index int // the index of this node at its parent +} + +func (txn *Txn[T]) delete(root *header[T], key []byte) (oldValue T, hadOld bool, newRoot *header[T]) { + // Reuse the same slice in the transaction to hold the parents in order to avoid + // allocations. Pre-allocate 32 levels to cover most of the use-cases without + // reallocation. + if txn.deleteParentsCache == nil { + txn.deleteParentsCache = make([]deleteParent[T], 0, 32) + } + parents := txn.deleteParentsCache[:1] // Placeholder for root + + newRoot = root + this := root + + // Find the target node and record the path to it. + var leaf *leaf[T] + for { + if bytes.HasPrefix(key, this.prefix) { + key = key[len(this.prefix):] + if len(key) == 0 { + leaf = this.getLeaf() + if leaf == nil { + return + } + // Target node found! + break + } + var idx int + this, idx = this.findIndex(key[0]) + if this == nil { + return + } + parents = append(parents, deleteParent[T]{this, idx}) + } else { + // Reached a node with a different prefix, so node not found. 
+ return + } + } + + oldValue = leaf.value + hadOld = true + + // Mark the watch channel of the target for closing if not mutated already. + if leaf.watch != nil { + txn.watches[leaf.watch] = struct{}{} + } + + if this == root { + // Target is the root, clear it. + if root.isLeaf() || newRoot.size() == 0 { + // Replace leaf or empty root with a node4 + newRoot = newNode4[T]() + } else { + newRoot = txn.cloneNode(root) + newRoot.setLeaf(nil) + } + return + } + + // The target was found, rebuild the tree from the root upwards. + parents[0].node = root + + for i := len(parents) - 1; i > 0; i-- { + parent := &parents[i-1] + target := &parents[i] + + // Clone the parent to mutate it. + parent.node = txn.cloneNode(parent.node) + children := parent.node.children() + + if target.node == this && target.node.size() > 0 { + // This is the node that we want to delete, but it has + // children. Clone and clear the leaf. + target.node = txn.cloneNode(target.node) + target.node.setLeaf(nil) + children[target.index] = target.node + } else if target.node.size() == 0 && (target.node == this || target.node.getLeaf() == nil) { + // The node is empty, remove it from the parent. + parent.node.remove(target.index) + } else { + // Update the target (as it may have been cloned) + children[target.index] = target.node + } + + if parent.node.size() > 0 { + // Check if the node should be demoted. + // To avoid thrashing we don't demote at the boundary, but at a slightly + // smaller size. + // TODO: Can we avoid the initial clone of parent.node? + var newNode *header[T] + switch { + case parent.node.kind() == nodeKind256 && parent.node.size() <= 37: + newNode = (&node48[T]{header: *parent.node}).self() + newNode.setKind(nodeKind48) + n48 := newNode.node48() + n48.leaf = parent.node.getLeaf() + children := n48.children[:0] + for k, n := range parent.node.node256().children[:] { + if n != nil { + n48.index[k] = int8(len(children)) + children = append(children, n) + } + } + case parent.node.kind() == nodeKind48 && parent.node.size() <= 12: + newNode = (&node16[T]{header: *parent.node}).self() + newNode.setKind(nodeKind16) + copy(newNode.children()[:], parent.node.children()) + n16 := newNode.node16() + n16.leaf = parent.node.getLeaf() + size := n16.size() + for i := 0; i < size; i++ { + n16.keys[i] = n16.children[i].prefix[0] + } + case parent.node.kind() == nodeKind16 && parent.node.size() <= 3: + newNode = (&node4[T]{header: *parent.node}).self() + newNode.setKind(nodeKind4) + n16 := parent.node.node16() + size := n16.size() + n4 := newNode.node4() + n4.leaf = n16.leaf + copy(n4.children[:], n16.children[:size]) + copy(n4.keys[:], n16.keys[:size]) + } + if newNode != nil { + parent.node = newNode + } + } + } + newRoot = parents[0].node + return +} diff --git a/vendor/github.com/cilium/stream/CODEOWNERS b/vendor/github.com/cilium/stream/CODEOWNERS new file mode 100644 index 0000000000..1031f9db44 --- /dev/null +++ b/vendor/github.com/cilium/stream/CODEOWNERS @@ -0,0 +1,12 @@ +# Code owners groups assigned to this repository and a brief description of their areas: +# @cilium/ci-structure Continuous integration, testing +# @cilium/contributing Developer documentation & tools +# @cilium/github-sec GitHub security (handling of secrets, consequences of pull_request_target, etc.) +# @cilium/sig-foundations Core libraries and guidance to overall software architecture. 
+ +# The following filepaths should be sorted so that more specific paths occur +# after the less specific paths, otherwise the ownership for the specific paths +# is not properly picked up in Github. +* @cilium/sig-foundations +/.github/workflows/ @cilium/github-sec @cilium/ci-structure @cilium/sig-foundations +/CODEOWNERS @cilium/contributing @cilium/sig-foundations diff --git a/vendor/github.com/cilium/stream/LICENSE b/vendor/github.com/cilium/stream/LICENSE new file mode 100644 index 0000000000..a2e486a803 --- /dev/null +++ b/vendor/github.com/cilium/stream/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} Authors of Cilium + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/cilium/stream/Makefile b/vendor/github.com/cilium/stream/Makefile new file mode 100644 index 0000000000..df2ee74e7f --- /dev/null +++ b/vendor/github.com/cilium/stream/Makefile @@ -0,0 +1,15 @@ +.PHONY: all build test test-race bench + +all: build test test-race bench + +build: + go build ./... + +test: + go test ./... -cover + +test-race: + go test -race ./... + +bench: + go test ./... -bench . -test.run xxx diff --git a/vendor/github.com/cilium/stream/README.md b/vendor/github.com/cilium/stream/README.md new file mode 100644 index 0000000000..6e9a0a032d --- /dev/null +++ b/vendor/github.com/cilium/stream/README.md @@ -0,0 +1,164 @@ +# Reactive streams for Go + +A reactive streams library for Go in the spirit of Reactive Extensions (Rx) implemented +with generic functions. The library provides a rich set of utilities for wiring +event-passing in a complex application. Included are, for example, operators for +pubsub/fanning out (Multicast), for transforming (Map, Reduce), for rate limiting (Throttle) +and for buffering/coalescing (Buffer). New operators are easy to add as they are normal +top-level functions that take/return the `Observable` type. + +## The Observable + +The stream package provides the Observable interface for observing a stream of +values that can be cancelled and can be either infinite or finite in length. + +The Observable interface is defined as: + +```go +type Observable[T any] interface { + Observe(ctx context.Context, next func(T), complete func(error)) +} +``` + +The `next` function is called for each element in the stream. When the stream +is terminated or cancelled (via `ctx`) `next` will be called for remaining +elements and then `complete` after which neither function is invoked. + +An Observable must adhere to the following rules: + +* Observe() call must not block, e.g. be asynchronous by forking a goroutine. +* `next` must be called sequentially and never in parallel (previous call must complete + before `next` can be called again). +* `complete` can be called at most once. `complete` must not be called in parallel with + `next`. After `complete` is called neither `next` nor `complete` can be called again. +* if `ctx` is completed, calls to `next` should stop in short amount of time and `complete` + must be called with `ctx.Err()`. 
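+
+For illustration, a minimal sketch (a hypothetical `printAll` helper) of consuming a
+finite observable according to these rules; `FromSlice` and `Observe` are this package's
+own API, and the `done` channel exists only so the example can wait for completion:
+
+```go
+func printAll(ctx context.Context, src stream.Observable[int]) error {
+	done := make(chan error, 1)
+	src.Observe(
+		ctx,
+		func(x int) { fmt.Println("next:", x) }, // called sequentially, once per item
+		func(err error) { done <- err },         // called exactly once at the end
+	)
+	// Observe returns immediately; completion is signalled via the callback.
+	return <-done
+}
+
+// printAll(ctx, stream.FromSlice([]int{1, 2, 3}))
+// => prints 1, 2, 3 and returns nil
+```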
+ +## Operators + +The functions that operate on `Observable[T]` are divided into: + +* [sources](sources.go) that create Observables +* [operators](operators.go) that transform Observables +* [sinks](sinks.go) that consume the Observable + +Since Go's generics does not yet allow new type parameters in methods, all of these +are implemented as top-level functions rather than methods in the Observable interface. +This also makes it easy to add new operators as they're just normal functions. + +## Creating an observable by hand + +As a first example, we'll implement a simple source `Observable` that emits a single integer: + +```go + +type singleIntegerObservable int + +func (num singleIntegerObservable) Observe(ctx context.Context, next func(int), complete func(error)) { + go func() { + next(int(num)) + complete(nil) + }() +} +``` + +We can now try it out with the `Map` operator: + +```go +func main() { + var ten stream.Observable[int] = singleIntegerObservable(10) + + twenty := stream.Map(ten, func(x int) int) { return x * 2 }) + + twenty.Observe( + context.Background(), + func(x int) { + fmt.Printf("%d\n", x) + }, + func(err error) { + fmt.Printf("complete: %s\n", err) + }, + ) +} +``` + +Instead of defining a new type every time we want to implement `Observe`, we can use the `FuncObservable` +helper: + +```go +func singleInt(x int) stream.Observable[int] { + return stream.FuncObservable( + func(ctx context.Context, next func(int), complete func(error)) error { + next(x) + complete(nil) + }, + ) +} +``` + +## Tour of the included operators + +[Sources](sources.go) provide different ways of creating `Observable`s without +having to implement `Observe`: + +```go +Just(10) // emits 10 and completes +Error(errors.New("oh no")) // completes with error +Empty() // completes with nil error +FromSlice([]int{1,2,3}) // emits 1,2,3 and completes +FromChannel(in) // emits items from the given channel +Range(0,3) // emits 0,1,2 and completes + + +// Multicast creates an observable that emits items to all observers. +src, next, complete := Multicast[int]() + +ch1 := ToChannel(ctx, src) +ch2 := ToChannel(ctx, src) +next(1) +<-ch1 // 1 +<-ch2 // 1 +``` + +[Operators](operators.go) transform streams in different ways: +```go +// Map[A, B any](src Observable[A], apply func(A) B) Observable[B] +Map(src, apply) // applies function 'apply' to each item. + +// Filter[T any](src Observable[T], filter func(T) bool) Observable[T] +Filter(src, filter) // applies function 'filter' to each item. If 'filter' returns false the + // item is dropped. + +// Reduce[T, Result any](src Observable[T], init Result, reduce func(T, Result) Result) Observable[Result] +// Applies function 'reduce' to each item to "reduce" the stream into a single value. +Reduce(Range(0, 3), 0, func(x, result int) int { return x + result }) // 0 + 1 + 2 = 3 + +// ToMulticast[T any](src Observable[T], opts ...MulticastOpt) (mcast Observable[T], connect func(context.Context)) +// Converts an observable into a multicast observable +src, connect := ToMulticast(Range(1,5)) +ch1 := ToChannel(ctx, src) +ch2 := ToChannel(ctx, src) +connect(ctx) // start observing the parent observable +<-ch1 // 1 +<-ch2 // 1 +``` + +[Sinks](stream/sinks.go) consume streams: +```go +// First[T any](ctx context.Context, src Observable[T]) (item T, err error) +// Takes the first item from the observable and then cancels it. 
+item, err := First(ctx, src) + +// ToSlice[T any](ctx context.Context, src Observable[T]) (items []T, err error) +// Converts the observable into a slice. +items, err := ToSlice(ctx, src) + +// ToChannel[T any](ctx context.Context, src Observable[T], opts ...ToChannelOpt) <-chan T +// Converts the observable into a channel. +items := ToChannel(ctx, src) + +// Discard[T any](ctx context.Context, src Observable[T]) error +// Consumes the observable by discarding the elements. +Discard(ctx, src) +``` + diff --git a/vendor/github.com/cilium/stream/observable.go b/vendor/github.com/cilium/stream/observable.go new file mode 100644 index 0000000000..22e96af95d --- /dev/null +++ b/vendor/github.com/cilium/stream/observable.go @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// The stream package provides utilities for working with observable streams. +// Any type that implements the Observable interface can be transformed and +// consumed with these utilities. +package stream + +import "context" + +// Observable defines the Observe method for observing a stream of values. +// +// Also see https://reactivex.io/documentation/observable.html for in-depth +// description of observables. +// +// For interactive diagrams see https://rxmarbles.com/. +type Observable[T any] interface { + // Observe a stream of values as long as the given context is valid. + // 'next' is called for each item, and finally 'complete' is called + // when the stream is complete, or an error has occurred. + // + // Observable implementations are allowed to call 'next' and 'complete' + // from any goroutine, but never concurrently. + Observe(ctx context.Context, next func(T), complete func(error)) +} + +// FuncObservable implements the Observable interface with a function. +// +// This provides a convenient way of creating new observables without having +// to introduce a new type: +// +// var Ones Observable[int] = +// FuncObservable[int]( +// func(ctx context.Context, next func(int), complete func(error)) { +// go func() { +// defer complete(nil) +// for ctx.Err() == nil { +// next(1) +// } +// }() +// }) +// +// versus with a new type: +// +// type onesObservable struct {} +// +// func (o onesObservable) Observe(ctx context.Context, next func(int), complete func(error)) { +// go func() { +// defer complete(nil) +// for ctx.Err() == nil { +// next(1) +// } +// }() +// } +type FuncObservable[T any] func(context.Context, func(T), func(error)) + +func (f FuncObservable[T]) Observe(ctx context.Context, next func(T), complete func(error)) { + f(ctx, next, complete) +} diff --git a/vendor/github.com/cilium/stream/operators.go b/vendor/github.com/cilium/stream/operators.go new file mode 100644 index 0000000000..6823f281a1 --- /dev/null +++ b/vendor/github.com/cilium/stream/operators.go @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + "time" + + "golang.org/x/time/rate" +) + +// +// Operators transform the observable stream. +// + +// Map applies a function onto values of an observable and emits the resulting values. +// +// Map(Range(1,4), func(x int) int { return x * 2}) +// => [2,4,6] +func Map[A, B any](src Observable[A], apply func(A) B) Observable[B] { + return FuncObservable[B]( + func(ctx context.Context, next func(B), complete func(error)) { + src.Observe( + ctx, + func(a A) { next(apply(a)) }, + complete) + }) +} + +// Filter only emits the values for which the provided predicate returns true. 
+// +// Filter(Range(1,4), func(x int) int { return x%2 == 0 }) +// => [2] +func Filter[T any](src Observable[T], pred func(T) bool) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + src.Observe( + ctx, + func(x T) { + if pred(x) { + next(x) + } + }, + complete) + }) +} + +// Reduce takes an initial state, and a function 'reduce' that is called on each element +// along with a state and returns an observable with a single item: the state produced +// by the last call to 'reduce'. +// +// Reduce(Range(1,4), 0, func(sum, item int) int { return sum + item }) +// => [(0+1+2+3)] => [6] +func Reduce[Item, Result any](src Observable[Item], init Result, reduce func(Result, Item) Result) Observable[Result] { + result := init + return FuncObservable[Result]( + func(ctx context.Context, next func(Result), complete func(error)) { + src.Observe( + ctx, + func(x Item) { + result = reduce(result, x) + }, + func(err error) { + if err == nil { + next(result) + } + complete(err) + }) + }) +} + +// Concat takes one or more observable of the same type and emits the items from each of +// them in order. +func Concat[T any](srcs ...Observable[T]) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + for _, src := range srcs { + errs := make(chan error, 1) + src.Observe( + ctx, + next, + func(err error) { + if err != nil { + errs <- err + } + close(errs) + }, + ) + if err, ok := <-errs; ok { + complete(err) + return + } + } + complete(nil) + }() + }) +} + +// FlatMap applies a function that returns an observable of Bs to the source observable of As. +// The observable from the 'apply' function is flattened to produce a flat stream of Bs. +func FlatMap[A, B any](src Observable[A], apply func(A) Observable[B]) Observable[B] { + return FuncObservable[B]( + func(ctx context.Context, next func(B), complete func(error)) { + ctx, cancel := context.WithCancel(ctx) + innerErrs := make(chan error, 1) + src.Observe( + ctx, + func(a A) { + done := make(chan struct{}) + apply(a).Observe( + ctx, + next, + func(err error) { + if err != nil { + select { + case innerErrs <- err: + default: + } + cancel() + } + close(done) + }, + ) + <-done + }, + func(err error) { + defer close(innerErrs) + select { + case innerErr := <-innerErrs: + complete(innerErr) + default: + complete(err) + } + }, + ) + }) +} + +// Distinct skips adjacent equal values. +// +// Distinct(FromSlice([]int{1,1,2,2,3}) +// => [1,2,3] +func Distinct[T comparable](src Observable[T]) Observable[T] { + var prev T + first := true + return Filter(src, func(item T) bool { + if first { + first = false + prev = item + return true + } + eq := prev == item + prev = item + return !eq + }) +} + +// RetryFunc decides whether the processing should be retried given the error +type RetryFunc func(err error) bool + +// Retry resubscribes to the observable if it completes with an error. +func Retry[T any](src Observable[T], shouldRetry RetryFunc) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + var observe func() + observe = func() { + src.Observe( + ctx, + next, + func(err error) { + if err != nil && shouldRetry(err) { + observe() + } else { + complete(err) + } + }) + } + observe() + }) +} + +// AlwaysRetry always asks for a retry regardless of the error. +func AlwaysRetry(err error) bool { + return true +} + +// BackoffRetry retries with an exponential backoff. 
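+//
+// For example (an illustrative sketch, given some src Observable[T], combining
+// it with Retry and LimitRetries):
+//
+//	shouldRetry := LimitRetries(BackoffRetry(AlwaysRetry, 100*time.Millisecond, time.Minute), 5)
+//	resilient := Retry(src, shouldRetry)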
+func BackoffRetry(shouldRetry RetryFunc, minBackoff, maxBackoff time.Duration) RetryFunc { + backoff := minBackoff + return func(err error) bool { + time.Sleep(backoff) + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + return shouldRetry(err) + } + +} + +// LimitRetries limits the number of retries with the given retry method. +// e.g. LimitRetries(BackoffRetry(time.Millisecond, time.Second), 5) +func LimitRetries(shouldRetry RetryFunc, numRetries int) RetryFunc { + return func(err error) bool { + if numRetries <= 0 { + return false + } + numRetries-- + return shouldRetry(err) + } +} + +// ToMulticast makes 'src' a multicast observable, e.g. each observer will observe +// the same sequence. Useful for fanning out items to multiple observers from a source +// that is consumed by the act of observing. +// +// mcast, connect := ToMulticast(FromChannel(values)) +// a := ToSlice(mcast) +// b := ToSlice(mcast) +// connect(ctx) // start! +// => a == b +func ToMulticast[T any](src Observable[T], opts ...MulticastOpt) (mcast Observable[T], connect func(context.Context)) { + mcast, next, complete := Multicast[T](opts...) + connect = func(ctx context.Context) { + src.Observe(ctx, next, complete) + } + return mcast, connect +} + +// Throttle limits the rate at which items are emitted. +func Throttle[T any](src Observable[T], ratePerSecond float64, burst int) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + limiter := rate.NewLimiter(rate.Limit(ratePerSecond), burst) + var limiterErr error + subCtx, cancel := context.WithCancel(ctx) + src.Observe( + subCtx, + func(item T) { + limiterErr = limiter.Wait(ctx) + if limiterErr != nil { + cancel() + return + } + next(item) + }, + func(err error) { + if limiterErr != nil { + complete(limiterErr) + } else { + complete(err) + } + + }, + ) + }) +} + +// Debounce emits an item only after the specified duration has lapsed since +// the previous item was emitted. Only the latest item is emitted. +// +// In: a b c d e |-> +// Out: a d e |-> +func Debounce[T any](src Observable[T], duration time.Duration) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + errs := make(chan error, 1) + items := ToChannel(ctx, src, WithErrorChan(errs)) + go func() { + defer close(errs) + + timer := time.NewTimer(duration) + defer timer.Stop() + + timerElapsed := true // Do not delay the first item. + var latest *T + + for { + select { + case err := <-errs: + complete(err) + return + + case item, ok := <-items: + if !ok { + items = nil + latest = nil + continue + } + + if timerElapsed { + next(item) + timerElapsed = false + latest = nil + timer.Reset(duration) + } else { + latest = &item + } + + case <-timer.C: + if latest != nil { + next(*latest) + latest = nil + timer.Reset(duration) + } else { + timerElapsed = true + } + } + } + }() + }) +} + +// Buffer collects items into a buffer using the given buffering function and +// emits the buffer when 'waitTime' has elapsed. Buffer does not emit empty +// buffers. 
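+//
+// For example (an illustrative sketch, given some src Observable[int]; slices
+// are flushed when 64 items have accumulated or on the next 100ms tick):
+//
+//	bufs := Buffer(src, 64, 100*time.Millisecond,
+//		func(buf []int, item int) []int { return append(buf, item) })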
+// +// In: a b c |-> +// Out: [a,b] [c] |-> +func Buffer[Buf any, T any]( + src Observable[T], + bufferSize int, + waitTime time.Duration, + bufferItem func(Buf, T) Buf) Observable[Buf] { + + return FuncObservable[Buf]( + func(ctx context.Context, next func(Buf), complete func(error)) { + items := make(chan T, bufferSize) + errs := make(chan error, 1) + src.Observe( + ctx, + func(item T) { + items <- item + }, + func(err error) { + close(items) + errs <- err + close(errs) + }) + go func() { + ticker := time.NewTicker(waitTime) + defer ticker.Stop() + + var ( + emptyBuf Buf + buf Buf + ) + n := 0 + loop: + for { + select { + case <-ticker.C: + if n > 0 { + next(buf) + buf = emptyBuf + n = 0 + } + + case item, ok := <-items: + if !ok { + break loop + } + buf = bufferItem(buf, item) + n++ + if n >= bufferSize { + next(buf) + buf = emptyBuf + n = 0 + } + } + } + + if n > 0 { + next(buf) + } + complete(<-errs) + }() + + }) + +} diff --git a/vendor/github.com/cilium/stream/sinks.go b/vendor/github.com/cilium/stream/sinks.go new file mode 100644 index 0000000000..0cfcc01ab5 --- /dev/null +++ b/vendor/github.com/cilium/stream/sinks.go @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + "io" + "sync" + "sync/atomic" +) + +// +// Sinks: operators that consume the observable to produce a value. +// + +// First returns the first item from 'src' observable and then cancels +// the subscription. Blocks until first item is observed or the stream +// is completed. If the observable completes without emitting items +// then io.EOF error is returned. +func First[T any](ctx context.Context, src Observable[T]) (item T, err error) { + subCtx, cancel := context.WithCancel(ctx) + var taken atomic.Bool + errs := make(chan error) + src.Observe(subCtx, + func(x T) { + if !taken.CompareAndSwap(false, true) { + return + } + item = x + cancel() + }, + func(err error) { + errs <- err + close(errs) + }) + + err = <-errs + + if taken.Load() { + // We got the item, ignore any error. + err = nil + } else if err == nil { + // No error and no item => EOF + err = io.EOF + } + + return +} + +// Last returns the last item from 'src' observable. Blocks until +// the stream has been completed. If no items are observed then +// io.EOF error is returned. +func Last[T any](ctx context.Context, src Observable[T]) (item T, err error) { + errs := make(chan error) + var taken atomic.Bool + src.Observe( + ctx, + func(x T) { + item = x + taken.Store(true) + }, + func(err error) { + errs <- err + close(errs) + }) + + err = <-errs + if taken.Load() { + // We got the item, ignore any error. + err = nil + } else if err == nil { + // No error and no item => EOF + err = io.EOF + } + return item, err +} + +// ToSlice converts an Observable into a slice. +// +// ToSlice(ctx, Range(1,4)) +// => ([]int{1,2,3}, nil) +func ToSlice[T any](ctx context.Context, src Observable[T]) (items []T, err error) { + errs := make(chan error) + items = make([]T, 0) + src.Observe( + ctx, + func(item T) { + items = append(items, item) + }, + func(err error) { + errs <- err + close(errs) + }) + return items, <-errs +} + +type toChannelOpts struct { + bufferSize int + errorChan chan error +} + +type ToChannelOpt func(*toChannelOpts) + +// WithBufferSize sets the buffer size of the channel returned by ToChannel. 
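+//
+// For example (an illustrative sketch, given a ctx and some src Observable[T];
+// a small buffer lets a briefly slow consumer lag without blocking the source):
+//
+//	items := ToChannel(ctx, src, WithBufferSize(16))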
+func WithBufferSize(n int) ToChannelOpt { + return func(o *toChannelOpts) { + o.bufferSize = n + } +} + +// WithErrorChan asks ToChannel to send completion error to the provided channel. +func WithErrorChan(errCh chan error) ToChannelOpt { + return func(o *toChannelOpts) { + o.errorChan = errCh + } +} + +// ToChannel converts an observable into a channel. +// When the provided context is cancelled the underlying subscription is cancelled +// and the channel is closed. To receive completion errors use [WithErrorChan]. +// +// items <- ToChannel(ctx, Range(1,4)) +// a := <- items +// b := <- items +// c := <- items +// _, ok := <- items +// => a=1, b=2, c=3, ok=false +func ToChannel[T any](ctx context.Context, src Observable[T], opts ...ToChannelOpt) <-chan T { + var o toChannelOpts + for _, opt := range opts { + opt(&o) + } + items := make(chan T, o.bufferSize) + src.Observe( + ctx, + func(item T) { items <- item }, + func(err error) { + close(items) + if o.errorChan != nil { + o.errorChan <- err + } + }) + return items +} + +// ToTruncatingChannel is like ToChannel but with a local buffer to decouple the +// source observable from the observer. +// It is useful when the source observable cannot be delayed by a slow consumer +// and it is safe for the consumer to lose intermediate items while busy. +func ToTruncatingChannel[T any](ctx context.Context, src Observable[T], opts ...ToChannelOpt) <-chan T { + items := ToChannel(ctx, src, opts...) + out := make(chan T) + go func() { + defer close(out) + var ( + ch chan T + buf T + ) + for { + select { + case v, ok := <-items: + if !ok { + return + } + buf = v + ch = out + case ch <- buf: + ch = nil + } + } + }() + return out +} + +// Discard discards all items from 'src'. +func Discard[T any](ctx context.Context, src Observable[T]) { + src.Observe(ctx, + func(item T) {}, + func(err error) {}) +} + +// ObserveWithWaitGroup is like Observe(), but adds to a WaitGroup and calls +// Done() when complete. +func ObserveWithWaitGroup[T any](ctx context.Context, wg *sync.WaitGroup, src Observable[T], next func(T), complete func(error)) { + wg.Add(1) + src.Observe( + ctx, + next, + func(err error) { + complete(err) + wg.Done() + }) +} diff --git a/vendor/github.com/cilium/stream/sources.go b/vendor/github.com/cilium/stream/sources.go new file mode 100644 index 0000000000..004532fd29 --- /dev/null +++ b/vendor/github.com/cilium/stream/sources.go @@ -0,0 +1,262 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package stream + +import ( + "context" + "sync" +) + +// Just creates an observable that emits a single item and completes. +// +// xs, err := ToSlice(ctx, Just(1)) +// => xs == []int{1}, err == nil +func Just[T any](item T) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + if err := ctx.Err(); err != nil { + complete(err) + } else { + next(item) + complete(nil) + } + }() + }) +} + +// Stuck creates an observable that never emits anything and +// just waits for the context to be cancelled. +// Mainly meant for testing. +func Stuck[T any]() Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + <-ctx.Done() + complete(ctx.Err()) + }() + }) +} + +// Error creates an observable that fails immediately with given error. 
+// +// failErr = errors.New("fail") +// xs, err := ToSlice(ctx, Error[int](failErr)) +// => xs == []int{}, err == failErr +func Error[T any](err error) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go complete(err) + }) +} + +// Empty creates an "empty" observable that completes immediately. +// +// xs, err := ToSlice(Empty[int]()) +// => xs == []int{}, err == nil +func Empty[T any]() Observable[T] { + return Error[T](nil) +} + +// FromSlice converts a slice into an Observable. +// +// ToSlice(ctx, FromSlice([]int{1,2,3}) +// => []int{1,2,3} +func FromSlice[T any](items []T) Observable[T] { + // Emit items in chunks to reduce overhead of mutex in ctx.Err(). + const chunkSize = 64 + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + for chunk := 0; chunk < len(items); chunk += chunkSize { + if err := ctx.Err(); err != nil { + complete(err) + return + } + for i := chunk; i < len(items) && i < chunk+chunkSize; i++ { + next(items[i]) + } + } + complete(nil) + }() + }) +} + +// FromChannel creates an observable from a channel. The channel is consumed +// by the first observer. +// +// values := make(chan int) +// go func() { +// values <- 1 +// values <- 2 +// values <- 3 +// close(values) +// }() +// obs := FromChannel(values) +// xs, err := ToSlice(ctx, obs) +// => xs == []int{1,2,3}, err == nil +// +// xs, err = ToSlice(ctx, obs) +// => xs == []int{}, err == nil +func FromChannel[T any](in <-chan T) Observable[T] { + return FuncObservable[T]( + func(ctx context.Context, next func(T), complete func(error)) { + go func() { + done := ctx.Done() + for { + select { + case <-done: + complete(ctx.Err()) + return + case v, ok := <-in: + if !ok { + complete(nil) + return + } + next(v) + } + } + }() + }) +} + +// Range creates an observable that emits integers in range from...to-1. +// +// ToSlice(ctx, Range(1,2,3)) => []int{1,2,3} +func Range(from, to int) Observable[int] { + return FuncObservable[int]( + func(ctx context.Context, next func(int), complete func(error)) { + go func() { + for i := from; i < to; i++ { + if ctx.Err() != nil { + break + } + next(i) + } + complete(ctx.Err()) + }() + }) +} + +type mcastSubscriber[T any] struct { + next func(T) + complete func() +} + +type MulticastOpt func(o *mcastOpts) + +type mcastOpts struct { + emitLatest bool +} + +func (o mcastOpts) apply(opts []MulticastOpt) mcastOpts { + for _, opt := range opts { + opt(&o) + } + return o +} + +// Multicast options +var ( + // Emit the latest seen item when subscribing. + EmitLatest = func(o *mcastOpts) { o.emitLatest = true } +) + +// Multicast creates an observable that "multicasts" the emitted items to all observers. 
+// +// mcast, next, complete := Multicast[int]() +// next(1) // no observers, none receives this +// sub1 := ToChannel(ctx, mcast, WithBufferSize(10)) +// sub2 := ToChannel(ctx, mcast, WithBufferSize(10)) +// next(2) +// next(3) +// complete(nil) +// => sub1 == sub2 == [2,3] +// +// mcast, next, complete = Multicast[int](EmitLatest) +// next(1) +// next(2) // "EmitLatest" tells Multicast to keep this +// x, err := First(ctx, mcast) +// => x == 2, err == nil +func Multicast[T any](opts ...MulticastOpt) (mcast Observable[T], next func(T), complete func(error)) { + var ( + mu sync.Mutex + subId int + subs = make(map[int]mcastSubscriber[T]) + latestValue T + completed bool + completeErr error + haveLatest bool + opt = mcastOpts{}.apply(opts) + ) + + next = func(item T) { + mu.Lock() + defer mu.Unlock() + if completed { + return + } + if opt.emitLatest { + latestValue = item + haveLatest = true + } + for _, sub := range subs { + sub.next(item) + } + } + + complete = func(err error) { + mu.Lock() + defer mu.Unlock() + completed = true + completeErr = err + for _, sub := range subs { + sub.complete() + } + subs = nil + } + + mcast = FuncObservable[T]( + func(ctx context.Context, subNext func(T), subComplete func(error)) { + mu.Lock() + if completed { + mu.Unlock() + go subComplete(completeErr) + return + } + + subCtx, cancel := context.WithCancel(ctx) + thisId := subId + subId++ + subs[thisId] = mcastSubscriber[T]{ + subNext, + cancel, + } + + // Continue subscribing asynchronously so caller is not blocked. + go func() { + if opt.emitLatest && haveLatest { + subNext(latestValue) + } + mu.Unlock() + + // Wait for cancellation by observer, or completion from upstream. + <-subCtx.Done() + + // Remove the observer and complete. + var err error + mu.Lock() + delete(subs, thisId) + if completed { + err = completeErr + } else { + err = subCtx.Err() + } + mu.Unlock() + subComplete(err) + }() + }) + + return +} diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go index 0d82a2dd3c..201a12e977 100644 --- a/vendor/github.com/containernetworking/cni/libcni/api.go +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -15,7 +15,7 @@ package libcni // Note this is the actual implementation of the CNI specification, which -// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// is reflected in the SPEC.md file. // it is typically bundled into runtime providers (i.e. containerd or cri-o would use this // before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, // to add an IP to a container, to parse the configuration of the CNI and so on. 
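The cilium/stream package vendored above is new in this bump. As a minimal usage sketch -- assuming only the exported names visible in the vendored sources (Multicast, EmitLatest, ToChannel, WithBufferSize), not any wider API -- a consumer could fan items out to observers and read them back as a channel roughly like this:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/cilium/stream"
    )

    func main() {
    	ctx := context.Background()

    	// Multicast fans each item out to every current observer; EmitLatest
    	// additionally replays the last seen item to late subscribers.
    	src, next, complete := stream.Multicast[int](stream.EmitLatest)

    	// ToChannel subscribes and exposes the observable as a channel.
    	items := stream.ToChannel(ctx, src, stream.WithBufferSize(4))

    	next(1)
    	next(2)
    	complete(nil) // completion closes the channel returned by ToChannel

    	for v := range items {
    		fmt.Println(v) // prints 1 then 2
    	}
    }

Because completion closes the channel, the range loop terminates without any extra bookkeeping on the consumer side.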
@@ -23,10 +23,11 @@ package libcni import ( "context" "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" + "sort" "strings" "github.com/containernetworking/cni/pkg/invoke" @@ -38,6 +39,8 @@ import ( var ( CacheDir = "/var/lib/cni" + // slightly awkward wording to preserve anyone matching on error strings + ErrorCheckNotSupp = fmt.Errorf("does not support the CHECK command") ) const ( @@ -73,10 +76,25 @@ type NetworkConfigList struct { Name string CNIVersion string DisableCheck bool + DisableGC bool Plugins []*NetworkConfig Bytes []byte } +type NetworkAttachment struct { + ContainerID string + Network string + IfName string + Config []byte + NetNS string + CniArgs [][2]string + CapabilityArgs map[string]interface{} +} + +type GCArgs struct { + ValidAttachments []types.GCAttachment +} + type CNI interface { AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error @@ -92,6 +110,13 @@ type CNI interface { ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) + + GCNetworkList(ctx context.Context, net *NetworkConfigList, args *GCArgs) error + GetStatusNetworkList(ctx context.Context, net *NetworkConfigList) error + + GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) + + GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) } type CNIConfig struct { @@ -139,8 +164,11 @@ func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult typ if err != nil { return nil, err } + if rt != nil { + return injectRuntimeConfig(orig, rt) + } - return injectRuntimeConfig(orig, rt) + return orig, nil } // This function takes a libcni RuntimeConf structure and injects values into @@ -195,6 +223,7 @@ type cachedInfo struct { Config []byte `json:"config"` IfName string `json:"ifName"` NetworkName string `json:"networkName"` + NetNS string `json:"netns,omitempty"` CniArgs [][2]string `json:"cniArgs,omitempty"` CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` RawResult map[string]interface{} `json:"result,omitempty"` @@ -229,6 +258,7 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, Config: config, IfName: rt.IfName, NetworkName: netName, + NetNS: rt.NetNS, CniArgs: rt.Args, CapabilityArgs: rt.CapabilityArgs, } @@ -254,11 +284,11 @@ func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, if err != nil { return err } - if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + if err := os.MkdirAll(filepath.Dir(fname), 0o700); err != nil { return err } - return ioutil.WriteFile(fname, newBytes, 0600) + return os.WriteFile(fname, newBytes, 0o600) } func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { @@ -277,7 +307,7 @@ func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *R if err != nil { return nil, nil, err } - bytes, err = ioutil.ReadFile(fname) + bytes, err = os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil, nil @@ -305,7 +335,7 @@ func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *Runtim if err != nil { return nil, err } - data, err := ioutil.ReadFile(fname) + data, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ 
-333,7 +363,7 @@ func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) if err != nil { return nil, err } - fdata, err := ioutil.ReadFile(fname) + fdata, err := os.ReadFile(fname) if err != nil { // Ignore read errors; the cached result may not exist on-disk return nil, nil @@ -390,6 +420,68 @@ func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) return c.getCachedConfig(net.Network.Name, rt) } +// GetCachedAttachments returns a list of network attachments from the cache. +// The returned list will be filtered by the containerID if the value is not empty. +func (c *CNIConfig) GetCachedAttachments(containerID string) ([]*NetworkAttachment, error) { + dirPath := filepath.Join(c.getCacheDir(&RuntimeConf{}), "results") + entries, err := os.ReadDir(dirPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + + fileNames := make([]string, 0, len(entries)) + for _, e := range entries { + fileNames = append(fileNames, e.Name()) + } + sort.Strings(fileNames) + + attachments := []*NetworkAttachment{} + for _, fname := range fileNames { + if len(containerID) > 0 { + part := fmt.Sprintf("-%s-", containerID) + pos := strings.Index(fname, part) + if pos <= 0 || pos+len(part) >= len(fname) { + continue + } + } + + cacheFile := filepath.Join(dirPath, fname) + bytes, err := os.ReadFile(cacheFile) + if err != nil { + continue + } + + cachedInfo := cachedInfo{} + + if err := json.Unmarshal(bytes, &cachedInfo); err != nil { + continue + } + if cachedInfo.Kind != CNICacheV1 { + continue + } + if len(containerID) > 0 && cachedInfo.ContainerID != containerID { + continue + } + if cachedInfo.IfName == "" || cachedInfo.NetworkName == "" { + continue + } + + attachments = append(attachments, &NetworkAttachment{ + ContainerID: cachedInfo.ContainerID, + Network: cachedInfo.NetworkName, + IfName: cachedInfo.IfName, + Config: cachedInfo.Config, + NetNS: cachedInfo.NetNS, + CniArgs: cachedInfo.CniArgs, + CapabilityArgs: cachedInfo.CapabilityArgs, + }) + } + return attachments, nil +} + func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { c.ensureExec() pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) @@ -453,7 +545,7 @@ func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigLis if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + return fmt.Errorf("configuration version %q %w", list.CNIVersion, ErrorCheckNotSupp) } if list.DisableCheck { @@ -497,9 +589,9 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { return err } else if gtet { - cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) - if err != nil { - return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + if cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt); err != nil { + _ = c.cacheDel(list.Name, rt) + cachedResult = nil } } @@ -509,6 +601,7 @@ func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) } } + _ = c.cacheDel(list.Name, rt) return nil @@ -547,7 +640,7 @@ func (c 
*CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *Ru if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { return err } else if !gtet { - return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + return fmt.Errorf("configuration version %q %w", net.Network.CNIVersion, ErrorCheckNotSupp) } cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) @@ -666,6 +759,129 @@ func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (vers return invoke.GetVersionInfo(ctx, pluginPath, c.exec) } +// GCNetworkList will do two things +// - dump the list of cached attachments, and issue deletes as necessary +// - issue a GC to the underlying plugins (if the version is high enough) +func (c *CNIConfig) GCNetworkList(ctx context.Context, list *NetworkConfigList, args *GCArgs) error { + // If DisableGC is set, then don't bother GCing at all. + if list.DisableGC { + return nil + } + + // First, get the list of cached attachments + cachedAttachments, err := c.GetCachedAttachments("") + if err != nil { + return nil + } + + var validAttachments map[types.GCAttachment]interface{} + if args != nil { + validAttachments = make(map[types.GCAttachment]interface{}, len(args.ValidAttachments)) + for _, a := range args.ValidAttachments { + validAttachments[a] = nil + } + } + + var errs []error + + for _, cachedAttachment := range cachedAttachments { + if cachedAttachment.Network != list.Name { + continue + } + // we found this attachment + gca := types.GCAttachment{ + ContainerID: cachedAttachment.ContainerID, + IfName: cachedAttachment.IfName, + } + if _, ok := validAttachments[gca]; ok { + continue + } + // otherwise, this attachment wasn't valid and we should issue a CNI DEL + rt := RuntimeConf{ + ContainerID: cachedAttachment.ContainerID, + NetNS: cachedAttachment.NetNS, + IfName: cachedAttachment.IfName, + Args: cachedAttachment.CniArgs, + CapabilityArgs: cachedAttachment.CapabilityArgs, + } + if err := c.DelNetworkList(ctx, list, &rt); err != nil { + errs = append(errs, fmt.Errorf("failed to delete stale attachment %s %s: %w", rt.ContainerID, rt.IfName, err)) + } + } + + // now, if the version supports it, issue a GC + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); gt { + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + if args != nil { + inject["cni.dev/valid-attachments"] = args.ValidAttachments + // #1101: spec used incorrect variable name + inject["cni.dev/attachments"] = args.ValidAttachments + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + errs = append(errs, fmt.Errorf("failed to generate configuration to GC plugin %s: %w", plugin.Network.Type, err)) + } + if err := c.gcNetwork(ctx, pluginConfig); err != nil { + errs = append(errs, fmt.Errorf("failed to GC plugin %s: %w", plugin.Network.Type, err)) + } + } + } + + return errors.Join(errs...) 
+} + +func (c *CNIConfig) gcNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("GC", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + +func (c *CNIConfig) GetStatusNetworkList(ctx context.Context, list *NetworkConfigList) error { + // If the version doesn't support status, abort. + if gt, _ := version.GreaterThanOrEqualTo(list.CNIVersion, "1.1.0"); !gt { + return nil + } + + inject := map[string]interface{}{ + "name": list.Name, + "cniVersion": list.CNIVersion, + } + + for _, plugin := range list.Plugins { + // build config here + pluginConfig, err := InjectConf(plugin, inject) + if err != nil { + return fmt.Errorf("failed to generate configuration to get plugin STATUS %s: %w", plugin.Network.Type, err) + } + if err := c.getStatusNetwork(ctx, pluginConfig); err != nil { + return err // Don't collect errors here, so we return a clean error code. + } + } + return nil +} + +func (c *CNIConfig) getStatusNetwork(ctx context.Context, net *NetworkConfig) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + args := c.args("STATUS", &RuntimeConf{}) + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, net.Bytes, args, c.exec) +} + // ===== func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { return &invoke.Args{ diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go index 3cd6a59d1c..1d1b821c63 100644 --- a/vendor/github.com/containernetworking/cni/libcni/conf.go +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -16,13 +16,16 @@ package libcni import ( "encoding/json" + "errors" "fmt" - "io/ioutil" "os" "path/filepath" + "slices" "sort" + "strings" "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/version" ) type NotFoundError struct { @@ -54,7 +57,7 @@ func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { } func ConfFromFile(filename string) (*NetworkConfig, error) { - bytes, err := ioutil.ReadFile(filename) + bytes, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("error reading %s: %w", filename, err) } @@ -85,17 +88,89 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } } - disableCheck := false - if rawDisableCheck, ok := rawList["disableCheck"]; ok { - disableCheck, ok = rawDisableCheck.(bool) + rawVersions, ok := rawList["cniVersions"] + if ok { + // Parse the current package CNI version + rvs, ok := rawVersions.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions: %T", rvs) + } + vs := make([]string, 0, len(rvs)) + for i, rv := range rvs { + v, ok := rv.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid type for cniVersions index %d: %T", i, rv) + } + gt, err := version.GreaterThan(v, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersions entry %s at index %d: %w", v, i, err) + } else if !gt { + // Skip versions "greater" than this implementation of the spec + vs = append(vs, v) + } + } + + // if cniVersion was already set, append it to the list for sorting. 
+ if cniVersion != "" { + gt, err := version.GreaterThan(cniVersion, version.Current()) + if err != nil { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion %s: %w", cniVersion, err) + } else if !gt { + // ignore any versions higher than the current implemented spec version + vs = append(vs, cniVersion) + } + } + slices.SortFunc[[]string](vs, func(v1, v2 string) int { + if v1 == v2 { + return 0 + } + if gt, _ := version.GreaterThan(v1, v2); gt { + return 1 + } + return -1 + }) + if len(vs) > 0 { + cniVersion = vs[len(vs)-1] + } + } + + readBool := func(key string) (bool, error) { + rawVal, ok := rawList[key] if !ok { - return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + return false, nil } + if b, ok := rawVal.(bool); ok { + return b, nil + } + + s, ok := rawVal.(string) + if !ok { + return false, fmt.Errorf("error parsing configuration list: invalid type %T for %s", rawVal, key) + } + s = strings.ToLower(s) + switch s { + case "false": + return false, nil + case "true": + return true, nil + } + return false, fmt.Errorf("error parsing configuration list: invalid value %q for %s", s, key) + } + + disableCheck, err := readBool("disableCheck") + if err != nil { + return nil, err + } + + disableGC, err := readBool("disableGC") + if err != nil { + return nil, err } list := &NetworkConfigList{ Name: name, DisableCheck: disableCheck, + DisableGC: disableGC, CNIVersion: cniVersion, Bytes: bytes, } @@ -129,7 +204,7 @@ func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { } func ConfListFromFile(filename string) (*NetworkConfigList, error) { - bytes, err := ioutil.ReadFile(filename) + bytes, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("error reading %s: %w", filename, err) } @@ -138,7 +213,7 @@ func ConfListFromFile(filename string) (*NetworkConfigList, error) { func ConfFiles(dir string, extensions []string) ([]string, error) { // In part, adapted from rkt/networking/podenv.go#listFiles - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) switch { case err == nil: // break case os.IsNotExist(err): @@ -206,7 +281,8 @@ func LoadConfList(dir, name string) (*NetworkConfigList, error) { singleConf, err := LoadConf(dir, name) if err != nil { // A little extra logic so the error makes sense - if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + var ncfErr NoConfigsFoundError + if len(files) != 0 && errors.As(err, &ncfErr) { // Config lists found but no config files found return nil, NotFoundError{dir, name} } diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go index 8defe4dd39..c8b548e7c6 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -51,25 +51,34 @@ func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exe // DelegateCheck calls the given delegate plugin with the CNI CHECK action and // JSON configuration func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "CHECK") +} + +func delegateNoResult(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec, verb string) error { pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) if err != nil { return err } - // DelegateCheck will override the original CNI_COMMAND 
env from process with CHECK - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs(verb), realExec) } // DelegateDel calls the given delegate plugin with the CNI DEL action and // JSON configuration func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { - pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) - if err != nil { - return err - } + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "DEL") +} - // DelegateDel will override the original CNI_COMMAND env from process with DEL - return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +// DelegateStatus calls the given delegate plugin with the CNI STATUS action and +// JSON configuration +func DelegateStatus(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "STATUS") +} + +// DelegateGC calls the given delegate plugin with the CNI GC action and +// JSON configuration +func DelegateGC(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + return delegateNoResult(ctx, delegatePlugin, netconf, exec, "GC") } // return CNIArgs used by delegation diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index 3ad07aa8f2..a5e015fc92 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -81,17 +81,17 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // object to ExecPluginWithResult() to verify the incoming stdin and environment // and provide a tailored response: // -//import ( +// import ( // "encoding/json" // "path" // "strings" -//) +// ) // -//type fakeExec struct { +// type fakeExec struct { // version.PluginDecoder -//} +// } // -//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { // net := &types.NetConf{} // err := json.Unmarshal(stdinData, net) // if err != nil { @@ -109,14 +109,14 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { // } // } // return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil -//} +// } // -//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { // if len(paths) > 0 { // return path.Join(paths[0], plugin), nil // } // return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) -//} +// } func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { if exec == nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go index 9bcfb45536..ed0999bd0e 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris // +build darwin dragonfly freebsd linux netbsd openbsd solaris package invoke diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go similarity index 76% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go index c318385cbe..cffe136178 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_darwin.go @@ -1,4 +1,4 @@ -// Copyright 2013 Matt T. Proud +// Copyright 2022 CNI authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +package ns + +import "github.com/containernetworking/cni/pkg/types" + +func CheckNetNS(nsPath string) (bool, *types.Error) { + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go new file mode 100644 index 0000000000..3d58e75d6c --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_linux.go @@ -0,0 +1,50 @@ +// Copyright 2022 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ns + +import ( + "runtime" + + "github.com/vishvananda/netns" + + "github.com/containernetworking/cni/pkg/types" +) + +// Returns an object representing the current OS thread's network namespace +func getCurrentNS() (netns.NsHandle, error) { + // Lock the thread in case other goroutine executes in it and changes its + // network namespace after getCurrentThreadNetNSPath(), otherwise it might + // return an unexpected network namespace. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + return netns.Get() +} + +func CheckNetNS(nsPath string) (bool, *types.Error) { + ns, err := netns.GetFromPath(nsPath) + // Let plugins check whether nsPath from args is valid. Also support CNI DEL for empty nsPath as already-deleted nsPath. 
+ if err != nil { + return false, nil + } + defer ns.Close() + + pluginNS, err := getCurrentNS() + if err != nil { + return false, types.NewError(types.ErrInvalidNetNS, "get plugin's netns failed", "") + } + defer pluginNS.Close() + + return pluginNS.Equal(ns), nil +} diff --git a/vendor/github.com/petermattis/goid/goid_go1.5_amd64.go b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go similarity index 57% rename from vendor/github.com/petermattis/goid/goid_go1.5_amd64.go rename to vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go index d689355963..cffe136178 100644 --- a/vendor/github.com/petermattis/goid/goid_go1.5_amd64.go +++ b/vendor/github.com/containernetworking/cni/pkg/ns/ns_windows.go @@ -1,4 +1,4 @@ -// Copyright 2016 Peter Mattis. +// Copyright 2022 CNI authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -8,16 +8,14 @@ // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. See the AUTHORS file -// for names of contributors. +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -//go:build (amd64 || amd64p32) && gc && go1.5 -// +build amd64 amd64p32 -// +build gc -// +build go1.5 +package ns -package goid +import "github.com/containernetworking/cni/pkg/types" -func Get() int64 +func CheckNetNS(nsPath string) (bool, *types.Error) { + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go index cb8781972d..f29cf34594 100644 --- a/vendor/github.com/containernetworking/cni/pkg/skel/skel.go +++ b/vendor/github.com/containernetworking/cni/pkg/skel/skel.go @@ -19,13 +19,14 @@ package skel import ( "bytes" "encoding/json" + "errors" "fmt" "io" - "io/ioutil" "log" "os" "strings" + "github.com/containernetworking/cni/pkg/ns" "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/utils" "github.com/containernetworking/cni/pkg/version" @@ -34,12 +35,13 @@ import ( // CmdArgs captures all the arguments passed in to the plugin // via both env vars and stdin type CmdArgs struct { - ContainerID string - Netns string - IfName string - Args string - Path string - StdinData []byte + ContainerID string + Netns string + IfName string + Args string + Path string + NetnsOverride string + StdinData []byte } type dispatcher struct { @@ -55,21 +57,25 @@ type dispatcher struct { type reqForCmdEntry map[string]bool func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { - var cmd, contID, netns, ifName, args, path string + var cmd, contID, netns, ifName, args, path, netnsOverride string vars := []struct { - name string - val *string - reqForCmd reqForCmdEntry + name string + val *string + reqForCmd reqForCmdEntry + validateFn func(string) *types.Error }{ { "CNI_COMMAND", &cmd, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, }, + nil, }, { "CNI_CONTAINERID", @@ -79,6 +85,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { 
"CHECK": true, "DEL": true, }, + utils.ValidateContainerID, }, { "CNI_NETNS", @@ -88,6 +95,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": false, }, + nil, }, { "CNI_IFNAME", @@ -97,6 +105,7 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": true, "DEL": true, }, + utils.ValidateInterfaceName, }, { "CNI_ARGS", @@ -106,15 +115,29 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { "CHECK": false, "DEL": false, }, + nil, }, { "CNI_PATH", &path, reqForCmdEntry{ - "ADD": true, - "CHECK": true, - "DEL": true, + "ADD": true, + "CHECK": true, + "DEL": true, + "GC": true, + "STATUS": true, + }, + nil, + }, + { + "CNI_NETNS_OVERRIDE", + &netnsOverride, + reqForCmdEntry{ + "ADD": false, + "CHECK": false, + "DEL": false, }, + nil, }, } @@ -125,6 +148,10 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { if v.reqForCmd[cmd] || v.name == "CNI_COMMAND" { argsMissing = append(argsMissing, v.name) } + } else if v.reqForCmd[cmd] && v.validateFn != nil { + if err := v.validateFn(*v.val); err != nil { + return "", nil, err + } } } @@ -137,18 +164,25 @@ func (t *dispatcher) getCmdArgsFromEnv() (string, *CmdArgs, *types.Error) { t.Stdin = bytes.NewReader(nil) } - stdinData, err := ioutil.ReadAll(t.Stdin) + stdinData, err := io.ReadAll(t.Stdin) if err != nil { return "", nil, types.NewError(types.ErrIOFailure, fmt.Sprintf("error reading from stdin: %v", err), "") } + if cmd != "VERSION" { + if err := validateConfig(stdinData); err != nil { + return "", nil, err + } + } + cmdArgs := &CmdArgs{ - ContainerID: contID, - Netns: netns, - IfName: ifName, - Args: args, - Path: path, - StdinData: stdinData, + ContainerID: contID, + Netns: netns, + IfName: ifName, + Args: args, + Path: path, + StdinData: stdinData, + NetnsOverride: netnsOverride, } return cmd, cmdArgs, nil } @@ -163,8 +197,13 @@ func (t *dispatcher) checkVersionAndCall(cmdArgs *CmdArgs, pluginVersionInfo ver return types.NewError(types.ErrIncompatibleCNIVersion, "incompatible CNI versions", verErr.Details()) } + if toCall == nil { + return nil + } + if err = toCall(cmdArgs); err != nil { - if e, ok := err.(*types.Error); ok { + var e *types.Error + if errors.As(err, &e) { // don't wrap Error in Error return e } @@ -190,7 +229,7 @@ func validateConfig(jsonBytes []byte) *types.Error { return nil } -func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { +func (t *dispatcher) pluginMain(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { cmd, cmdArgs, err := t.getCmdArgsFromEnv() if err != nil { // Print the about string to stderr when no command is set @@ -202,21 +241,20 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, return err } - if cmd != "VERSION" { - if err = validateConfig(cmdArgs.StdinData); err != nil { - return err - } - if err = utils.ValidateContainerID(cmdArgs.ContainerID); err != nil { + switch cmd { + case "ADD": + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Add) + if err != nil { return err } - if err = utils.ValidateInterfaceName(cmdArgs.IfName); err != nil { - return err + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's 
netns and netns from CNI_NETNS should not be the same", "") + } } - } - - switch cmd { - case "ADD": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdAdd) case "CHECK": configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) if err != nil { @@ -232,7 +270,7 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, if err != nil { return types.NewError(types.ErrDecodingFailure, err.Error(), "") } else if gtet { - if err := t.checkVersionAndCall(cmdArgs, versionInfo, cmdCheck); err != nil { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Check); err != nil { return err } return nil @@ -240,7 +278,62 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, } return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow CHECK", "") case "DEL": - err = t.checkVersionAndCall(cmdArgs, versionInfo, cmdDel) + err = t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Del) + if err != nil { + return err + } + if strings.ToUpper(cmdArgs.NetnsOverride) != "TRUE" && cmdArgs.NetnsOverride != "1" { + isPluginNetNS, checkErr := ns.CheckNetNS(cmdArgs.Netns) + if checkErr != nil { + return checkErr + } else if isPluginNetNS { + return types.NewError(types.ErrInvalidNetNS, "plugin's netns and netns from CNI_NETNS should not be the same", "") + } + } + case "GC": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow GC", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.GC); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow GC", "") + case "STATUS": + configVersion, err := t.ConfVersionDecoder.Decode(cmdArgs.StdinData) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } + if gtet, err := version.GreaterThanOrEqualTo(configVersion, "1.1.0"); err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if !gtet { + return types.NewError(types.ErrIncompatibleCNIVersion, "config version does not allow STATUS", "") + } + for _, pluginVersion := range versionInfo.SupportedVersions() { + gtet, err := version.GreaterThanOrEqualTo(pluginVersion, configVersion) + if err != nil { + return types.NewError(types.ErrDecodingFailure, err.Error(), "") + } else if gtet { + if err := t.checkVersionAndCall(cmdArgs, versionInfo, funcs.Status); err != nil { + return err + } + return nil + } + } + return types.NewError(types.ErrIncompatibleCNIVersion, "plugin version does not allow STATUS", "") case "VERSION": if err := versionInfo.Encode(t.Stdout); err != nil { return types.NewError(types.ErrIOFailure, err.Error(), "") @@ -264,13 +357,63 @@ func (t *dispatcher) pluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, // // To let this package automatically handle errors and call os.Exit(1) for you, // use PluginMain() instead. 
+// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncsWithError instead. func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) *types.Error { + return PluginMainFuncsWithError(CNIFuncs{Add: cmdAdd, Check: cmdCheck, Del: cmdDel}, versionInfo, about) +} + +// CNIFuncs contains a group of callback command funcs to be passed in as +// parameters to the core "main" for a plugin. +type CNIFuncs struct { + Add func(_ *CmdArgs) error + Del func(_ *CmdArgs) error + Check func(_ *CmdArgs) error + GC func(_ *CmdArgs) error + Status func(_ *CmdArgs) error +} + +// PluginMainFuncsWithError is the core "main" for a plugin. It accepts +// callback functions defined within CNIFuncs and returns an error. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// It is the responsibility of the caller to check for non-nil error return. +// +// For a plugin to comply with the CNI spec, it must print any error to stdout +// as JSON and then exit with nonzero status code. +// +// To let this package automatically handle errors and call os.Exit(1) for you, +// use PluginMainFuncs() instead. +func PluginMainFuncsWithError(funcs CNIFuncs, versionInfo version.PluginInfo, about string) *types.Error { return (&dispatcher{ Getenv: os.Getenv, Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, - }).pluginMain(cmdAdd, cmdCheck, cmdDel, versionInfo, about) + }).pluginMain(funcs, versionInfo, about) +} + +// PluginMainFuncs is the core "main" for a plugin which includes automatic error handling. +// This is a newer alternative func to PluginMain which abstracts CNI commands within a +// CNIFuncs interface. +// +// The caller must also specify what CNI spec versions the plugin supports. +// +// The caller can specify an "about" string, which is printed on stderr +// when no CNI_COMMAND is specified. The recommended output is "CNI plugin v" +// +// When an error occurs in any func in CNIFuncs, PluginMainFuncs will print the error +// as JSON to stdout and call os.Exit(1). +// +// To have more control over error handling, use PluginMainFuncsWithError() instead. +func PluginMainFuncs(funcs CNIFuncs, versionInfo version.PluginInfo, about string) { + if e := PluginMainFuncsWithError(funcs, versionInfo, about); e != nil { + if err := e.Print(); err != nil { + log.Print("Error writing error JSON to stdout: ", err) + } + os.Exit(1) + } } // PluginMain is the core "main" for a plugin which includes automatic error handling. @@ -284,6 +427,8 @@ func PluginMainWithError(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versio // as JSON to stdout and call os.Exit(1). // // To have more control over error handling, use PluginMainWithError() instead. +// +// Deprecated: Use github.com/containernetworking/cni/pkg/skel.PluginMainFuncs instead. 
func PluginMain(cmdAdd, cmdCheck, cmdDel func(_ *CmdArgs) error, versionInfo version.PluginInfo, about string) { if e := PluginMainWithError(cmdAdd, cmdCheck, cmdDel, versionInfo, about); e != nil { if err := e.Print(); err != nil { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go index 0e1e8b857b..f58b91206d 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -26,9 +26,10 @@ import ( convert "github.com/containernetworking/cni/pkg/types/internal" ) -const ImplementedSpecVersion string = "1.0.0" +// The types did not change between v1.0 and v1.1 +const ImplementedSpecVersion string = "1.1.0" -var supportedVersions = []string{ImplementedSpecVersion} +var supportedVersions = []string{"1.0.0", "1.1.0"} // Register converters for all versions less than the implemented spec version func init() { @@ -38,10 +39,14 @@ func init() { convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("1.0.0", []string{"1.1.0"}, convertFrom100) // Down-converters convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.1.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("1.1.0", []string{"1.0.0"}, convertFrom100) // Creator convert.RegisterCreator(supportedVersions, NewResult) @@ -90,12 +95,49 @@ type Result struct { DNS types.DNS `json:"dns,omitempty"` } +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to write a Marshal function +func (r *Result) MarshalJSON() ([]byte, error) { + // use type alias to escape recursion for json.Marshal() to MarshalJSON() + type fixObjType = Result + + bytes, err := json.Marshal(fixObjType(*r)) //nolint:all + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if r.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) +} + +// convertFrom100 does nothing except set the version; the types are the same +func convertFrom100(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + + result := &Result{ + CNIVersion: toVersion, + Interfaces: fromResult.Interfaces, + IPs: fromResult.IPs, + Routes: fromResult.Routes, + DNS: fromResult.DNS, + } + return result, nil +} + func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { result040, err := convert.Convert(from, "0.4.0") if err != nil { return nil, err } - result100, err := convertFrom04x(result040, ImplementedSpecVersion) + result100, err := convertFrom04x(result040, toVersion) if err != nil { return nil, err } @@ -226,9 +268,12 @@ func (r *Result) PrintTo(writer io.Writer) error { // Interface contains values about the created interfaces type Interface struct { - Name string `json:"name"` - Mac string `json:"mac,omitempty"` - Sandbox string `json:"sandbox,omitempty"` + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Mtu int 
`json:"mtu,omitempty"` + Sandbox string `json:"sandbox,omitempty"` + SocketPath string `json:"socketPath,omitempty"` + PciID string `json:"pciID,omitempty"` } func (i *Interface) String() string { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go index 7516f03ef5..68a602bfdb 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/args.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -26,8 +26,8 @@ import ( type UnmarshallableBool bool // UnmarshalText implements the encoding.TextUnmarshaler interface. -// Returns boolean true if the string is "1" or "[Tt]rue" -// Returns boolean false if the string is "0" or "[Ff]alse" +// Returns boolean true if the string is "1" or "true" or "True" +// Returns boolean false if the string is "0" or "false" or "False” func (b *UnmarshallableBool) UnmarshalText(data []byte) error { s := strings.ToLower(string(data)) switch s { diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go index ed28b33e8e..452cb62201 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -19,6 +19,9 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" + _ "github.com/containernetworking/cni/pkg/types/020" + _ "github.com/containernetworking/cni/pkg/types/040" + _ "github.com/containernetworking/cni/pkg/types/100" convert "github.com/containernetworking/cni/pkg/types/internal" ) diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index fba17dfc0f..8453bb5d87 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -56,30 +56,73 @@ func (n *IPNet) UnmarshalJSON(data []byte) error { return nil } -// NetConf describes a network. -type NetConf struct { +// NetConfType describes a network. +type NetConfType struct { CNIVersion string `json:"cniVersion,omitempty"` Name string `json:"name,omitempty"` Type string `json:"type,omitempty"` Capabilities map[string]bool `json:"capabilities,omitempty"` IPAM IPAM `json:"ipam,omitempty"` - DNS DNS `json:"dns"` + DNS DNS `json:"dns,omitempty"` RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` PrevResult Result `json:"-"` + + // ValidAttachments is only supplied when executing a GC operation + ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"` +} + +// NetConf is defined as different type as custom MarshalJSON() and issue #1096 +type NetConf NetConfType + +// GCAttachment is the parameters to a GC call -- namely, +// the container ID and ifname pair that represents a +// still-valid attachment. 
+type GCAttachment struct { + ContainerID string `json:"containerID"` + IfName string `json:"ifname"` +} + +// Note: DNS should be omit if DNS is empty but default Marshal function +// will output empty structure hence need to write a Marshal function +func (n *NetConfType) MarshalJSON() ([]byte, error) { + // use type alias to escape recursion for json.Marshal() to MarshalJSON() + type fixObjType = NetConf + + bytes, err := json.Marshal(fixObjType(*n)) + if err != nil { + return nil, err + } + + fixupObj := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &fixupObj); err != nil { + return nil, err + } + + if n.DNS.IsEmpty() { + delete(fixupObj, "dns") + } + + return json.Marshal(fixupObj) } type IPAM struct { Type string `json:"type,omitempty"` } +// IsEmpty returns true if IPAM structure has no value, otherwise return false +func (i *IPAM) IsEmpty() bool { + return i.Type == "" +} + // NetConfList describes an ordered list of networks. type NetConfList struct { CNIVersion string `json:"cniVersion,omitempty"` Name string `json:"name,omitempty"` DisableCheck bool `json:"disableCheck,omitempty"` + DisableGC bool `json:"disableGC,omitempty"` Plugins []*NetConf `json:"plugins,omitempty"` } @@ -116,31 +159,48 @@ type DNS struct { Options []string `json:"options,omitempty"` } +// IsEmpty returns true if DNS structure has no value, otherwise return false +func (d *DNS) IsEmpty() bool { + if len(d.Nameservers) == 0 && d.Domain == "" && len(d.Search) == 0 && len(d.Options) == 0 { + return true + } + return false +} + func (d *DNS) Copy() *DNS { if d == nil { return nil } to := &DNS{Domain: d.Domain} - for _, ns := range d.Nameservers { - to.Nameservers = append(to.Nameservers, ns) - } - for _, s := range d.Search { - to.Search = append(to.Search, s) - } - for _, o := range d.Options { - to.Options = append(to.Options, o) - } + to.Nameservers = append(to.Nameservers, d.Nameservers...) + to.Search = append(to.Search, d.Search...) + to.Options = append(to.Options, d.Options...) 
return to } type Route struct { - Dst net.IPNet - GW net.IP + Dst net.IPNet + GW net.IP + MTU int + AdvMSS int + Priority int + Table *int + Scope *int } func (r *Route) String() string { - return fmt.Sprintf("%+v", *r) + table := "" + if r.Table != nil { + table = fmt.Sprintf("%d", *r.Table) + } + + scope := "" + if r.Scope != nil { + scope = fmt.Sprintf("%d", *r.Scope) + } + + return fmt.Sprintf("{Dst:%+v GW:%v MTU:%d AdvMSS:%d Priority:%d Table:%s Scope:%s}", r.Dst, r.GW, r.MTU, r.AdvMSS, r.Priority, table, scope) } func (r *Route) Copy() *Route { @@ -148,14 +208,30 @@ func (r *Route) Copy() *Route { return nil } - return &Route{ - Dst: r.Dst, - GW: r.GW, + route := &Route{ + Dst: r.Dst, + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Scope: r.Scope, + } + + if r.Table != nil { + table := *r.Table + route.Table = &table } + + if r.Scope != nil { + scope := *r.Scope + route.Scope = &scope + } + + return route } // Well known error codes -// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +// see https://github.com/containernetworking/cni/blob/main/SPEC.md#well-known-error-codes const ( ErrUnknown uint = iota // 0 ErrIncompatibleCNIVersion // 1 @@ -165,6 +241,7 @@ const ( ErrIOFailure // 5 ErrDecodingFailure // 6 ErrInvalidNetworkConfig // 7 + ErrInvalidNetNS // 8 ErrTryAgainLater uint = 11 ErrInternal uint = 999 ) @@ -200,8 +277,13 @@ func (e *Error) Print() error { // JSON (un)marshallable types type route struct { - Dst IPNet `json:"dst"` - GW net.IP `json:"gw,omitempty"` + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` + MTU int `json:"mtu,omitempty"` + AdvMSS int `json:"advmss,omitempty"` + Priority int `json:"priority,omitempty"` + Table *int `json:"table,omitempty"` + Scope *int `json:"scope,omitempty"` } func (r *Route) UnmarshalJSON(data []byte) error { @@ -212,13 +294,24 @@ func (r *Route) UnmarshalJSON(data []byte) error { r.Dst = net.IPNet(rt.Dst) r.GW = rt.GW + r.MTU = rt.MTU + r.AdvMSS = rt.AdvMSS + r.Priority = rt.Priority + r.Table = rt.Table + r.Scope = rt.Scope + return nil } func (r Route) MarshalJSON() ([]byte, error) { rt := route{ - Dst: IPNet(r.Dst), - GW: r.GW, + Dst: IPNet(r.Dst), + GW: r.GW, + MTU: r.MTU, + AdvMSS: r.AdvMSS, + Priority: r.Priority, + Table: r.Table, + Scope: r.Scope, } return json.Marshal(rt) diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go index b8ec388745..1981d25569 100644 --- a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -36,7 +36,6 @@ var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) // ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters func ValidateContainerID(containerID string) *types.Error { - if containerID == "" { return types.NewError(types.ErrUnknownContainer, "missing containerID", "") } @@ -48,7 +47,6 @@ func ValidateContainerID(containerID string) *types.Error { // ValidateNetworkName will validate that the supplied networkName does not contain invalid characters func ValidateNetworkName(networkName string) *types.Error { - if networkName == "" { return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") } @@ -58,11 +56,11 @@ func ValidateNetworkName(networkName string) *types.Error { return nil } -// ValidateInterfaceName will validate the interface name based on the three rules below 
+// ValidateInterfaceName will validate the interface name based on the four rules below // 1. The name must not be empty // 2. The name must be less than 16 characters // 3. The name must not be "." or ".." -// 3. The name must not contain / or : or any whitespace characters +// 4. The name must not contain / or : or any whitespace characters // ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024 func ValidateInterfaceName(ifName string) *types.Error { if len(ifName) == 0 { diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go index 17b22b6b0c..e3bd375bca 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -142,3 +142,27 @@ func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { } return false, nil } + +// GreaterThan returns true if the first version is greater than the second +func GreaterThan(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro > secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go index 1326f8038e..a4d442c8ec 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -19,13 +19,12 @@ import ( "fmt" "github.com/containernetworking/cni/pkg/types" - types100 "github.com/containernetworking/cni/pkg/types/100" "github.com/containernetworking/cni/pkg/types/create" ) // Current reports the version of the CNI spec implemented by this library func Current() string { - return types100.ImplementedSpecVersion + return "1.1.0" } // Legacy PluginInfo describes a plugin that is backwards compatible with the @@ -35,8 +34,10 @@ func Current() string { // // Any future CNI spec versions which meet this definition should be added to // this list. 
-var Legacy = PluginSupports("0.1.0", "0.2.0") -var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") +var ( + Legacy = PluginSupports("0.1.0", "0.2.0") + All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0", "1.1.0") +) // VersionsFrom returns a list of versions starting from min, inclusive func VersionsStartingFrom(min string) PluginInfo { diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca9..9e790390b6 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,17 @@ # Change history of go-restful + +## [v3.12.0] - 2024-03-11 +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index e3e30080ec..7234604e47 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) @@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` -- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99f..80adf55fdf 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. 
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824f5..9808752acd 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166a..0000000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7f..0000000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e94..a9b3faaa81 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 0000000000..e9bb0efe77 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. 
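// Compared with the stock encoding/json decoder it was copied from, the code
// below always decodes numbers destined for interface values as the local
// Number type (useNumber is forced on) and records object key order, exposed
// through UnmarshalWithKeys. A minimal sketch of that extra entry point,
// assuming the package is imported under the name "json":
//
//	var m map[string]interface{}
//	keys, err := json.UnmarshalWithKeys(data, &m)
//	if err != nil { /* handle decode error */ }
//	// keys lists the top-level object's keys in the order they appear in data.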
+ +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. 
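//
// A small illustrative example of the struct rules above (field names match
// case-insensitively, unknown keys are ignored by default):
//
//	type Point struct{ X, Y int }
//	var p Point
//	err := Unmarshal([]byte(`{"x":1,"y":2,"z":3}`), &p) // p == Point{X: 1, Y: 2}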
+// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. 
+// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. +func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. 
+ start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. 
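	// For example, map[string]T, map[int64]T, or a key type such as net.IP whose
	// pointer implements encoding.TextUnmarshaler all pass the checks below;
	// any other key type is reported as an UnmarshalTypeError and the object is skipped.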
+ switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' 
|| c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. 
+ rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go new file mode 100644 index 0000000000..2e6eca4487 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -0,0 +1,1486 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML