diff --git a/.gitignore b/.gitignore index 2d1ed11ee39..d34b9e404af 100644 --- a/.gitignore +++ b/.gitignore @@ -4,8 +4,11 @@ *~ _book/ lib/jemalloc +cmake-build-debug/ tests/internal/flb_tests_internal.h tests/runtime/flb_tests_runtime.h +tests/internal/cmake-build-debug/ +tests/runtime/cmake-build-debug/ build/* include/fluent-bit/flb_info.h include/fluent-bit/flb_plugins.h diff --git a/include/fluent-bit/flb_hash.h b/include/fluent-bit/flb_hash.h index 29b9f7b8ef5..42666c75433 100644 --- a/include/fluent-bit/flb_hash.h +++ b/include/fluent-bit/flb_hash.h @@ -55,6 +55,7 @@ struct flb_hash { int max_entries; int total_count; int cache_ttl; + int force_remove_pointer; size_t size; struct mk_list entries; struct flb_hash_table *table; @@ -63,6 +64,8 @@ struct flb_hash { struct flb_hash *flb_hash_create(int evict_mode, size_t size, int max_entries); struct flb_hash *flb_hash_create_with_ttl(int cache_ttl, int evict_mode, size_t size, int max_entries); +struct flb_hash *flb_hash_create_with_ttl_force_destroy(int cache_ttl, int evict_mode, + size_t size, int max_entries); void flb_hash_destroy(struct flb_hash *ht); int flb_hash_add(struct flb_hash *ht, diff --git a/plugins/filter_kubernetes/kube_conf.c b/plugins/filter_kubernetes/kube_conf.c index eb409fcee54..7acfaacc23f 100644 --- a/plugins/filter_kubernetes/kube_conf.c +++ b/plugins/filter_kubernetes/kube_conf.c @@ -99,6 +99,12 @@ struct flb_kube *flb_kube_conf_create(struct flb_filter_instance *ins, ctx->api_https = FLB_FALSE; } + if (ctx->use_pod_association) { + ctx->kubernetes_api_host = flb_strdup(FLB_API_HOST); + ctx->kubernetes_api_port = FLB_API_PORT; + } + + } else if (!url) { ctx->api_host = flb_strdup(FLB_API_HOST); @@ -190,6 +196,12 @@ struct flb_kube *flb_kube_conf_create(struct flb_filter_instance *ins, flb_plg_info(ctx->ins, "https=%i host=%s port=%i", ctx->api_https, ctx->api_host, ctx->api_port); } + + + ctx->pod_hash_table = flb_hash_create_with_ttl_force_destroy(ctx->pod_service_map_ttl, + 
FLB_HASH_EVICT_OLDER, + FLB_HASH_TABLE_SIZE, + FLB_HASH_TABLE_SIZE); return ctx; } @@ -203,6 +215,10 @@ void flb_kube_conf_destroy(struct flb_kube *ctx) flb_hash_destroy(ctx->hash_table); } + if (ctx->pod_hash_table) { + flb_hash_destroy(ctx->pod_hash_table); + } + if (ctx->merge_log == FLB_TRUE) { flb_free(ctx->unesc_buf); } @@ -211,6 +227,9 @@ void flb_kube_conf_destroy(struct flb_kube *ctx) if (ctx->parser == NULL && ctx->regex) { flb_regex_destroy(ctx->regex); } + if (ctx->deploymentRegex) { + flb_regex_destroy(ctx->deploymentRegex); + } flb_free(ctx->api_host); flb_free(ctx->token); @@ -222,6 +241,24 @@ void flb_kube_conf_destroy(struct flb_kube *ctx) flb_upstream_destroy(ctx->upstream); } + if(ctx->pod_association_tls) { + flb_tls_destroy(ctx->pod_association_tls); + } + + if (ctx->pod_association_upstream) { + flb_upstream_destroy(ctx->pod_association_upstream); + } + + if (ctx->kubernetes_upstream) { + flb_upstream_destroy(ctx->kubernetes_upstream); + } + if (ctx->kubernetes_api_host) { + flb_free(ctx->kubernetes_api_host); + } + if (ctx->platform) { + flb_free(ctx->platform); + } + #ifdef FLB_HAVE_TLS if (ctx->tls) { flb_tls_destroy(ctx->tls); diff --git a/plugins/filter_kubernetes/kube_conf.h b/plugins/filter_kubernetes/kube_conf.h index 31bc8015408..507fa39d0ee 100644 --- a/plugins/filter_kubernetes/kube_conf.h +++ b/plugins/filter_kubernetes/kube_conf.h @@ -64,8 +64,40 @@ #define FLB_KUBE_TAG_PREFIX "kube.var.log.containers." #endif +/* + * Maximum attribute length for Entity's KeyAttributes + * values + * https://docs.aws.amazon.com/applicationsignals/latest/APIReference/API_Service.html#:~:text=Maximum%20length%20of%201024. 
+ */ +#define KEY_ATTRIBUTES_MAX_LEN 1024 +#define SERVICE_NAME_SOURCE_MAX_LEN 64 + +/* + * Configmap used for verifying whether if FluentBit is + * on EKS or native Kubernetes + */ +#define KUBE_SYSTEM_NAMESPACE "kube-system" +#define AWS_AUTH_CONFIG_MAP "aws-auth" + +/* + * Possible platform values for Kubernetes plugin + */ +#define NATIVE_KUBERNETES_PLATFORM "k8s" +#define EKS_PLATFORM "eks" + struct kube_meta; +struct service_attributes { + char name[KEY_ATTRIBUTES_MAX_LEN]; + int name_len; + char environment[KEY_ATTRIBUTES_MAX_LEN]; + int environment_len; + char name_source[SERVICE_NAME_SOURCE_MAX_LEN]; + int name_source_len; + int fields; + +}; + /* Filter context */ struct flb_kube { /* Configuration parameters */ @@ -119,6 +151,7 @@ struct flb_kube { /* Regex context to parse records */ struct flb_regex *regex; + struct flb_regex *deploymentRegex; struct flb_parser *parser; /* TLS CA certificate file */ @@ -158,6 +191,45 @@ struct flb_kube { int kube_meta_cache_ttl; + /* Configuration used for enabling pod to service name mapping*/ + int use_pod_association; + char *pod_association_host; + char *pod_association_endpoint; + int pod_association_port; + + /* + * TTL is used to check how long should the mapped entry + * remain in the hash table + */ + struct flb_hash *pod_hash_table; + int pod_service_map_ttl; + int pod_service_map_refresh_interval; + flb_sds_t pod_service_preload_cache_path; + struct flb_upstream *pod_association_upstream; + /* + * This connection is used for calling Kubernetes configmaps + * endpoint so pod association can determine the environment. + * Example: EKS or Native Kubernetes. + */ + char *kubernetes_api_host; + int kubernetes_api_port; + struct flb_upstream *kubernetes_upstream; + char *platform; + /* + * This value is used for holding the platform config + * value. 
Platform will be overriden with this variable + * if it's set + */ + char *set_platform; + + //Agent TLS certs + struct flb_tls *pod_association_tls; + char *pod_association_host_server_ca_file; + char *pod_association_host_client_cert_file; + char *pod_association_host_client_key_file; + int pod_association_host_tls_debug; + int pod_association_host_tls_verify; + struct flb_tls *tls; struct flb_config *config; diff --git a/plugins/filter_kubernetes/kube_meta.c b/plugins/filter_kubernetes/kube_meta.c index 35067ce0580..077324835cc 100644 --- a/plugins/filter_kubernetes/kube_meta.c +++ b/plugins/filter_kubernetes/kube_meta.c @@ -346,8 +346,9 @@ static int get_meta_file_info(struct flb_kube *ctx, const char *namespace, * this could send out HTTP Request either to KUBE Server API or Kubelet */ static int get_meta_info_from_request(struct flb_kube *ctx, + struct flb_upstream *upstream, const char *namespace, - const char *podname, + const char *resource, char **buffer, size_t *size, int *root_type, char* uri) @@ -358,11 +359,11 @@ static int get_meta_info_from_request(struct flb_kube *ctx, size_t b_sent; int packed; - if (!ctx->upstream) { + if (!upstream) { return -1; } - u_conn = flb_upstream_conn_get(ctx->upstream); + u_conn = flb_upstream_conn_get(upstream); if (!u_conn) { flb_plg_error(ctx->ins, "kubelet upstream connection error"); @@ -388,9 +389,9 @@ static int get_meta_info_from_request(struct flb_kube *ctx, } ret = flb_http_do(c, &b_sent); - flb_plg_debug(ctx->ins, "Request (ns=%s, pod=%s) http_do=%i, " + flb_plg_debug(ctx->ins, "Request (ns=%s, resource=%s) http_do=%i, " "HTTP Status: %i", - namespace, podname, ret, c->resp.status); + namespace, resource, ret, c->resp.status); if (ret != 0 || c->resp.status != 200) { if (c->resp.payload_size > 0) { @@ -440,7 +441,7 @@ static int get_pods_from_kubelet(struct flb_kube *ctx, } flb_plg_debug(ctx->ins, "Send out request to Kubelet for pods information."); - packed = get_meta_info_from_request(ctx, namespace, 
podname, + packed = get_meta_info_from_request(ctx, ctx->upstream, namespace, podname, &buf, &size, &root_type, uri); } @@ -455,6 +456,51 @@ static int get_pods_from_kubelet(struct flb_kube *ctx, return 0; } +/* Gather metadata from API Server */ +static int get_api_server_configmap(struct flb_kube *ctx, + const char *namespace, const char *configmap, + char **out_buf, size_t *out_size) +{ + int ret; + int packed = -1; + int root_type; + char uri[1024]; + char *buf; + size_t size; + + *out_buf = NULL; + *out_size = 0; + + if (packed == -1) { + + ret = snprintf(uri, sizeof(uri) - 1, FLB_KUBE_API_CONFIGMAP_FMT, namespace, + configmap); + + if (ret == -1) { + return -1; + } + flb_plg_debug(ctx->ins, + "Send out request to API Server for configmap information"); + if(ctx->use_kubelet) { + packed = get_meta_info_from_request(ctx,ctx->kubernetes_upstream, namespace, configmap, + &buf, &size, &root_type, uri); + } else { + packed = get_meta_info_from_request(ctx,ctx->upstream, namespace, configmap, + &buf, &size, &root_type, uri); + } + } + + /* validate pack */ + if (packed == -1) { + return -1; + } + + *out_buf = buf; + *out_size = size; + + return 0; +} + /* Gather metadata from API Server */ static int get_api_server_info(struct flb_kube *ctx, const char *namespace, const char *podname, @@ -484,7 +530,7 @@ static int get_api_server_info(struct flb_kube *ctx, } flb_plg_debug(ctx->ins, "Send out request to API Server for pods information"); - packed = get_meta_info_from_request(ctx, namespace, podname, + packed = get_meta_info_from_request(ctx, ctx->upstream, namespace, podname, &buf, &size, &root_type, uri); } @@ -499,6 +545,22 @@ static int get_api_server_info(struct flb_kube *ctx, return 0; } +/* Gather pods list information from Kubelet */ +static void get_cluster_from_environment(struct flb_kube *ctx,struct flb_kube_meta *meta) +{ + if(meta->cluster == NULL) { + char* cluster_name = getenv("CLUSTER_NAME"); + if(cluster_name) { + meta->cluster = 
strdup(cluster_name); + meta->cluster_len = strlen(cluster_name); + meta->fields++; + } else { + free(cluster_name); + } + flb_plg_debug(ctx->ins, "Cluster name is %s.", meta->cluster); + } +} + static void cb_results(const char *name, const char *value, size_t vlen, void *data) { @@ -705,6 +767,126 @@ static void extract_container_hash(struct flb_kube_meta *meta, } } +static void cb_results_workload(const char *name, const char *value, + size_t vlen, void *data) +{ + if (name == NULL || value == NULL || vlen == 0 || data == NULL) { + return; + } + + struct flb_kube_meta *meta = data; + + if (meta->workload == NULL && strcmp(name, "deployment") == 0) { + meta->workload = flb_strndup(value, vlen); + meta->workload_len = vlen; + meta->fields++; + } +} + +/* + * Search workload based on the following priority + * where the top is highest priority + * 1. Deployment name + * 2. StatefulSet name + * 3. DaemonSet name + * 4. Job name + * 5. CronJob name + * 6. Pod name + * 7. Container name + */ +static void search_workload(struct flb_kube_meta *meta,struct flb_kube *ctx,msgpack_object map) +{ + int i,j,ownerIndex; + int regex_found; + int replicaset_match; + int podname_match = FLB_FALSE; + int workload_found = FLB_FALSE; + msgpack_object k, v; + msgpack_object_map ownerMap; + struct flb_regex_search result; + /* Temporary variable to store the workload value */ + msgpack_object workload_val; + + for (i = 0; i < map.via.map.size; i++) { + + k = map.via.map.ptr[i].key; + v = map.via.map.ptr[i].val; + if (strncmp(k.via.str.ptr, "name", k.via.str.size) == 0) { + + if (!strncmp(v.via.str.ptr, meta->podname, v.via.str.size)) { + podname_match = FLB_TRUE; + } + + } + /* Example JSON for the below parsing: + * "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "name": "my-replicaset", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ]*/ + if (podname_match && strncmp(k.via.str.ptr, 
"ownerReferences", k.via.str.size) == 0 && v.type == MSGPACK_OBJECT_ARRAY) { + for (j = 0; j < v.via.array.size; j++) { + if (v.via.array.ptr[j].type == MSGPACK_OBJECT_MAP) { + ownerMap = v.via.array.ptr[j].via.map; + for (ownerIndex = 0; ownerIndex < ownerMap.size; ownerIndex++) { + msgpack_object key = ownerMap.ptr[ownerIndex].key; + msgpack_object val = ownerMap.ptr[ownerIndex].val; + + /* Ensure both key and value are strings */ + if (key.type == MSGPACK_OBJECT_STR && val.type == MSGPACK_OBJECT_STR) { + if (strncmp(key.via.str.ptr, "kind", key.via.str.size) == 0 && strncmp(val.via.str.ptr, "ReplicaSet", val.via.str.size) == 0) { + replicaset_match = FLB_TRUE; + } + + if (strncmp(key.via.str.ptr, "name", key.via.str.size) == 0) { + /* Store the value of 'name' in workload_val so it can be reused by set_workload */ + workload_val = val; + workload_found = FLB_TRUE; + if (replicaset_match) { + regex_found = flb_regex_do(ctx->deploymentRegex, val.via.str.ptr, val.via.str.size, &result); + if (regex_found > 0) { + /* Parse regex results */ + flb_regex_parse(ctx->deploymentRegex, &result, cb_results_workload, meta); + } else { + /* Set workload if regex does not match */ + goto set_workload; + } + } else { + /* Set workload if not a replicaset match */ + goto set_workload; + } + } + } + } + } + } + } + } + if(!workload_found) { + if(meta->podname != NULL) { + meta->workload = flb_strndup(meta->podname, meta->podname_len); + meta->workload_len = meta->podname_len; + meta->fields++; + } else if (meta->container_name != NULL) { + meta->workload = flb_strndup(meta->container_name, meta->container_name_len); + meta->workload_len = meta->container_name_len; + meta->fields++; + } + } + +return; + +set_workload: + meta->workload = flb_strndup(workload_val.via.str.ptr, workload_val.via.str.size); + meta->workload_len = workload_val.via.str.size; + meta->fields++; +} + static int search_podname_and_namespace(struct flb_kube_meta *meta, struct flb_kube *ctx, msgpack_object map) 
@@ -846,6 +1028,26 @@ static int search_item_in_items(struct flb_kube_meta *meta, return ret; } +static char* find_fallback_environment(struct flb_kube *ctx, struct flb_kube_meta *meta) { + char *fallback_env = NULL; + + /* + * Possible fallback environments: + * 1. eks:cluster-name/namespace + * 2. k8s:cluster-name/namespace + */ + if(ctx->platform == NULL && ctx->set_platform != NULL) { + ctx->platform = flb_strdup(ctx->set_platform); + } + if (ctx->platform != NULL && meta->cluster != NULL && meta->namespace != NULL) { + int ret = asprintf(&fallback_env, "%s:%s/%s", ctx->platform, meta->cluster, meta->namespace); + if (ret == -1) { + return NULL; + } + return fallback_env; + } + return NULL; +} static int merge_meta_from_tag(struct flb_kube *ctx, struct flb_kube_meta *meta, char **out_buf, size_t *out_size) @@ -917,7 +1119,9 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, int have_labels = -1; int have_annotations = -1; int have_nodename = -1; + int pod_service_found = -1; size_t off = 0; + size_t tmp_service_attr_size = 0; msgpack_sbuffer mp_sbuf; msgpack_packer mp_pck; @@ -932,7 +1136,7 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, msgpack_object api_map; msgpack_object ann_map; struct flb_kube_props props = {0}; - + struct service_attributes *tmp_service_attributes = {0}; /* * - reg_buf: is a msgpack Map containing meta captured using Regex * @@ -988,6 +1192,9 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, k = api_map.via.map.ptr[i].key; if (k.via.str.size == 8 && !strncmp(k.via.str.ptr, "metadata", 8)) { meta_val = api_map.via.map.ptr[i].val; + if(ctx ->use_pod_association) { + search_workload(meta,ctx,meta_val); + } if (meta_val.type == MSGPACK_OBJECT_MAP) { meta_found = FLB_TRUE; } @@ -1053,6 +1260,29 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, } } } + int fallback_environment_len = 0; + char *fallback_environment = NULL; + 
if(ctx->use_pod_association) { + fallback_environment = find_fallback_environment(ctx,meta); + if(fallback_environment) { + fallback_environment_len = strlen(fallback_environment); + } + pod_service_found = flb_hash_get(ctx->pod_hash_table, + meta->podname, meta->podname_len, + &tmp_service_attributes, &tmp_service_attr_size); + if (pod_service_found != -1 && tmp_service_attributes != NULL) { + map_size += tmp_service_attributes->fields; + } + if(pod_service_found != -1 && tmp_service_attributes != NULL && tmp_service_attributes->environment[0] == '\0' && fallback_environment) { + map_size++; + } + if(pod_service_found == -1 && meta->workload != NULL && fallback_environment) { + map_size++; + } + if(ctx->platform) { + map_size++; + } + } /* Set map size: current + pod_id, labels and annotations */ map_size += meta->fields; @@ -1071,6 +1301,58 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, msgpack_pack_str(&mp_pck, meta->namespace_len); msgpack_pack_str_body(&mp_pck, meta->namespace, meta->namespace_len); } + if(ctx->use_pod_association) { + if (pod_service_found != -1 && tmp_service_attributes != NULL) { + if (tmp_service_attributes->name[0] != '\0') { + msgpack_pack_str(&mp_pck, 12); + msgpack_pack_str_body(&mp_pck, "service_name", 12); + msgpack_pack_str(&mp_pck, tmp_service_attributes->name_len); + msgpack_pack_str_body(&mp_pck, tmp_service_attributes->name, tmp_service_attributes->name_len); + } + if (tmp_service_attributes->environment[0] != '\0') { + msgpack_pack_str(&mp_pck, 11); + msgpack_pack_str_body(&mp_pck, "environment", 11); + msgpack_pack_str(&mp_pck, tmp_service_attributes->environment_len); + msgpack_pack_str_body(&mp_pck, tmp_service_attributes->environment, tmp_service_attributes->environment_len); + } else if(tmp_service_attributes->environment[0] == '\0' && fallback_environment) { + msgpack_pack_str(&mp_pck, 11); + msgpack_pack_str_body(&mp_pck, "environment", 11); + msgpack_pack_str(&mp_pck, 
fallback_environment_len); + msgpack_pack_str_body(&mp_pck, fallback_environment, fallback_environment_len); + } + if (tmp_service_attributes->name_source[0] != '\0') { + msgpack_pack_str(&mp_pck, 11); + msgpack_pack_str_body(&mp_pck, "name_source", 11); + msgpack_pack_str(&mp_pck, tmp_service_attributes->name_source_len); + msgpack_pack_str_body(&mp_pck, tmp_service_attributes->name_source, tmp_service_attributes->name_source_len); + } + } else if ( pod_service_found == -1 && meta->workload != NULL && fallback_environment) { + msgpack_pack_str(&mp_pck, 11); + msgpack_pack_str_body(&mp_pck, "environment", 11); + msgpack_pack_str(&mp_pck, fallback_environment_len); + msgpack_pack_str_body(&mp_pck, fallback_environment, fallback_environment_len); + } + + if(ctx->platform != NULL) { + int platform_len = strlen(ctx->platform); + msgpack_pack_str(&mp_pck, 8); + msgpack_pack_str_body(&mp_pck, "platform", 8); + msgpack_pack_str(&mp_pck, platform_len); + msgpack_pack_str_body(&mp_pck, ctx->platform, platform_len); + } + if (meta->cluster != NULL) { + msgpack_pack_str(&mp_pck, 7); + msgpack_pack_str_body(&mp_pck, "cluster", 7); + msgpack_pack_str(&mp_pck, meta->cluster_len); + msgpack_pack_str_body(&mp_pck, meta->cluster, meta->cluster_len); + } + if (meta->workload != NULL) { + msgpack_pack_str(&mp_pck, 8); + msgpack_pack_str_body(&mp_pck, "workload", 8); + msgpack_pack_str(&mp_pck, meta->workload_len); + msgpack_pack_str_body(&mp_pck, meta->workload, meta->workload_len); + } + } /* Append API Server content */ if (have_uid >= 0) { @@ -1178,6 +1460,10 @@ static int merge_meta(struct flb_kube_meta *meta, struct flb_kube *ctx, *out_buf = mp_sbuf.data; *out_size = mp_sbuf.size; + if(fallback_environment) { + flb_free(fallback_environment); + } + return 0; } @@ -1186,8 +1472,9 @@ static inline int extract_meta(struct flb_kube *ctx, const char *data, size_t data_size, struct flb_kube_meta *meta) { - int i; + int i, pod_service_found; size_t off = 0; + size_t 
tmp_service_attr_size = 0; ssize_t n; int kube_tag_len; const char *kube_tag_str; @@ -1195,6 +1482,7 @@ static inline int extract_meta(struct flb_kube *ctx, int container_found = FLB_FALSE; int container_length = 0; struct flb_regex_search result; + struct service_attributes *tmp_service_attributes = {0}; msgpack_unpacked mp_result; msgpack_object root; msgpack_object map; @@ -1282,6 +1570,23 @@ static inline int extract_meta(struct flb_kube *ctx, if (ctx->cache_use_docker_id && meta->docker_id) { n += meta->docker_id_len + 1; } + + pod_service_found = flb_hash_get(ctx->pod_hash_table, + meta->podname, meta->podname_len, + &tmp_service_attributes, &tmp_service_attr_size); + + if (pod_service_found != -1 && tmp_service_attributes != NULL) { + if (tmp_service_attributes->name[0] != '\0') { + n += tmp_service_attributes->name_len + 1; + } + if (tmp_service_attributes->environment[0] != '\0') { + n += tmp_service_attributes->environment_len + 1; + } + if (tmp_service_attributes->name_source[0] != '\0') { + n += tmp_service_attributes->name_source_len + 1; + } + } + meta->cache_key = flb_malloc(n); if (!meta->cache_key) { flb_errno(); @@ -1313,6 +1618,24 @@ static inline int extract_meta(struct flb_kube *ctx, off += meta->docker_id_len; } + if (pod_service_found != -1 && tmp_service_attributes != NULL) { + if (tmp_service_attributes->name[0] != '\0') { + meta->cache_key[off++] = ':'; + memcpy(meta->cache_key + off, tmp_service_attributes->name, tmp_service_attributes->name_len); + off += tmp_service_attributes->name_len; + } + if (tmp_service_attributes->environment[0] != '\0') { + meta->cache_key[off++] = ':'; + memcpy(meta->cache_key + off, tmp_service_attributes->environment, tmp_service_attributes->environment_len); + off += tmp_service_attributes->environment_len; + } + if (tmp_service_attributes->name_source[0] != '\0') { + meta->cache_key[off++] = ':'; + memcpy(meta->cache_key + off, tmp_service_attributes->name_source, tmp_service_attributes->name_source_len); + 
off += tmp_service_attributes->name_source_len; + } + } + meta->cache_key[off] = '\0'; meta->cache_key_len = off; } @@ -1334,7 +1657,9 @@ static int get_and_merge_meta(struct flb_kube *ctx, struct flb_kube_meta *meta, int ret; char *api_buf; size_t api_size; - + if(ctx->use_pod_association) { + get_cluster_from_environment(ctx, meta); + } if (ctx->use_tag_for_meta) { ret = merge_meta_from_tag(ctx, meta, out_buf, out_size); return ret; @@ -1388,11 +1713,39 @@ static int wait_for_dns(struct flb_kube *ctx) return -1; } +int flb_kube_pod_association_init(struct flb_kube *ctx, struct flb_config *config) { + ctx->pod_association_tls = flb_tls_create(ctx->pod_association_host_tls_verify, + ctx->pod_association_host_tls_debug, + NULL, NULL, + ctx->pod_association_host_server_ca_file, + ctx->pod_association_host_client_cert_file, ctx->pod_association_host_client_key_file, NULL); + if (!ctx->pod_association_tls) { + flb_plg_error(ctx->ins, "[kube_meta] could not create TLS config for pod association host"); + return -1; + } + ctx->pod_association_upstream = flb_upstream_create(config, + ctx->pod_association_host, + ctx->pod_association_port, + FLB_IO_TLS, ctx->pod_association_tls); + if (!ctx->pod_association_upstream) { + flb_plg_error(ctx->ins, "kube network init create pod association upstream failed"); + flb_tls_destroy(ctx->pod_association_tls); + ctx->pod_association_tls = NULL; + return -1; + } + flb_upstream_thread_safe(ctx->pod_association_upstream); + mk_list_init(&ctx->pod_association_upstream->_head); + return 0; +} + static int flb_kube_network_init(struct flb_kube *ctx, struct flb_config *config) { int io_type = FLB_IO_TCP; ctx->upstream = NULL; + ctx->pod_association_upstream = NULL; + ctx->pod_association_tls = NULL; + ctx->kubernetes_upstream = NULL; if (ctx->api_https == FLB_TRUE) { if (!ctx->tls_ca_path && !ctx->tls_ca_file) { @@ -1426,6 +1779,18 @@ static int flb_kube_network_init(struct flb_kube *ctx, struct flb_config *config /* Remove async flag from 
upstream */ ctx->upstream->flags &= ~(FLB_IO_ASYNC); + /* Continue the filter kubernetes plugin functionality if the pod_association fails */ + if(ctx->use_pod_association) { + if(ctx->use_kubelet) { + ctx->kubernetes_upstream = flb_upstream_create(config, + ctx->kubernetes_api_host, + ctx->kubernetes_api_port, + FLB_IO_TLS, + ctx->tls); + } + flb_kube_pod_association_init(ctx, config); + } + return 0; } @@ -1433,8 +1798,8 @@ static int flb_kube_network_init(struct flb_kube *ctx, struct flb_config *config int flb_kube_meta_init(struct flb_kube *ctx, struct flb_config *config) { int ret; - char *meta_buf; - size_t meta_size; + char *meta_buf, *config_buf = NULL; + size_t meta_size, config_size; if (ctx->dummy_meta == FLB_TRUE) { flb_plg_warn(ctx->ins, "using Dummy Metadata"); @@ -1482,8 +1847,22 @@ int flb_kube_meta_init(struct flb_kube *ctx, struct flb_config *config) } return -1; } + + ctx->platform = NULL; + if (ctx->use_pod_association) { + ret = get_api_server_configmap(ctx, KUBE_SYSTEM_NAMESPACE,AWS_AUTH_CONFIG_MAP, + &config_buf, &config_size); + if (ret == -1) { + ctx->platform = flb_strdup(NATIVE_KUBERNETES_PLATFORM); + } else { + ctx->platform = flb_strdup(EKS_PLATFORM); + } + } flb_plg_info(ctx->ins, "connectivity OK"); flb_free(meta_buf); + if(config_buf) { + flb_free(config_buf); + } } else { flb_plg_info(ctx->ins, "Fluent Bit not running in a POD"); @@ -1605,8 +1984,7 @@ int flb_kube_meta_get(struct flb_kube *ctx, return 0; } -int flb_kube_meta_release(struct flb_kube_meta *meta) -{ +int flb_kube_meta_release(struct flb_kube_meta *meta) { int r = 0; if (meta->namespace) { @@ -1643,5 +2021,13 @@ int flb_kube_meta_release(struct flb_kube_meta *meta) flb_free(meta->cache_key); } + if (meta->workload) { + flb_free(meta->workload); + } + + if (meta->cluster) { + flb_free(meta->cluster); + } + return r; } diff --git a/plugins/filter_kubernetes/kube_meta.h b/plugins/filter_kubernetes/kube_meta.h index fb0278afcdc..f5cfc143877 100644 --- 
a/plugins/filter_kubernetes/kube_meta.h +++ b/plugins/filter_kubernetes/kube_meta.h @@ -27,6 +27,7 @@ struct flb_kube; struct flb_kube_meta { int fields; + int cluster_len; int namespace_len; int podname_len; int cache_key_len; @@ -34,12 +35,15 @@ struct flb_kube_meta { int docker_id_len; int container_hash_len; int container_image_len; + int workload_len; + char *cluster; char *namespace; char *podname; char *container_name; char *container_image; char *docker_id; + char *workload; char *container_hash; /* set only on Systemd mode */ @@ -53,6 +57,7 @@ struct flb_kube_meta { #define FLB_KUBE_API_HOST "kubernetes.default.svc" #define FLB_KUBE_API_PORT 443 #define FLB_KUBE_API_FMT "/api/v1/namespaces/%s/pods/%s" +#define FLB_KUBE_API_CONFIGMAP_FMT "/api/v1/namespaces/%s/configmaps/%s" #define FLB_KUBELET_PODS "/pods" int flb_kube_meta_init(struct flb_kube *ctx, struct flb_config *config); @@ -65,5 +70,6 @@ int flb_kube_meta_get(struct flb_kube *ctx, struct flb_kube_meta *meta, struct flb_kube_props *props); int flb_kube_meta_release(struct flb_kube_meta *meta); +int flb_kube_pod_association_init(struct flb_kube *ctx, struct flb_config *config); #endif diff --git a/plugins/filter_kubernetes/kube_regex.c b/plugins/filter_kubernetes/kube_regex.c index e530ecf02e6..c0a0e308179 100644 --- a/plugins/filter_kubernetes/kube_regex.c +++ b/plugins/filter_kubernetes/kube_regex.c @@ -34,6 +34,7 @@ int flb_kube_regex_init(struct flb_kube *ctx) ctx->regex = flb_regex_create(KUBE_TAG_TO_REGEX); } } + ctx->deploymentRegex = flb_regex_create(DEPLOYMENT_REGEX); if (!ctx->regex) { return -1; diff --git a/plugins/filter_kubernetes/kube_regex.h b/plugins/filter_kubernetes/kube_regex.h index ae648fd74d6..d3dd90f764f 100644 --- a/plugins/filter_kubernetes/kube_regex.h +++ b/plugins/filter_kubernetes/kube_regex.h @@ -26,6 +26,8 @@ #define KUBE_JOURNAL_TO_REGEX "^(?[^_]+)_(?[^\\._]+)(\\.(?[^_]+))?_(?[^_]+)_(?[^_]+)_[^_]+_[^_]+$" +#define DEPLOYMENT_REGEX 
"^(?.+)-(?[bcdfghjklmnpqrstvwxz2456789]{6,10})$" + int flb_kube_regex_init(struct flb_kube *ctx); #endif diff --git a/plugins/filter_kubernetes/kubernetes.c b/plugins/filter_kubernetes/kubernetes.c index 259c5e6a869..54d2f6d85b4 100644 --- a/plugins/filter_kubernetes/kubernetes.c +++ b/plugins/filter_kubernetes/kubernetes.c @@ -32,12 +32,254 @@ #include #include +#include +#include /* Merge status used by merge_log_handler() */ #define MERGE_NONE 0 /* merge unescaped string in temporary buffer */ #define MERGE_PARSED 1 /* merge parsed string (log_buf) */ #define MERGE_MAP 2 /* merge direct binary object (v) */ +struct task_args { + struct flb_kube *ctx; + char *api_server_url; +}; + +pthread_mutex_t metadata_mutex; +pthread_t background_thread; +struct task_args *task_args = {0}; +struct mk_event_loop *evl; + +/* + * If a file exists called service.map, load it and use it. + * If not, fall back to API. This is primarily for unit tests purposes, + */ +static int get_pod_service_file_info(struct flb_kube *ctx, char **buffer) { + + int fd = -1; + char *payload = NULL; + size_t payload_size = 0; + struct stat sb; + int packed = -1; + int ret; + char uri[1024]; + + if (ctx->pod_service_preload_cache_path) { + + ret = snprintf(uri, sizeof(uri) - 1, "%s.map", + ctx->pod_service_preload_cache_path); + if (ret > 0) { + fd = open(uri, O_RDONLY, 0); + if (fd != -1) { + if (fstat(fd, &sb) == 0) { + payload = flb_malloc(sb.st_size); + if (!payload) { + flb_errno(); + } + else { + ret = read(fd, payload, sb.st_size); + if (ret == sb.st_size) { + payload_size = ret; + } + } + } + close(fd); + } + } + + if (payload_size) { + *buffer=payload; + packed = payload_size; + flb_plg_debug(ctx->ins, "pod to service map content is: %s", buffer); + } + } + + return packed; +} + +static void parse_pod_service_map(struct flb_kube *ctx, char *api_buf, size_t api_size) +{ + if(ctx->hash_table == NULL || ctx->pod_hash_table == NULL) { + return; + } + flb_plg_debug(ctx->ins, "started parsing pod 
to service map"); + + size_t off = 0; + int ret; + msgpack_unpacked api_result; + msgpack_object api_map; + msgpack_object k,v, attributeKey, attributeValue; + char *buffer; + size_t size; + int root_type; + + /* Iterate API server msgpack and lookup specific fields */ + if (api_buf != NULL) { + ret = flb_pack_json(api_buf, api_size, + &buffer, &size, &root_type); + + if (ret < 0) { + flb_plg_warn(ctx->ins, "Could not parse json response = %s", + api_buf); + flb_free(buffer); + return; + } + msgpack_unpacked_init(&api_result); + ret = msgpack_unpack_next(&api_result, buffer, size, &off); + if (ret == MSGPACK_UNPACK_SUCCESS) { + api_map = api_result.data; + for (int i = 0; i < api_map.via.map.size; i++) { + k = api_map.via.map.ptr[i].key; + v = api_map.via.map.ptr[i].val; + if (k.type == MSGPACK_OBJECT_STR && v.type == MSGPACK_OBJECT_MAP) { + char *pod_name = flb_strndup(k.via.str.ptr, k.via.str.size); + struct service_attributes *service_attributes = flb_malloc(sizeof(struct service_attributes)); + for (int j = 0; j < v.via.map.size; j++) { + attributeKey = v.via.map.ptr[j].key; + attributeValue = v.via.map.ptr[j].val; + if (attributeKey.type == MSGPACK_OBJECT_STR && attributeValue.type == MSGPACK_OBJECT_STR) { + char *attributeKeyString = flb_strndup(attributeKey.via.str.ptr, attributeKey.via.str.size); + if(strcmp(attributeKeyString, "ServiceName") == 0 && attributeValue.via.str.size < KEY_ATTRIBUTES_MAX_LEN) { + strncpy(service_attributes->name, attributeValue.via.str.ptr, attributeValue.via.str.size); + service_attributes->name[attributeValue.via.str.size] = '\0'; + service_attributes->name_len = attributeValue.via.str.size; + service_attributes->fields++; + } + if(strcmp(attributeKeyString, "Environment") == 0 && attributeValue.via.str.size < KEY_ATTRIBUTES_MAX_LEN) { + strncpy(service_attributes->environment, attributeValue.via.str.ptr,attributeValue.via.str.size); + service_attributes->environment[attributeValue.via.str.size] = '\0'; + 
service_attributes->environment_len = attributeValue.via.str.size; + service_attributes->fields++; + } + if(strcmp(attributeKeyString, "ServiceNameSource") == 0 && attributeValue.via.str.size < SERVICE_NAME_SOURCE_MAX_LEN) { + strncpy(service_attributes->name_source, attributeValue.via.str.ptr,attributeValue.via.str.size); + service_attributes->name_source[attributeValue.via.str.size] = '\0'; + service_attributes->name_source_len = attributeValue.via.str.size; + service_attributes->fields++; + } + flb_free(attributeKeyString); + } + } + if (service_attributes->name[0] != '\0' || service_attributes->environment[0] != '\0') { + pthread_mutex_lock(&metadata_mutex); + flb_hash_add(ctx->pod_hash_table, + pod_name,k.via.str.size, + service_attributes, 0); + pthread_mutex_unlock(&metadata_mutex); + } else { + flb_free(service_attributes); + } + flb_free(pod_name); + }else { + flb_plg_error(ctx->ins, "key and values are not string and map"); + } + } + } + } + + flb_plg_debug(ctx->ins, "ended parsing pod to service map" ); + + msgpack_unpacked_destroy(&api_result); + flb_free(buffer); +} + +static int fetch_pod_service_map(struct flb_kube *ctx, char *api_server_url) { + if(!ctx->use_pod_association) { + return -1; + } + int ret; + struct flb_http_client *c; + size_t b_sent; + struct flb_upstream_conn *u_conn; + char *buffer = {0}; + + flb_plg_debug(ctx->ins, "fetch pod to service map"); + + ret = get_pod_service_file_info(ctx, &buffer); + if (ret > 0 && buffer != NULL) { + parse_pod_service_map(ctx, buffer, ret); + flb_free(buffer); + } + else { + /* Get upstream context and connection */ + /* if block handles the TLS certificates update, as the Fluent-bit connection gets net timeout error, it destroys the upstream + * On the next call to fetch_pod_service_map, it creates a new pod association upstream with latest TLS certs */ + if (!ctx->pod_association_upstream) { + flb_plg_debug(ctx->ins, "[kubernetes] upstream object for pod association is NULL. 
Making a new one now"); + ret = flb_kube_pod_association_init(ctx,ctx->config); + if( ret == -1) { + return -1; + } + } + + u_conn = flb_upstream_conn_get(ctx->pod_association_upstream); + if (!u_conn) { + flb_plg_error(ctx->ins, "[kubernetes] no upstream connections available to %s:%i", + ctx->pod_association_upstream->tcp_host, ctx->pod_association_upstream->tcp_port); + flb_upstream_destroy(ctx->pod_association_upstream); + flb_tls_destroy(ctx->pod_association_tls); + ctx->pod_association_upstream = NULL; + ctx->pod_association_tls = NULL; + return -1; + } + + /* Create HTTP client */ + c = flb_http_client(u_conn, FLB_HTTP_GET, + api_server_url, + NULL, 0, ctx->pod_association_host, + ctx->pod_association_port, NULL, 0); + + if (!c) { + flb_error("[kubernetes] could not create HTTP client"); + flb_upstream_conn_release(u_conn); + flb_upstream_destroy(ctx->pod_association_upstream); + flb_tls_destroy(ctx->pod_association_tls); + ctx->pod_association_upstream = NULL; + ctx->pod_association_tls = NULL; + return -1; + } + + /* Perform HTTP request */ + ret = flb_http_do(c, &b_sent); + flb_plg_debug(ctx->ins, "Request (uri = %s) http_do=%i, " + "HTTP Status: %i", + api_server_url, ret, c->resp.status); + + if (ret != 0 || c->resp.status != 200) { + if (c->resp.payload_size > 0) { + flb_plg_debug(ctx->ins, "HTTP response : %s", + c->resp.payload); + } + flb_http_client_destroy(c); + flb_upstream_conn_release(u_conn); + return -1; + } + + /* Parse response data */ + if (c->resp.payload != NULL) { + flb_plg_debug(ctx->ins, "HTTP response payload : %s", + c->resp.payload); + parse_pod_service_map(ctx, c->resp.payload, c->resp.payload_size); + } + + /* Cleanup */ + flb_http_client_destroy(c); + flb_upstream_conn_release(u_conn); + } + return 0; +} + +void *update_pod_service_map(void *arg) { + while (1) { + flb_engine_evl_init(); + evl = mk_event_loop_create(256); + flb_engine_evl_set(evl); + fetch_pod_service_map(task_args->ctx,task_args->api_server_url); + 
flb_plg_debug(task_args->ctx->ins, "Updating pod to service map after %d seconds", task_args->ctx->pod_service_map_refresh_interval); + sleep(task_args->ctx->pod_service_map_refresh_interval); + } +} + static int get_stream(msgpack_object_map map) { int i; @@ -206,6 +448,23 @@ static int cb_kube_init(struct flb_filter_instance *f_ins, */ flb_kube_meta_init(ctx, config); +/* + * Init separate thread for calling pod to + * service map + */ + pthread_mutex_init(&metadata_mutex, NULL); + + if (ctx->use_pod_association) { + task_args = malloc(sizeof(struct task_args)); + task_args->ctx = ctx; + task_args->api_server_url = ctx->pod_association_endpoint; + // Start the background thread + if (pthread_create(&background_thread, NULL, update_pod_service_map, NULL) != 0) { + flb_error("Failed to create background thread"); + free(task_args); + } + } + return 0; } @@ -619,7 +878,16 @@ static int cb_kube_exit(void *data, struct flb_config *config) ctx = data; flb_kube_conf_destroy(ctx); + if (background_thread) { + pthread_cancel(background_thread); + pthread_join(background_thread, NULL); + } + pthread_mutex_destroy(&metadata_mutex); + flb_free(task_args); + if (evl) { + mk_event_loop_destroy(evl); + } return 0; } @@ -881,6 +1149,105 @@ static struct flb_config_map config_map[] = { "For example, set this value to 60 or 60s and cache entries " "which have been created more than 60s will be evicted" }, + + /* + * Enable pod to service name association logics + * This can be configured with endpoint that returns a response with the corresponding + * podname in relation to the service name. 
For example, if there is a pod named "petclinic-12345" + * then in order to associate a service name to pod "petclinic-12345", the JSON response to the endpoint + * must follow the below patterns + * { + * "petclinic-12345": { + * "ServiceName":"petclinic", + * "Environment":"default" + * } + * } + */ + { + FLB_CONFIG_MAP_BOOL, "use_pod_association", "false", + 0, FLB_TRUE, offsetof(struct flb_kube, use_pod_association), + "use custom endpoint to get pod to service name mapping" + }, + /* + * The host used for pod to service name association , default is 127.0.0.1 + * Will only check when "use_pod_association" config is set to true + */ + { + FLB_CONFIG_MAP_STR, "pod_association_host", "cloudwatch-agent.amazon-cloudwatch", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host), + "host to connect with when performing pod to service name association" + }, + /* + * The endpoint used for pod to service name association, default is /kubernetes/pod-to-service-env-map + * Will only check when "use_pod_association" config is set to true + */ + { + FLB_CONFIG_MAP_STR, "pod_association_endpoint", "/kubernetes/pod-to-service-env-map", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_endpoint), + "endpoint to connect with when performing pod to service name association" + }, + /* + * The port for pod to service name association endpoint, default is 4311 + * Will only check when "use_pod_association" config is set to true + */ + { + FLB_CONFIG_MAP_INT, "pod_association_port", "4311", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_port), + "port to connect with when performing pod to service name association" + }, + { + FLB_CONFIG_MAP_INT, "pod_service_map_ttl", "0", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_service_map_ttl), + "configurable TTL for pod to service map storage. " + "By default, it is set to 0 which means TTL for cache entries is disabled and " + "cache entries are evicted at random when capacity is reached. 
" + "In order to enable this option, you should set the number to a time interval. " + "For example, set this value to 60 or 60s and cache entries " + "which have been created more than 60s will be evicted" + }, + { + FLB_CONFIG_MAP_INT, "pod_service_map_refresh_interval", "60", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_service_map_refresh_interval), + "Refresh interval for the pod to service map storage." + "By default, it is set to refresh every 60 seconds" + }, + { + FLB_CONFIG_MAP_STR, "pod_service_preload_cache_dir", NULL, + 0, FLB_TRUE, offsetof(struct flb_kube, pod_service_preload_cache_path), + "set directory with pod to service map files" + }, + { + FLB_CONFIG_MAP_STR, "pod_association_host_server_ca_file", "/etc/amazon-cloudwatch-observability-agent-server-cert/tls-ca.crt", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host_server_ca_file), + "TLS CA certificate path for communication with agent server" + }, + { + FLB_CONFIG_MAP_STR, "pod_association_host_client_cert_file", "/etc/amazon-cloudwatch-observability-agent-client-cert/client.crt", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host_client_cert_file), + "Client Certificate path for enabling mTLS on calls to agent server" + }, + { + FLB_CONFIG_MAP_STR, "pod_association_host_client_key_file", "/etc/amazon-cloudwatch-observability-agent-client-cert/client.key", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host_client_key_file), + "Client Certificate Key path for enabling mTLS on calls to agent server" + }, + { + FLB_CONFIG_MAP_INT, "pod_association_host_tls_debug", "0", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host_tls_debug), + "set TLS debug level: 0 (no debug), 1 (error), " + "2 (state change), 3 (info) and 4 (verbose)" + }, + { + FLB_CONFIG_MAP_BOOL, "pod_association_host_tls_verify", "true", + 0, FLB_TRUE, offsetof(struct flb_kube, pod_association_host_tls_verify), + "enable or disable verification of TLS peer certificate" + }, + { + 
FLB_CONFIG_MAP_STR, "set_platform", NULL, + 0, FLB_TRUE, offsetof(struct flb_kube, set_platform), + "Set the platform that kubernetes is in. Possible values are k8s and eks" + "This should only be used for testing purpose" + }, /* EOF */ {0} }; diff --git a/plugins/out_cloudwatch_logs/cloudwatch_api.c b/plugins/out_cloudwatch_logs/cloudwatch_api.c index de0825e3f83..8408f7bb348 100644 --- a/plugins/out_cloudwatch_logs/cloudwatch_api.c +++ b/plugins/out_cloudwatch_logs/cloudwatch_api.c @@ -195,12 +195,145 @@ static inline int try_to_write(char *buf, int *off, size_t left, return FLB_TRUE; } +static int entity_add_key_attributes(struct flb_cloudwatch *ctx, struct cw_flush *buf, struct log_stream *stream, int *offset) { + char ts[KEY_ATTRIBUTES_MAX_LEN]; + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"keyAttributes\":{",0)) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"Type\":\"Service\"",0)) { + goto error; + } + if(stream->entity->key_attributes->name != NULL && strlen(stream->entity->key_attributes->name) != 0) { + if (!snprintf(ts,KEY_ATTRIBUTES_MAX_LEN, ",%s%s%s","\"Name\":\"",stream->entity->key_attributes->name,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->key_attributes->environment != NULL && strlen(stream->entity->key_attributes->environment) != 0) { + if (!snprintf(ts,KEY_ATTRIBUTES_MAX_LEN, ",%s%s%s","\"Environment\":\"",stream->entity->key_attributes->environment,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "},", 2)) { + goto error; + } + return 0; +error: + return -1; +} + +static int entity_add_attributes(struct flb_cloudwatch *ctx, struct cw_flush *buf, struct log_stream *stream,int *offset) { + char ts[ATTRIBUTES_MAX_LEN]; + if (!try_to_write(buf->out_buf, offset, 
buf->out_buf_size, + "\"attributes\":{", + 0)) { + goto error; + } + if (stream->entity->attributes->platform_type != NULL && strlen(stream->entity->attributes->platform_type) != 0) { + if (strcmp(stream->entity->attributes->platform_type, "eks") == 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, "%s%s%s","\"PlatformType\":\"","AWS::EKS","\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + if(stream->entity->attributes->cluster_name != NULL && strlen(stream->entity->attributes->cluster_name) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"EKS.Cluster\":\"",stream->entity->attributes->cluster_name,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + } else if (strcmp(stream->entity->attributes->platform_type, "k8s") == 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, "%s%s%s","\"PlatformType\":\"","K8s","\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + if(stream->entity->attributes->cluster_name != NULL && strlen(stream->entity->attributes->cluster_name) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"K8s.Cluster\":\"",stream->entity->attributes->cluster_name,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + } + } else { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, "%s%s%s","\"PlatformType\":\"","Generic","\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->attributes->namespace != NULL && strlen(stream->entity->attributes->namespace) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"K8s.Namespace\":\"",stream->entity->attributes->namespace,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->attributes->node != NULL 
&& strlen(stream->entity->attributes->node) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"K8s.Node\":\"",buf->current_stream->entity->attributes->node,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->attributes->workload != NULL && strlen(stream->entity->attributes->workload) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"K8s.Workload\":\"",buf->current_stream->entity->attributes->workload,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->attributes->instance_id != NULL && strlen(stream->entity->attributes->instance_id) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"EC2.InstanceId\":\"",buf->current_stream->entity->attributes->instance_id,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + if(stream->entity->attributes->name_source != NULL && strlen(stream->entity->attributes->name_source) != 0) { + if (!snprintf(ts,ATTRIBUTES_MAX_LEN, ",%s%s%s","\"AWS.ServiceNameSource\":\"",buf->current_stream->entity->attributes->name_source,"\"")) { + goto error; + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size,ts,0)) { + goto error; + } + } + + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "}", 1)) { + goto error; + } + return 0; +error: + return -1; +} + /* * Writes the "header" for a put log events payload */ static int init_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf, struct log_stream *stream, int *offset) { + int ret; if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, "{\"logGroupName\":\"", 17)) { goto error; @@ -225,6 +358,34 @@ static int init_put_payload(struct flb_cloudwatch *ctx, struct cw_flush *buf, "\",", 2)) { goto error; } + // If we are missing the service name, the entity will get rejected by the frontend anyway + // so do 
not emit entity unless service name is filled + if(ctx->add_entity && stream->entity != NULL && stream->entity->key_attributes->name != NULL) { + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "\"entity\":{", 10)) { + goto error; + } + + if(stream->entity->key_attributes != NULL) { + ret = entity_add_key_attributes(ctx,buf,stream,offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Failed to initialize Entity KeyAttributes"); + goto error; + } + } + if(stream->entity->attributes != NULL) { + ret = entity_add_attributes(ctx,buf,stream,offset); + if (ret < 0) { + flb_plg_error(ctx->ins, "Failed to initialize Entity Attributes"); + goto error; + } + } + if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, + "},", 2)) { + goto error; + } + + } if (!try_to_write(buf->out_buf, offset, buf->out_buf_size, "\"logEvents\":[", 13)) { @@ -782,6 +943,107 @@ int pack_emf_payload(struct flb_cloudwatch *ctx, return 0; } +void parse_entity(struct flb_cloudwatch *ctx, entity *entity, msgpack_object map, int map_size) { + int i,j; + msgpack_object key, kube_key; + msgpack_object val, kube_val; + + int val_map_size; + for(i=0; i < map_size; i++) { + key = map.via.map.ptr[i].key; + val = map.via.map.ptr[i].val; + if(strncmp(key.via.str.ptr, "kubernetes",10 ) == 0 ) { + if (val.type == MSGPACK_OBJECT_MAP) { + val_map_size = val.via.map.size; + for (j=0; j < val_map_size; j++) { + kube_key = val.via.map.ptr[j].key; + kube_val = val.via.map.ptr[j].val; + if(strncmp(kube_key.via.str.ptr, "service_name", kube_key.via.str.size) == 0) { + if(entity->key_attributes->name != NULL) { + flb_free(entity->key_attributes->name); + } + entity->key_attributes->name = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "environment", kube_key.via.str.size) == 0) { + if(entity->key_attributes->environment != NULL) { + flb_free(entity->key_attributes->environment); + } + entity->key_attributes->environment = 
flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "namespace_name", kube_key.via.str.size) == 0) { + if(entity->attributes->namespace != NULL) { + flb_free(entity->attributes->namespace); + } + entity->attributes->namespace = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "host", kube_key.via.str.size) == 0) { + if(entity->attributes->node != NULL) { + flb_free(entity->attributes->node); + } + entity->attributes->node = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "cluster", kube_key.via.str.size) == 0) { + if(entity->attributes->cluster_name != NULL) { + flb_free(entity->attributes->cluster_name); + } + entity->attributes->cluster_name = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "workload", kube_key.via.str.size) == 0) { + if(entity->attributes->workload != NULL) { + flb_free(entity->attributes->workload); + } + entity->attributes->workload = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "name_source", kube_key.via.str.size) == 0) { + if(entity->attributes->name_source != NULL) { + flb_free(entity->attributes->name_source); + } + entity->attributes->name_source = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } else if(strncmp(kube_key.via.str.ptr, "platform", kube_key.via.str.size) == 0) { + if(entity->attributes->platform_type != NULL) { + flb_free(entity->attributes->platform_type); + } + entity->attributes->platform_type = flb_strndup(kube_val.via.str.ptr, kube_val.via.str.size); + } + } + } + } + if(strncmp(key.via.str.ptr, "ec2_instance_id",key.via.str.size ) == 0 ) { + if(entity->attributes->instance_id != NULL) { + flb_free(entity->attributes->instance_id); + } + entity->attributes->instance_id = flb_strndup(val.via.str.ptr, val.via.str.size); + } + } + 
if(entity->key_attributes->name == NULL && entity->attributes->name_source == NULL &&entity->attributes->workload != NULL) { + entity->key_attributes->name = flb_strndup(entity->attributes->workload, strlen(entity->attributes->workload)); + entity->attributes->name_source = flb_strndup("K8sWorkload", 11); + } +} + +void update_or_create_entity(struct flb_cloudwatch *ctx, struct log_stream *stream, const msgpack_object map) { + if(stream->entity == NULL) { + stream->entity = flb_malloc(sizeof(entity)); + if (stream->entity == NULL) { + return; + } + memset(stream->entity, 0, sizeof(entity)); + + stream->entity->key_attributes = flb_malloc(sizeof(entity_key_attributes)); + if (stream->entity->key_attributes == NULL) { + return; + } + memset(stream->entity->key_attributes, 0, sizeof(entity_key_attributes)); + + stream->entity->attributes = flb_malloc(sizeof(entity_attributes)); + if (stream->entity->attributes == NULL) { + return; + } + memset(stream->entity->attributes, 0, sizeof(entity_attributes)); + + parse_entity(ctx,stream->entity,map, map.via.map.size); + } else { + parse_entity(ctx,stream->entity,map, map.via.map.size); + } + if (!stream->entity) { + flb_plg_warn(ctx->ins, "Failed to generate entity"); + } +} + /* * Main routine- processes msgpack and sends in batches which ignore the empty ones * return value is the number of events processed and send. 
@@ -856,6 +1118,9 @@ int process_and_send(struct flb_cloudwatch *ctx, const char *input_plugin, flb_plg_debug(ctx->ins, "Couldn't determine log group & stream for record with tag %s", tag); goto error; } + if(ctx->kubernete_metadata_enabled && ctx->add_entity) { + update_or_create_entity(ctx,stream,map); + } if (ctx->log_key) { key_str = NULL; @@ -1420,6 +1685,8 @@ int put_log_events(struct flb_cloudwatch *ctx, struct cw_flush *buf, if (c) { flb_plg_debug(ctx->ins, "PutLogEvents http status=%d", c->resp.status); + flb_plg_debug(ctx->ins, "PutLogEvents http data=%s", c->resp.data); + flb_plg_debug(ctx->ins, "PutLogEvents http payload=%s", c->resp.payload); if (c->resp.status == 200) { if (c->resp.data == NULL || c->resp.data_len == 0 || strstr(c->resp.data, AMZN_REQUEST_ID_HEADER) == NULL) { diff --git a/plugins/out_cloudwatch_logs/cloudwatch_api.h b/plugins/out_cloudwatch_logs/cloudwatch_api.h index 99919055bc0..064cbe94844 100644 --- a/plugins/out_cloudwatch_logs/cloudwatch_api.h +++ b/plugins/out_cloudwatch_logs/cloudwatch_api.h @@ -35,6 +35,12 @@ /* number of characters needed to 'end' a PutLogEvents payload */ #define PUT_LOG_EVENTS_FOOTER_LEN 4 +// https://docs.aws.amazon.com/applicationsignals/latest/APIReference/API_Service.html +/* Maxinum number of character limits including both the KeyAttributes key and its value */ +#define KEY_ATTRIBUTES_MAX_LEN 1100 +/* Maxinum number of character limits including both the Attributes key and its value */ +#define ATTRIBUTES_MAX_LEN 300 + /* 256KiB minus 26 bytes for the event */ #define MAX_EVENT_LEN 262118 diff --git a/plugins/out_cloudwatch_logs/cloudwatch_logs.c b/plugins/out_cloudwatch_logs/cloudwatch_logs.c index 03723f521a1..c2ca0720f7e 100644 --- a/plugins/out_cloudwatch_logs/cloudwatch_logs.c +++ b/plugins/out_cloudwatch_logs/cloudwatch_logs.c @@ -344,6 +344,15 @@ static int cb_cloudwatch_init(struct flb_output_instance *ins, flb_output_upstream_set(upstream, ctx->ins); ctx->cw_client->host = ctx->endpoint; + 
struct mk_list *head; + struct flb_filter_instance *f_ins; + mk_list_foreach(head, &config->filters) { + f_ins = mk_list_entry(head, struct flb_filter_instance, _head); + if (strstr(f_ins->p->name, "kubernetes")) { + ctx->kubernete_metadata_enabled = true; + } + } + /* Export context */ flb_output_set_context(ins, ctx); @@ -491,6 +500,26 @@ static int cb_cloudwatch_exit(void *data, struct flb_config *config) return 0; } +void entity_destroy(entity *entity) { + if(entity->attributes) { + flb_free(entity->attributes->cluster_name); + flb_free(entity->attributes->instance_id); + flb_free(entity->attributes->namespace); + flb_free(entity->attributes->node); + flb_free(entity->attributes->platform_type); + flb_free(entity->attributes->workload); + flb_free(entity->attributes->name_source); + flb_free(entity->attributes); + } + if(entity->key_attributes) { + flb_free(entity->key_attributes->environment); + flb_free(entity->key_attributes->name); + flb_free(entity->key_attributes->type); + flb_free(entity->key_attributes); + } + flb_free(entity); +} + void log_stream_destroy(struct log_stream *stream) { if (stream) { @@ -500,6 +529,9 @@ void log_stream_destroy(struct log_stream *stream) if (stream->group) { flb_sds_destroy(stream->group); } + if (stream->entity) { + entity_destroy(stream->entity); + } flb_free(stream); } } @@ -637,6 +669,12 @@ static struct flb_config_map config_map[] = { "is 'd1,d2;d3', we will consider it as [[d1, d2],[d3]]." 
}, + { + FLB_CONFIG_MAP_BOOL, "add_entity", "false", + 0, FLB_TRUE, offsetof(struct flb_cloudwatch, add_entity), + "add entity to PutLogEvent calls" + }, + /* EOF */ {0} }; diff --git a/plugins/out_cloudwatch_logs/cloudwatch_logs.h b/plugins/out_cloudwatch_logs/cloudwatch_logs.h index 705f69b9139..afd0b888b9c 100644 --- a/plugins/out_cloudwatch_logs/cloudwatch_logs.h +++ b/plugins/out_cloudwatch_logs/cloudwatch_logs.h @@ -30,6 +30,33 @@ #include #include +/* Entity object used for associating the telemetry + * in the PutLogEvent call*/ +typedef struct entity { + struct entity_key_attributes *key_attributes; + struct entity_attributes *attributes; +}entity; + +/* KeyAttributes used for CloudWatch Entity object + * in the PutLogEvent call*/ +typedef struct entity_key_attributes { + char *type; + char *name; + char *environment; +}entity_key_attributes; + +/* Attributes used for CloudWatch Entity object + * in the PutLogEvent call*/ +typedef struct entity_attributes { + char *platform_type; + char *cluster_name; + char *namespace; + char *workload; + char *node; + char *instance_id; + char *name_source; +}entity_attributes; + /* buffers used for each flush */ struct cw_flush { /* temporary buffer for storing the serialized event messages */ @@ -84,6 +111,13 @@ struct log_stream { unsigned long long oldest_event; unsigned long long newest_event; + /* + * PutLogEvents entity object + * variable that store service or infrastructure + * information + */ + struct entity *entity; + struct mk_list _head; }; @@ -146,6 +180,14 @@ struct flb_cloudwatch { /* Plugin output instance reference */ struct flb_output_instance *ins; + + /* Checks if kubernete filter is enabled + * So the plugin knows when to scrape for Entity + */ + + int kubernete_metadata_enabled; + + int add_entity; }; void flb_cloudwatch_ctx_destroy(struct flb_cloudwatch *ctx); diff --git a/src/flb_hash.c b/src/flb_hash.c index fe3035f901d..8dbcc502bb7 100644 --- a/src/flb_hash.c +++ b/src/flb_hash.c @@ -34,6 +34,8 
@@ static inline void flb_hash_entry_free(struct flb_hash *ht, flb_free(entry->key); if (entry->val && entry->val_size > 0) { flb_free(entry->val); + } else if (ht->force_remove_pointer) { + flb_free(entry->val); } flb_free(entry); } @@ -60,6 +62,7 @@ struct flb_hash *flb_hash_create(int evict_mode, size_t size, int max_entries) ht->size = size; ht->total_count = 0; ht->cache_ttl = 0; + ht->force_remove_pointer = 0; ht->table = flb_calloc(1, sizeof(struct flb_hash_table) * size); if (!ht->table) { flb_errno(); @@ -93,6 +96,21 @@ struct flb_hash *flb_hash_create_with_ttl(int cache_ttl, int evict_mode, return ht; } +struct flb_hash *flb_hash_create_with_ttl_force_destroy(int cache_ttl, int evict_mode, + size_t size, int max_entries) +{ + struct flb_hash *ht; + + ht = flb_hash_create_with_ttl(cache_ttl,evict_mode, size, max_entries); + if (!ht) { + flb_errno(); + return NULL; + } + + ht->force_remove_pointer = 1; + return ht; +} + int flb_hash_del_ptr(struct flb_hash *ht, const char *key, int key_len, void *ptr) { diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-daemonset_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-daemonset_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-daemonset_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-deployment_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-deployment_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-deployment_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff 
--git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-pod_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-pod_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-pod_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-replicaset_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-replicaset_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-disabled-replicaset_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-daemonset_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-daemonset_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-daemonset_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-deployment_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-deployment_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-deployment_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git 
a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-pod_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-pod_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-pod_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-replicaset_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-replicaset_fluent-bit.log new file mode 100644 index 00000000000..259723131be --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-kubelet-enabled-replicaset_fluent-bit.log @@ -0,0 +1 @@ +{"log":"Fluent Bit is logging\n","stream":"stdout","time":"2019-04-01T17:58:33.598656444Z"} diff --git a/tests/runtime/data/kubernetes/log/options/options_use-pod-association-enabled_fluent-bit.log b/tests/runtime/data/kubernetes/log/options/options_use-pod-association-enabled_fluent-bit.log new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/tests/runtime/data/kubernetes/log/options/options_use-pod-association-enabled_fluent-bit.log @@ -0,0 +1 @@ + diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-daemonset.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-daemonset.meta new file mode 100644 index 00000000000..15e91677570 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-daemonset.meta @@ -0,0 +1,126 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": 
"use-kubelet-disabled-daemonset", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "name": "my-daemonset", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [ + { + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + 
"containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + } + ], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-deployment.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-deployment.meta new file mode 100644 index 00000000000..e40da24a0bc --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-deployment.meta @@ -0,0 +1,126 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-disabled-deployment", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "name": "my-deployment-bcg289", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [ + { + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [ + { + "mountPath": 
"/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + } + ], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-pod.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-pod.meta 
new file mode 100644 index 00000000000..a8bcc69017f --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-pod.meta @@ -0,0 +1,116 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-disabled-pod", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a" + }, + "spec": { + "containers": [ + { + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + 
"lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + } + ], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-replicaset.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-replicaset.meta new file mode 100644 index 00000000000..4651a74ff34 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-disabled-replicaset.meta @@ -0,0 +1,126 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-disabled-replicaset", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "name": "my-replicaset", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [ + { + "image": "fluent/fluent-bit", + 
"imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + } + ], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": 
"100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-daemonset.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-daemonset.meta new file mode 100644 index 00000000000..6cebe927c77 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-daemonset.meta @@ -0,0 +1,119 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + "items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-enabled-daemonset", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "DaemonSet", + "name": "my-daemonset", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + 
"operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-deployment.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-deployment.meta new file mode 100644 index 00000000000..8ce5e16068f --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-deployment.meta @@ -0,0 +1,119 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + "items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": 
"fluent-bit" + }, + "name": "use-kubelet-enabled-deployment", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "name": "my-deployment-bcg289", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + 
"containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-pod.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-pod.meta new file mode 100644 index 00000000000..122479e9c07 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-pod.meta @@ -0,0 +1,109 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + "items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-enabled-pod", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a" + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": 
"Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-replicaset.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-replicaset.meta new file mode 100644 index 00000000000..966f782a306 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled-replicaset.meta @@ -0,0 +1,119 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + 
"items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-kubelet-enabled-replicaset", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "ReplicaSet", + "name": "my-replicaset", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + 
"lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled.meta b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled.meta index af9fb1a3ba6..4c1305d7a1f 100644 --- a/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled.meta +++ b/tests/runtime/data/kubernetes/meta/options_use-kubelet-enabled.meta @@ -17,7 +17,17 @@ "namespace": "options", "resourceVersion": "74466568", "selfLink": "/api/v1/namespaces/core/pods/base", - "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a" + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + "uid": "abcd1234-5678-efgh-ijkl-9876mnopqrst", + "controller": true, + "blockOwnerDeletion": true + } + ] }, "spec": { "containers": [{ diff --git a/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled-fallback-env.meta b/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled-fallback-env.meta new file mode 100644 index 00000000000..755931d52a2 --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled-fallback-env.meta 
@@ -0,0 +1,109 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + "items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-pod-association-enabled-fallback-env", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a" + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": 
null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled.meta b/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled.meta new file mode 100644 index 00000000000..99b35b01f6b --- /dev/null +++ b/tests/runtime/data/kubernetes/meta/options_use-pod-association-enabled.meta @@ -0,0 +1,109 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": {}, + "items": [{ + "metadata": { + "annotations": { + "prometheus.io/path": "/api/v1/metrics/prometheus", + "prometheus.io/port": "2020", + "prometheus.io/scrape": "true" + }, + "creationTimestamp": "2019-04-03T09:29:00Z", + "labels": { + "app.kubernetes.io/name": "fluent-bit" + }, + "name": "use-pod-association-enabled", + "namespace": "options", + "resourceVersion": "74466568", + "selfLink": "/api/v1/namespaces/core/pods/base", + "uid": "e9f2963f-55f2-11e9-84c5-02e422b8a84a" + }, + "spec": { + "containers": [{ + "image": "fluent/fluent-bit", + "imagePullPolicy": "Always", + "name": "fluent-bit", + "resources": {}, + "stdin": true, + "stdinOnce": true, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "tty": true, + "volumeMounts": [{ + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-9ffht", + "readOnly": 
true + }] + }], + "dnsPolicy": "ClusterFirst", + "nodeName": "ip-10-49-18-80.eu-west-1.compute.internal", + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [{ + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [{ + "name": "default-token-9ffht", + "secret": { + "defaultMode": 420, + "secretName": "default-token-9ffht" + } + }] + }, + "status": { + "conditions": [{ + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:06Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-04-03T09:29:00Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [{ + "containerID": "docker://c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16", + "image": "fluent/fluent-bit:latest", + "imageID": "docker-pullable://fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f", + "lastState": {}, + "name": "fluent-bit", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-04-03T09:29:05Z" + } + } + }], + "hostIP": "10.49.18.80", + "phase": "Running", + "podIP": "100.116.192.42", + "qosClass": "BestEffort", + "startTime": "2019-04-03T09:29:00Z" + } + }] +} diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-daemonset_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-daemonset_fluent-bit.out new file mode 100644 index 00000000000..dbfcdb92d3c --- /dev/null +++ 
b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-daemonset_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-disabled-daemonset","namespace_name":"options","workload":"my-daemonset","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-deployment_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-deployment_fluent-bit.out new file mode 100644 index 00000000000..10ac27f0592 --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-deployment_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-disabled-deployment","namespace_name":"options","workload":"my-deployment","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git 
a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-pod_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-pod_fluent-bit.out new file mode 100644 index 00000000000..8aea6e523eb --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-pod_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-disabled-pod","namespace_name":"options","workload":"use-kubelet-disabled-pod","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-replicaset_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-replicaset_fluent-bit.out new file mode 100644 index 00000000000..af2b3740f7f --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-disabled-replicaset_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is 
logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-disabled-replicaset","namespace_name":"options","workload":"my-replicaset","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-daemonset_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-daemonset_fluent-bit.out new file mode 100644 index 00000000000..d9ce4885dcc --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-daemonset_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-enabled-daemonset","namespace_name":"options","workload":"my-daemonset","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-deployment_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-deployment_fluent-bit.out new 
file mode 100644 index 00000000000..0b7055a6e9e --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-deployment_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-enabled-deployment","namespace_name":"options","workload":"my-deployment","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-pod_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-pod_fluent-bit.out new file mode 100644 index 00000000000..ccc0d77b4d0 --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-pod_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-enabled-pod","namespace_name":"options","workload":"use-kubelet-enabled-pod","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff 
--git a/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-replicaset_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-replicaset_fluent-bit.out new file mode 100644 index 00000000000..cd9cfdb86ca --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-kubelet-enabled-replicaset_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is logging\n","stream":"stdout","kubernetes":{"pod_name":"use-kubelet-enabled-replicaset","namespace_name":"options","workload":"my-replicaset","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled-fallback-env_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled-fallback-env_fluent-bit.out new file mode 100644 index 00000000000..3a2a66d746a --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled-fallback-env_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is 
logging\n","stream":"stdout","kubernetes":{"pod_name":"use-pod-association-enabled-fallback-env","namespace_name":"options","service_name":"test-service","environment":"eks:test-cluster/options","name_source":"Instrumentation","platform":"eks","cluster":"test-cluster","workload":"use-pod-association-enabled-fallback-env","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled_fluent-bit.out b/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled_fluent-bit.out new file mode 100644 index 00000000000..4b1ec144601 --- /dev/null +++ b/tests/runtime/data/kubernetes/out/options/options_use-pod-association-enabled_fluent-bit.out @@ -0,0 +1 @@ +[1554141513.598656,{"log":"Fluent Bit is 
logging\n","stream":"stdout","kubernetes":{"pod_name":"use-pod-association-enabled","namespace_name":"options","service_name":"test-service","environment":"test-environment","name_source":"Instrumentation","workload":"use-pod-association-enabled","pod_id":"e9f2963f-55f2-11e9-84c5-02e422b8a84a","labels":{"app.kubernetes.io/name":"fluent-bit"},"annotations":{"prometheus.io/path":"/api/v1/metrics/prometheus","prometheus.io/port":"2020","prometheus.io/scrape":"true"},"host":"ip-10-49-18-80.eu-west-1.compute.internal","container_name":"fluent-bit","docker_id":"c9898099f6d235126d564ed38a020007ea7a6fac6e25e718de683c9dd0076c16","container_hash":"fluent/fluent-bit@sha256:7ac0fd3569af866e9a6a22eb592744200d2dbe098cf066162453f8d0b06c531f","container_image":"fluent/fluent-bit:latest"}}] diff --git a/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled-fallback-env_fluent-bit.map b/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled-fallback-env_fluent-bit.map new file mode 100644 index 00000000000..5809d2740fb --- /dev/null +++ b/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled-fallback-env_fluent-bit.map @@ -0,0 +1,6 @@ +{ + "use-pod-association-enabled-fallback-env": { + "ServiceName": "test-service", + "ServiceNameSource": "Instrumentation" + } +} diff --git a/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled_fluent-bit.map b/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled_fluent-bit.map new file mode 100644 index 00000000000..e4d67d8bf71 --- /dev/null +++ b/tests/runtime/data/kubernetes/servicemap/options_use-pod-association-enabled_fluent-bit.map @@ -0,0 +1,7 @@ +{ + "use-pod-association-enabled": { + "ServiceName": "test-service", + "Environment": "test-environment", + "ServiceNameSource": "Instrumentation" + } +} diff --git a/tests/runtime/filter_kubernetes.c b/tests/runtime/filter_kubernetes.c index 9d003405dcb..02979f0e808 100644 --- 
a/tests/runtime/filter_kubernetes.c +++ b/tests/runtime/filter_kubernetes.c @@ -23,6 +23,7 @@ struct kube_test_result { /* Test target mode */ #define KUBE_TAIL 0 #define KUBE_SYSTEMD 1 +#define KUBE_POD_ASSOCIATION 2 #ifdef FLB_HAVE_SYSTEMD int flb_test_systemd_send(void); @@ -35,6 +36,45 @@ char kube_test_id[64]; #define KUBE_URL "http://" KUBE_IP ":" KUBE_PORT #define DPATH FLB_TESTS_DATA_PATH "/data/kubernetes" +// Helper function to clear the file +static void clear_file(const char *filename) { + FILE *file; + + // Open the file in "w" mode to empty it + file = fopen(filename, "w"); + if (file == NULL) { + perror("Error opening file to clear content"); + return; + } + + // Close the file to complete truncation + fclose(file); +} + +// Helper function to write to the file with the specified content +static void write_log_to_file(const char *filename) { + FILE *file; + char log_entry[512]; + + // Log message to write + const char *log_template = "{\"log\":\"Fluent Bit is logging\\n\",\"stream\":\"stdout\",\"time\":\"2019-04-01T17:58:33.598656444Z\"}"; + + // Open the file for appending + file = fopen(filename, "a"); + if (file == NULL) { + perror("Error opening file"); + return; + } + // Format the final log entry with the current time + snprintf(log_entry, sizeof(log_entry), log_template); + + // Write the log entry to the file + fprintf(file, "%s\n", log_entry); + + // Close the file + fclose(file); +} + static int file_to_buf(const char *path, char **out_buf, size_t *out_size) { int ret; @@ -210,12 +250,12 @@ static void kube_test(const char *target, int type, const char *suffix, int nExp ret = flb_service_set(ctx.flb, "Flush", "1", "Grace", "1", - "Log_Level", "error", + "Log_Level", "debug", "Parsers_File", DPATH "/parsers.conf", NULL); TEST_CHECK_(ret == 0, "setting service options"); - if (type == KUBE_TAIL) { + if (type == KUBE_TAIL || type == KUBE_POD_ASSOCIATION) { /* Compose path based on target */ snprintf(path, sizeof(path) - 1, DPATH "/log/%s.log", 
target); TEST_CHECK_(access(path, R_OK) == 0, "accessing log file: %s", path); @@ -266,7 +306,7 @@ static void kube_test(const char *target, int type, const char *suffix, int nExp } va_end(va); - if (type == KUBE_TAIL) { + if (type == KUBE_TAIL || type == KUBE_POD_ASSOCIATION) { ret = flb_filter_set(ctx.flb, filter_ffd, "Regex_Parser", "kubernetes-tag", "Kube_Tag_Prefix", "kube.", @@ -310,6 +350,29 @@ static void kube_test(const char *target, int type, const char *suffix, int nExp } #endif + if(type == KUBE_POD_ASSOCIATION) { + clear_file(path); + } + + //Testing the default values setup + struct mk_list *head; + struct flb_filter_instance *f_ins; + mk_list_foreach(head, &ctx.flb->config->filters) { + f_ins = mk_list_entry(head, struct flb_filter_instance, _head); + if (strstr(f_ins->p->name, "kubernetes")) { + TEST_CHECK(strcmp(f_ins->p->config_map[39].name, "pod_association_host_server_ca_file") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[39].def_value, "/etc/amazon-cloudwatch-observability-agent-server-cert/tls-ca.crt") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[40].name, "pod_association_host_client_cert_file") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[40].def_value, "/etc/amazon-cloudwatch-observability-agent-client-cert/client.crt") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[41].name, "pod_association_host_client_key_file") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[41].def_value, "/etc/amazon-cloudwatch-observability-agent-client-cert/client.key") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[42].name, "pod_association_host_tls_debug") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[42].def_value, "0") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[43].name, "pod_association_host_tls_verify") == 0); + TEST_CHECK(strcmp(f_ins->p->config_map[43].def_value, "true") == 0); + } + } + /* Start the engine */ ret = flb_start(ctx.flb); TEST_CHECK_(ret == 0, "starting engine"); @@ -323,9 +386,12 @@ static void kube_test(const char *target, 
int type, const char *suffix, int nExp } #endif - /* Poll for up to 2 seconds or until we got a match */ - for (ret = 0; ret < 2000 && result.nMatched == 0; ret++) { + /* Poll for up to 3 seconds or until we got a match */ + for (ret = 0; ret < 3000 && result.nMatched != nExpected; ret++) { usleep(1000); + if (ret == 2000 && type == KUBE_POD_ASSOCIATION) { + write_log_to_file(path); + } } TEST_CHECK(result.nMatched == nExpected); TEST_MSG("result.nMatched: %i\nnExpected: %i", result.nMatched, nExpected); @@ -382,11 +448,76 @@ static void flb_test_options_use_kubelet_enabled_json() flb_test_options_use_kubelet_enabled("options_use-kubelet-enabled_fluent-bit", NULL, 1); } +#define flb_test_pod_to_service_map_use_kubelet_true(target, suffix, nExpected, platform) \ + kube_test("options/" target, KUBE_POD_ASSOCIATION, suffix, nExpected, \ + "use_pod_association", "true", \ + "use_kubelet", "true", \ + "kubelet_port", "8002", \ + "Pod_Service_Preload_Cache_Dir", DPATH "/servicemap/" target, \ + "pod_association_host_server_ca_file", "/tst/ca.crt", \ + "pod_association_host_client_cert_file", "/tst/client.crt", \ + "pod_association_host_client_key_file", "/tst/client.key", \ + "set_platform", platform, \ + NULL); \ + +#define flb_test_pod_to_service_map_use_kubelet_false(target, suffix, nExpected, platform) \ + kube_test("options/" target, KUBE_POD_ASSOCIATION, suffix, nExpected, \ + "use_pod_association", "true", \ + "use_kubelet", "false", \ + "kubelet_port", "8002", \ + "Pod_Service_Preload_Cache_Dir", DPATH "/servicemap/" target, \ + "pod_association_host_server_ca_file", "/tst/ca.crt", \ + "pod_association_host_client_cert_file", "/tst/client.crt", \ + "pod_association_host_client_key_file", "/tst/client.key", \ + "set_platform", platform, \ + NULL); \ + +static void flb_test_options_use_kubelet_enabled_replicaset_json() +{ + flb_test_pod_to_service_map_use_kubelet_true("options_use-kubelet-enabled-replicaset_fluent-bit", NULL, 1, NULL); +} + +static void 
flb_test_options_use_kubelet_enabled_deployment_json() +{ + flb_test_pod_to_service_map_use_kubelet_true("options_use-kubelet-enabled-deployment_fluent-bit", NULL, 1, NULL); +} + +static void flb_test_options_use_kubelet_enabled_daemonset_json() +{ + flb_test_pod_to_service_map_use_kubelet_true("options_use-kubelet-enabled-daemonset_fluent-bit", NULL, 1, NULL); +} + +static void flb_test_options_use_kubelet_enabled_pod_json() +{ + flb_test_pod_to_service_map_use_kubelet_true("options_use-kubelet-enabled-pod_fluent-bit", NULL, 1, NULL); +} + static void flb_test_options_use_kubelet_disabled_json() { flb_test_options_use_kubelet_disabled("options_use-kubelet-disabled_fluent-bit", NULL, 1); } +static void flb_test_options_use_kubelet_disabled_replicaset_json() +{ + flb_test_pod_to_service_map_use_kubelet_false("options_use-kubelet-disabled-replicaset_fluent-bit", NULL, 1, NULL); +} + +static void flb_test_options_use_kubelet_disabled_deployment_json() +{ + flb_test_pod_to_service_map_use_kubelet_false("options_use-kubelet-disabled-deployment_fluent-bit", NULL, 1, NULL); +} + +static void flb_test_options_use_kubelet_disabled_daemonset_json() +{ + flb_test_pod_to_service_map_use_kubelet_false("options_use-kubelet-disabled-daemonset_fluent-bit", NULL, 1, NULL); +} + +static void flb_test_options_use_kubelet_disabled_pod_json() +{ + flb_test_pod_to_service_map_use_kubelet_false("options_use-kubelet-disabled-pod_fluent-bit", NULL, 1, NULL); +} + + #define flb_test_options_merge_log_enabled(target, suffix, nExpected) \ kube_test("options/" target, KUBE_TAIL, suffix, nExpected, \ "Merge_Log", "On", \ @@ -867,6 +998,17 @@ static void flb_test_annotations_exclude_multiple_4_container_4_stderr() flb_test_annotations_exclude("annotations-exclude_multiple-4_container-4", "stderr", 1); } +static void kube_options_use_pod_association_enabled() +{ + flb_test_pod_to_service_map_use_kubelet_true("options_use-pod-association-enabled_fluent-bit", NULL, 1, NULL); +} + +static void 
kube_options_use_pod_association_enabled_fallback_env() +{ + setenv("CLUSTER_NAME","test-cluster", 1); + flb_test_pod_to_service_map_use_kubelet_true("options_use-pod-association-enabled-fallback-env_fluent-bit", NULL, 1, "eks"); +} + #ifdef FLB_HAVE_SYSTEMD #define CONTAINER_NAME "CONTAINER_NAME=k8s_kairosdb_kairosdb-914055854-b63vq_default_d6c53deb-05a4-11e8-a8c4-080027435fb7_23" #include @@ -957,8 +1099,18 @@ TEST_LIST = { {"kube_core_no_meta", flb_test_core_no_meta}, {"kube_core_unescaping_text", flb_test_core_unescaping_text}, {"kube_core_unescaping_json", flb_test_core_unescaping_json}, + {"kube_options_use_pod_association_enabled", kube_options_use_pod_association_enabled}, + {"kube_options_use_pod_association_enabled_fallback_env", kube_options_use_pod_association_enabled_fallback_env}, {"kube_options_use-kubelet_enabled_json", flb_test_options_use_kubelet_enabled_json}, + {"kube_options_use-kubelet_enabled_replicaset_json", flb_test_options_use_kubelet_enabled_replicaset_json}, + {"kube_options_use-kubelet_enabled_deployment_json", flb_test_options_use_kubelet_enabled_deployment_json}, + {"kube_options_use-kubelet_enabled_daemonset_json", flb_test_options_use_kubelet_enabled_daemonset_json}, + {"kube_options_use-kubelet_enabled_pod_json", flb_test_options_use_kubelet_enabled_pod_json}, {"kube_options_use-kubelet_disabled_json", flb_test_options_use_kubelet_disabled_json}, + {"kube_options_use-kubelet_disabled_replicaset_json", flb_test_options_use_kubelet_disabled_replicaset_json}, + {"kube_options_use-kubelet_disabled_deployment_json", flb_test_options_use_kubelet_disabled_deployment_json}, + {"kube_options_use-kubelet_disabled_daemonset_json", flb_test_options_use_kubelet_disabled_daemonset_json}, + {"kube_options_use-kubelet_disabled_pod_json", flb_test_options_use_kubelet_disabled_pod_json}, {"kube_options_merge_log_enabled_text", flb_test_options_merge_log_enabled_text}, {"kube_options_merge_log_enabled_json",
flb_test_options_merge_log_enabled_json}, {"kube_options_merge_log_enabled_invalid_json", flb_test_options_merge_log_enabled_invalid_json},