From 4d11b8a7b85c9fb3ed84609fb8ad91ea29df4cd0 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Thu, 25 Jan 2024 16:23:33 -0800 Subject: [PATCH 01/30] feat(discovery): port kubeApi discovery to v3 --- pom.xml | 23 + .../discovery/ContainerDiscovery.java | 17 +- .../cryostat/discovery/CustomDiscovery.java | 4 +- .../java/io/cryostat/discovery/Discovery.java | 3 +- .../io/cryostat/discovery/DiscoveryNode.java | 16 +- .../io/cryostat/discovery/JDPDiscovery.java | 4 +- .../cryostat/discovery/KubeApiDiscovery.java | 529 ++++++++++++++++++ .../java/io/cryostat/discovery/NodeType.java | 128 +++++ src/main/resources/application-dev.properties | 3 + .../resources/application-test.properties | 3 + src/main/resources/application.properties | 3 + 11 files changed, 712 insertions(+), 21 deletions(-) create mode 100644 src/main/java/io/cryostat/discovery/KubeApiDiscovery.java create mode 100644 src/main/java/io/cryostat/discovery/NodeType.java diff --git a/pom.xml b/pom.xml index 1e5fdcb9d..3826a0c0d 100644 --- a/pom.xml +++ b/pom.xml @@ -46,6 +46,7 @@ 3.2.5 2 3.2.5 + 6.7.2 ${surefire.rerunFailingTestsCount} @@ -217,6 +218,16 @@ ${com.google.java-format.version} provided + + io.fabric8 + openshift-client + ${io.fabric8.client.version} + + + io.fabric8 + kubernetes-client + ${io.fabric8.client.version} + io.quarkus @@ -249,6 +260,18 @@ junit-jupiter test + + io.fabric8 + kubernetes-server-mock + ${io.fabric8.client.version} + test + + + io.fabric8 + openshift-server-mock + ${io.fabric8.client.version} + test + diff --git a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java index 48dffc944..fd2b32833 100644 --- a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java +++ b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java @@ -164,12 +164,11 @@ void onStart(@Observes StartupEvent evt) { getClass().getName(), socketPath); return; } - logger.infov("{0} started", getClass().getName()); DiscoveryNode universe = DiscoveryNode.getUniverse(); if (DiscoveryNode.getRealm(getRealm()).isEmpty()) { DiscoveryPlugin plugin = new DiscoveryPlugin(); - DiscoveryNode node = DiscoveryNode.environment(getRealm(), DiscoveryNode.REALM); + DiscoveryNode node = DiscoveryNode.environment(getRealm(), BaseNodeType.REALM); plugin.realm = node; plugin.builtin = true; universe.children.add(node); @@ -177,6 +176,8 @@ void onStart(@Observes StartupEvent evt) { universe.persist(); } + logger.info(String.format("Starting %s client", getRealm())); + queryContainers(); this.timerId = vertx.setPeriodic(pollPeriod.toMillis(), unused -> queryContainers()); } @@ -352,11 +353,12 @@ public void handleContainerEvent(ContainerSpec desc, EventKind evtKind) { "PORT", // "AnnotationKey.PORT, Integer.toString(jmxPort))); - DiscoveryNode node = DiscoveryNode.target(target); + DiscoveryNode node = DiscoveryNode.target(target, BaseNodeType.JVM); target.discoveryNode = node; String podName = desc.PodName; if (StringUtils.isNotBlank(podName)) { - DiscoveryNode pod = DiscoveryNode.environment(podName, DiscoveryNode.POD); + DiscoveryNode pod = + DiscoveryNode.environment(podName, ContainerDiscoveryNodeType.POD); if (!realm.children.contains(pod)) { pod.children.add(node); realm.children.add(pod); @@ -366,7 +368,9 @@ public void handleContainerEvent(ContainerSpec desc, EventKind evtKind) { realm, n -> podName.equals(n.name) - && DiscoveryNode.POD.equals(n.nodeType)) + && ContainerDiscoveryNodeType.POD + .getKind() + .equals(n.nodeType)) .orElseThrow(); pod.children.add(node); } @@ 
-381,7 +385,8 @@ public void handleContainerEvent(ContainerSpec desc, EventKind evtKind) { Target t = Target.getTargetByConnectUrl(connectUrl); String podName = desc.PodName; if (StringUtils.isNotBlank(podName)) { - DiscoveryNode pod = DiscoveryNode.environment(podName, DiscoveryNode.POD); + DiscoveryNode pod = + DiscoveryNode.environment(podName, ContainerDiscoveryNodeType.POD); pod.children.remove(t.discoveryNode); } else { realm.children.remove(t.discoveryNode); diff --git a/src/main/java/io/cryostat/discovery/CustomDiscovery.java b/src/main/java/io/cryostat/discovery/CustomDiscovery.java index 3d3428426..831de5ee4 100644 --- a/src/main/java/io/cryostat/discovery/CustomDiscovery.java +++ b/src/main/java/io/cryostat/discovery/CustomDiscovery.java @@ -78,7 +78,7 @@ void onStart(@Observes StartupEvent evt) { DiscoveryNode universe = DiscoveryNode.getUniverse(); if (DiscoveryNode.getRealm(REALM).isEmpty()) { DiscoveryPlugin plugin = new DiscoveryPlugin(); - DiscoveryNode node = DiscoveryNode.environment(REALM, DiscoveryNode.REALM); + DiscoveryNode node = DiscoveryNode.environment(REALM, BaseNodeType.REALM); plugin.realm = node; plugin.builtin = true; universe.children.add(node); @@ -167,7 +167,7 @@ Response doV2Create( target.annotations = new Annotations(); target.annotations.cryostat().putAll(Map.of("REALM", REALM)); - DiscoveryNode node = DiscoveryNode.target(target); + DiscoveryNode node = DiscoveryNode.target(target, BaseNodeType.JVM); target.discoveryNode = node; DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); diff --git a/src/main/java/io/cryostat/discovery/Discovery.java b/src/main/java/io/cryostat/discovery/Discovery.java index 2b2aa99a6..f9310ef6f 100644 --- a/src/main/java/io/cryostat/discovery/Discovery.java +++ b/src/main/java/io/cryostat/discovery/Discovery.java @@ -195,7 +195,6 @@ public Response register(@Context RoutingContext ctx, JsonObject body) // TODO apply URI range validation to the remote address InetAddress remoteAddress = getRemoteAddress(ctx); - URI location; DiscoveryPlugin plugin; if (StringUtils.isNotBlank(pluginId) && StringUtils.isNotBlank(priorToken)) { @@ -217,7 +216,7 @@ public Response register(@Context RoutingContext ctx, JsonObject body) plugin.callback = callbackUri; plugin.realm = DiscoveryNode.environment( - requireNonBlank(realmName, "realm"), DiscoveryNode.REALM); + requireNonBlank(realmName, "realm"), BaseNodeType.REALM); plugin.builtin = false; plugin.persist(); diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 46ccf2456..6c2b05072 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -55,9 +55,6 @@ public class DiscoveryNode extends PanacheEntity { public static final String NODE_TYPE = "nodeType"; - public static final String UNIVERSE = "Universe"; - public static final String REALM = "Realm"; - public static final String POD = "Pod"; @Column(unique = false, nullable = false, updatable = false) @JsonView(Views.Flat.class) @@ -95,9 +92,10 @@ public int hashCode() { } public static DiscoveryNode getUniverse() { - return DiscoveryNode.find(NODE_TYPE, UNIVERSE) + return DiscoveryNode.find(NODE_TYPE, BaseNodeType.UNIVERSE) .singleResultOptional() - .orElseGet(() -> environment(UNIVERSE, UNIVERSE)); + .orElseGet( + () -> environment(BaseNodeType.UNIVERSE.toString(), BaseNodeType.UNIVERSE)); } public static Optional getRealm(String name) { @@ -109,10 +107,10 @@ public static 
Optional getChild( return node.children.stream().filter(predicate).findFirst(); } - public static DiscoveryNode environment(String name, String nodeType) { + public static DiscoveryNode environment(String name, NodeType nodeType) { DiscoveryNode node = new DiscoveryNode(); node.name = name; - node.nodeType = nodeType; + node.nodeType = nodeType.getKind(); node.labels = new HashMap<>(); node.children = new ArrayList<>(); node.target = null; @@ -120,10 +118,10 @@ public static DiscoveryNode environment(String name, String nodeType) { return node; } - public static DiscoveryNode target(Target target) { + public static DiscoveryNode target(Target target, NodeType nodeType) { DiscoveryNode node = new DiscoveryNode(); node.name = target.connectUrl.toString(); - node.nodeType = "JVM"; + node.nodeType = nodeType.getKind(); node.labels = new HashMap<>(target.labels); node.children = null; node.target = target; diff --git a/src/main/java/io/cryostat/discovery/JDPDiscovery.java b/src/main/java/io/cryostat/discovery/JDPDiscovery.java index fd203863f..4012ea7b3 100644 --- a/src/main/java/io/cryostat/discovery/JDPDiscovery.java +++ b/src/main/java/io/cryostat/discovery/JDPDiscovery.java @@ -70,7 +70,7 @@ void onStart(@Observes StartupEvent evt) { DiscoveryNode universe = DiscoveryNode.getUniverse(); if (DiscoveryNode.getRealm(REALM).isEmpty()) { DiscoveryPlugin plugin = new DiscoveryPlugin(); - DiscoveryNode node = DiscoveryNode.environment(REALM, DiscoveryNode.REALM); + DiscoveryNode node = DiscoveryNode.environment(REALM, BaseNodeType.REALM); plugin.realm = node; plugin.builtin = true; universe.children.add(node); @@ -139,7 +139,7 @@ public synchronized void handleJdpEvent(JvmDiscoveryEvent evt) { "PORT", // "AnnotationKey.PORT, Integer.toString(rmiTarget.getPort()))); - DiscoveryNode node = DiscoveryNode.target(target); + DiscoveryNode node = DiscoveryNode.target(target, BaseNodeType.JVM); target.discoveryNode = node; realm.children.add(node); diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java new file mode 100644 index 000000000..3b08c5074 --- /dev/null +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -0,0 +1,529 @@ +package io.cryostat.discovery; + +import java.nio.file.Path; +import java.time.Duration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ForkJoinPool; + +import io.cryostat.core.sys.FileSystem; + +import com.google.common.base.Optional; +import io.fabric8.kubernetes.api.model.Endpoints; +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.ObjectReference; +import io.fabric8.kubernetes.api.model.OwnerReference; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientBuilder; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.openshift.api.model.Route; +import io.fabric8.openshift.client.OpenShiftClient; +import io.quarkus.runtime.ShutdownEvent; +import io.quarkus.runtime.StartupEvent; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.event.Observes; +import jakarta.inject.Inject; +import jakarta.transaction.Transactional; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.concurrent.ConcurrentException; +import 
org.apache.commons.lang3.concurrent.LazyInitializer; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.eclipse.microprofile.config.inject.ConfigProperty; +import org.jboss.logging.Logger; + +@ApplicationScoped +class OpenShiftDiscovery extends KubeApiDiscovery { + @ConfigProperty(name = "cryostat.discovery.openshift.enabled") + boolean enabled; + + @Override + boolean available() { + return super.available() && client().supports(Route.class); + } + + @Override + OpenShiftClient client() { + return kubeConfig.withOpenShift(super.client()); + } +} + +@ApplicationScoped +public class KubeApiDiscovery { + public static final String REALM = "KubernetesApi"; + + public static final long ENDPOINTS_INFORMER_RESYNC_PERIOD = Duration.ofSeconds(30).toMillis(); + + @Inject Logger logger; + + @Inject KubeConfig kubeConfig; + + @ConfigProperty(name = "cryostat.discovery.kubernetes.enabled") + boolean enabled; + + // private Integer memoHash; + // private EnvironmentNode memoTree; + // private final Lazy connectionToolkit; + // private final Logger logger; + private final Map, Pair> + discoveryNodeCache = new ConcurrentHashMap<>(); + // private final Map, Object> queryLocks = + // new ConcurrentHashMap<>(); + + private final LazyInitializer>> nsInformers = + new LazyInitializer>>() { + @Override + protected HashMap> initialize() + throws ConcurrentException { + // TODO add support for some wildcard indicating a single Informer for any + // namespace that Cryostat has permissions to. This will need some restructuring + // of how the + // namespaces within the discovery tree are mapped. + var result = new HashMap>(); + kubeConfig + .getWatchNamespaces() + .forEach( + ns -> { + result.put( + ns, + kubeConfig + .kubeClient() + .endpoints() + .inNamespace(ns) + .inform( + new EndpointsHandler(), + ENDPOINTS_INFORMER_RESYNC_PERIOD)); + logger.info( + String.format( + "Started Endpoints SharedInformer for" + + " namespace \"{}\"", + ns)); + }); + return result; + } + }; + + @Transactional + void onStart(@Observes StartupEvent evt) { + if (!(enabled())) { + return; + } + + if (!available()) { + logger.errorv("{0} enabled but environment is not Kubernetes!", getClass().getName()); + return; + } + + DiscoveryNode universe = DiscoveryNode.getUniverse(); + if (DiscoveryNode.getRealm(REALM).isEmpty()) { + DiscoveryPlugin plugin = new DiscoveryPlugin(); + DiscoveryNode node = DiscoveryNode.environment(REALM, BaseNodeType.REALM); + plugin.realm = node; + plugin.builtin = true; + universe.children.add(node); + plugin.persist(); + universe.persist(); + } + + logger.info(String.format("Starting %s client", REALM)); + + try { + nsInformers.get(); // trigger lazy init + } catch (ConcurrentException e) { + throw new IllegalStateException(e); + } + } + + void onStop(@Observes ShutdownEvent evt) { + if (!(enabled() && available())) { + return; + } + logger.info(String.format("Shutting down %s client", REALM)); + } + + boolean enabled() { + return enabled; + } + + boolean available() { + try { + boolean hasNamespace = StringUtils.isNotBlank(kubeConfig.getOwnNamespace()); + return kubeConfig.kubeApiAvailable() || hasNamespace; + } catch (Exception e) { + logger.info(e); + } + return false; + } + + KubernetesClient client() { + return kubeConfig.kubeClient(); + } + + // @Override + // public List listDiscoverableServices() { + // try { + // return getAllServiceRefs(); + // } catch (Exception e) { + // logger.warn(e); + // return Collections.emptyList(); + // } + // } + + // 
@Override + // public EnvironmentNode getDiscoveryTree() { + // int currentHash = 0; + // HashCodeBuilder hcb = new HashCodeBuilder(); + // Map> informers = safeGetInformers(); + // for (var informer : informers.values()) { + // List store = informer.getStore().list(); + // hcb.append(store.hashCode()); + // } + // currentHash = hcb.build(); + // if (Objects.equals(memoHash, currentHash)) { + // logger.trace("Using memoized discovery tree"); + // return new EnvironmentNode(memoTree); + // } + // memoHash = currentHash; + // EnvironmentNode realmNode = + // new EnvironmentNode(REALM, BaseNodeType.REALM, Collections.emptyMap(), Set.of()); + // informers + // .entrySet() + // .forEach( + // entry -> { + // var namespace = entry.getKey(); + // var store = entry.getValue().getStore().list(); + // EnvironmentNode nsNode = + // new EnvironmentNode(namespace, KubernetesNodeType.NAMESPACE); + // try { + // store.stream() + // .flatMap(endpoints -> + // getTargetTuples(endpoints).stream()) + // .forEach(tuple -> buildOwnerChain(nsNode, tuple)); + // } catch (Exception e) { + // logger.warn(e); + // } finally { + // discoveryNodeCache.clear(); + // queryLocks.clear(); + // } + // realmNode.addChildNode(nsNode); + // }); + // memoTree = realmNode; + // return realmNode; + // } + + private Map> safeGetInformers() { + Map> informers; + try { + informers = nsInformers.get(); + } catch (ConcurrentException e) { + throw new IllegalStateException(e); + } + return informers; + } + + // private void buildOwnerChain(EnvironmentNode nsNode, TargetTuple targetTuple) { + // ObjectReference target = targetTuple.addr.getTargetRef(); + // if (target == null) { + // logger.error( + // "Address {} for Endpoint {} had null target reference", + // targetTuple.addr.getIp() != null + // ? targetTuple.addr.getIp() + // : targetTuple.addr.getHostname(), + // targetTuple.objRef.getName()); + // return; + // } + // String targetKind = target.getKind(); + // KubernetesNodeType targetType = KubernetesNodeType.fromKubernetesKind(targetKind); + // if (targetType == KubernetesNodeType.POD) { + // // if the Endpoint points to a Pod, chase the owner chain up as far as possible, then + // // add that to the Namespace + + // Pair pod = + // discoveryNodeCache.computeIfAbsent( + // cacheKey(target.getNamespace(), target), this::queryForNode); + // pod.getRight() + // .addChildNode( + // new TargetNode( + // KubernetesNodeType.ENDPOINT, targetTuple.toServiceRef())); + + // Pair node = pod; + // while (true) { + // Pair owner = getOrCreateOwnerNode(node); + // if (owner == null) { + // break; + // } + // EnvironmentNode ownerNode = owner.getRight(); + // ownerNode.addChildNode(node.getRight()); + // node = owner; + // } + // nsNode.addChildNode(node.getRight()); + // } else { + // // if the Endpoint points to something else(?) than a Pod, just add the target + // straight + // // to the Namespace + // nsNode.addChildNode( + // new TargetNode(KubernetesNodeType.ENDPOINT, targetTuple.toServiceRef())); + // } + // } + + // private Pair getOrCreateOwnerNode( + // Pair child) { + // HasMetadata childRef = child.getLeft(); + // if (childRef == null) { + // logger.error( + // "Could not locate node named {} of kind {} while traversing environment", + // child.getRight().getName(), + // child.getRight().getNodeType()); + // return null; + // } + // List owners = childRef.getMetadata().getOwnerReferences(); + // // Take first "expected" owner Kind from NodeTypes, or if none, simply use the first + // owner. 
+ // // If there are no owners then return null to signify this and break the chain + // if (owners.isEmpty()) { + // return null; + // } + // String namespace = childRef.getMetadata().getNamespace(); + // OwnerReference owner = + // owners.stream() + // .filter(o -> KubernetesNodeType.fromKubernetesKind(o.getKind()) != null) + // .findFirst() + // .orElse(owners.get(0)); + // return discoveryNodeCache.computeIfAbsent(cacheKey(namespace, owner), + // this::queryForNode); + // } + + private Triple cacheKey(String ns, OwnerReference resource) { + return Triple.of(ns, resource.getKind(), resource.getName()); + } + + // Unfortunately, ObjectReference and OwnerReference both independently implement getKind and + // getName - they don't come from a common base class. + private Triple cacheKey(String ns, ObjectReference resource) { + return Triple.of(ns, resource.getKind(), resource.getName()); + } + + // private Pair queryForNode( + // Triple lookupKey) { + // String namespace = lookupKey.getLeft(); + // KubernetesNodeType nodeType = + // KubernetesNodeType.fromKubernetesKind(lookupKey.getMiddle()); + // String nodeName = lookupKey.getRight(); + // if (nodeType == null) { + // return null; + // } + // synchronized (queryLocks.computeIfAbsent(lookupKey, k -> new Object())) { + // EnvironmentNode node; + // HasMetadata kubeObj = + // + // nodeType.getQueryFunction().apply(k8sClient).apply(namespace).apply(nodeName); + // if (kubeObj != null) { + // node = new EnvironmentNode(nodeName, nodeType, + // kubeObj.getMetadata().getLabels()); + // } else { + // node = new EnvironmentNode(nodeName, nodeType); + // } + // return Pair.of(kubeObj, node); + // } + // } + + // private boolean isCompatiblePort(EndpointPort port) { + // return "jfr-jmx".equals(port.getName()) || 9091 == port.getPort(); + // } + + // private List getAllServiceRefs() { + // return safeGetInformers().values().stream() + // .flatMap(i -> i.getStore().list().stream()) + // .flatMap(endpoints -> getServiceRefs(endpoints).stream()) + // .collect(Collectors.toList()); + // } + + // private List getTargetTuples(Endpoints endpoints) { + // List tts = new ArrayList<>(); + // for (EndpointSubset subset : endpoints.getSubsets()) { + // for (EndpointPort port : subset.getPorts()) { + // if (!isCompatiblePort(port)) { + // continue; + // } + // for (EndpointAddress addr : subset.getAddresses()) { + // tts.add(new TargetTuple(addr.getTargetRef(), addr, port)); + // } + // } + // } + // return tts; + // } + + // private List getServiceRefs(Endpoints endpoints) { + // return getTargetTuples(endpoints).stream() + // .map(TargetTuple::toServiceRef) + // .filter(Objects::nonNull) + // .collect(Collectors.toList()); + // } + + class KubeConfig { + public static final String KUBERNETES_NAMESPACE_PATH = + "/var/run/secrets/kubernetes.io/serviceaccount/namespace"; + + @Inject Logger logger; + @Inject FileSystem fs; + + @ConfigProperty(name = "cryostat.k8s.namespaces") + Optional> watchNamespaces; + + @ConfigProperty(name = "kubernetes.service.host") + Optional serviceHost; + + private KubernetesClient kubeClient; + + List getWatchNamespaces() { + return watchNamespaces.or(List.of()); + } + + String getOwnNamespace() { + try { + return fs.readString(Path.of(KUBERNETES_NAMESPACE_PATH)); + } catch (Exception e) { + logger.trace(e); + return null; + } + } + + boolean kubeApiAvailable() { + return serviceHost.isPresent(); + } + + KubernetesClient kubeClient() { + if (kubeClient == null) { + kubeClient = + new KubernetesClientBuilder() + 
.withTaskExecutor(ForkJoinPool.commonPool()) + .build(); + } + return kubeClient; + } + + OpenShiftClient withOpenShift(KubernetesClient client) { + return client.adapt(OpenShiftClient.class); + } + } + + private final class EndpointsHandler implements ResourceEventHandler { + + @Override + public void onAdd(Endpoints obj) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'onAdd'"); + } + + @Override + public void onUpdate(Endpoints oldObj, Endpoints newObj) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'onUpdate'"); + } + + @Override + public void onDelete(Endpoints obj, boolean deletedFinalStateUnknown) { + // TODO Auto-generated method stub + throw new UnsupportedOperationException("Unimplemented method 'onDelete'"); + } + // @Override + // public void onAdd(Endpoints endpoints) { + // getServiceRefs(endpoints) + // .forEach(serviceRef -> notifyAsyncTargetDiscovery(EventKind.FOUND, + // serviceRef)); + // } + + // @Override + // public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { + // Set previousRefs = new HashSet<>(getServiceRefs(oldEndpoints)); + // Set currentRefs = new HashSet<>(getServiceRefs(newEndpoints)); + + // if (previousRefs.equals(currentRefs)) { + // return; + // } + + // ServiceRef.compare(previousRefs).to(currentRefs).updated().stream() + // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.MODIFIED, sr)); + + // ServiceRef.compare(previousRefs).to(currentRefs).added().stream() + // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.FOUND, sr)); + // ` + // ServiceRef.compare(previousRefs).to(currentRefs).removed().stream() + // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.LOST, sr)); + // } + + // @Override + // public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { + // if (deletedFinalStateUnknown) { + // logger.warn("Deleted final state unknown: {}", endpoints); + // return; + // } + // getServiceRefs(endpoints) + // .forEach(serviceRef -> notifyAsyncTargetDiscovery(EventKind.LOST, + // serviceRef)); + // } + } + + // class TargetTuple { + // ObjectReference objRef; + // EndpointAddress addr; + // EndpointPort port; + + // TargetTuple(ObjectReference objRef, EndpointAddress addr, EndpointPort port) { + // this.objRef = objRef; + // this.addr = addr; + // this.port = port; + // } + + // Target toTarget() { + // Pair node = + // discoveryNodeCache.computeIfAbsent( + // cacheKey(objRef.getNamespace(), objRef), + // this::queryForNode); + // HasMetadata podRef = node.getLeft(); + // if (node.getRight().getNodeType() != KubernetesNodeType.POD) { + // throw new IllegalStateException(); + // } + // if (podRef == null) { + // throw new IllegalStateException(); + // } + // try { + // String targetName = objRef.getName(); + + // String ip = addr.getIp().replaceAll("\\.", "-"); + // String namespace = podRef.getMetadata().getNamespace(); + // String host = String.format("%s.%s.pod", ip, namespace); + + // JMXServiceURL jmxUrl = + // new JMXServiceURL( + // "rmi", + // "", + // 0, + // "/jndi/rmi://" + host + ':' + port.getPort() + "/jmxrmi"); + // ServiceRef serviceRef = + // new ServiceRef(null, URI.create(jmxUrl.toString()), targetName); + // serviceRef.setLabels(podRef.getMetadata().getLabels()); + // serviceRef.setPlatformAnnotations(podRef.getMetadata().getAnnotations()); + // serviceRef.setCryostatAnnotations( + // Map.of( + // AnnotationKey.REALM, + // REALM, + // AnnotationKey.HOST, + // addr.getIp(), + // 
AnnotationKey.PORT, + // Integer.toString(port.getPort()), + // AnnotationKey.NAMESPACE, + // addr.getTargetRef().getNamespace(), + // AnnotationKey.POD_NAME, + // addr.getTargetRef().getName())); + // return serviceRef; + // } catch (Exception e) { + // logger.warn(e); + // return null; + // } + // } + // } +} diff --git a/src/main/java/io/cryostat/discovery/NodeType.java b/src/main/java/io/cryostat/discovery/NodeType.java new file mode 100644 index 000000000..032bfcae2 --- /dev/null +++ b/src/main/java/io/cryostat/discovery/NodeType.java @@ -0,0 +1,128 @@ +package io.cryostat.discovery; + +import java.util.function.Function; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.client.KubernetesClient; + +public interface NodeType { + String getKind(); + + int ordinal(); +} + +enum BaseNodeType implements NodeType { + // represents the entire deployment scenario Cryostat finds itself in + UNIVERSE("Universe"), + // represents a division of the deployment scenario - the universe may consist of a + // Kubernetes Realm and a JDP Realm, for example + REALM("Realm"), + // represents a plain target JVM, connectable over JMX + JVM("JVM"), + // represents a target JVM using the Cryostat Agent, *not* connectable over JMX. Agent instances + // that do publish a JMX Service URL should publish themselves with the JVM NodeType. + AGENT("CryostatAgent"), + ; + + private final String kind; + + BaseNodeType(String kind) { + this.kind = kind; + } + + @Override + public String getKind() { + return kind; + } + + @Override + public String toString() { + return getKind(); + } +} + +enum KubeDiscoveryNodeType implements NodeType { + NAMESPACE("Namespace"), + STATEFULSET( + "StatefulSet", + c -> ns -> n -> c.apps().statefulSets().inNamespace(ns).withName(n).get()), + DAEMONSET("DaemonSet", c -> ns -> n -> c.apps().daemonSets().inNamespace(ns).withName(n).get()), + DEPLOYMENT( + "Deployment", c -> ns -> n -> c.apps().deployments().inNamespace(ns).withName(n).get()), + REPLICASET( + "ReplicaSet", c -> ns -> n -> c.apps().replicaSets().inNamespace(ns).withName(n).get()), + REPLICATIONCONTROLLER( + "ReplicationController", + c -> ns -> n -> c.replicationControllers().inNamespace(ns).withName(n).get()), + POD("Pod", c -> ns -> n -> c.pods().inNamespace(ns).withName(n).get()), + ENDPOINT("Endpoint", c -> ns -> n -> c.endpoints().inNamespace(ns).withName(n).get()), + // OpenShift resources + DEPLOYMENTCONFIG("DeploymentConfig"), + ; + + private final String kubernetesKind; + private final transient Function< + KubernetesClient, Function>> + getFn; + + KubeDiscoveryNodeType(String kubernetesKind) { + this(kubernetesKind, client -> namespace -> name -> null); + } + + KubeDiscoveryNodeType( + String kubernetesKind, + Function>> + getFn) { + this.kubernetesKind = kubernetesKind; + this.getFn = getFn; + } + + @Override + public String getKind() { + return kubernetesKind; + } + + public Function>> + getQueryFunction() { + return getFn; + } + + public static KubeDiscoveryNodeType fromKubernetesKind(String kubernetesKind) { + if (kubernetesKind == null) { + return null; + } + for (KubeDiscoveryNodeType nt : values()) { + if (kubernetesKind.equalsIgnoreCase(nt.kubernetesKind)) { + return nt; + } + } + return null; + } + + @Override + public String toString() { + return getKind(); + } +} + +enum ContainerDiscoveryNodeType implements NodeType { + // represents a container pod managed by Podman + POD("Pod"), + ; + + private final String kind; + + ContainerDiscoveryNodeType(String kind) { + 
this.kind = kind; + } + + @Override + public String getKind() { + return kind; + } + + @Override + public String toString() { + return getKind(); + } +} diff --git a/src/main/resources/application-dev.properties b/src/main/resources/application-dev.properties index 0467ceee7..d1f9feaf4 100644 --- a/src/main/resources/application-dev.properties +++ b/src/main/resources/application-dev.properties @@ -17,6 +17,9 @@ quarkus.log.category."org.jboss.resteasy.reactive.common.core.AbstractResteasyRe cryostat.discovery.jdp.enabled=true cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true +cryostat.discovery.kubernetes.enabled=true +cryostat.k8s.namespaces= +kubernetes.service.host= quarkus.datasource.devservices.enabled=true quarkus.datasource.devservices.image-name=quay.io/cryostat/cryostat-db diff --git a/src/main/resources/application-test.properties b/src/main/resources/application-test.properties index 70b0d40ef..7a4bffafc 100644 --- a/src/main/resources/application-test.properties +++ b/src/main/resources/application-test.properties @@ -3,6 +3,9 @@ quarkus.smallrye-openapi.info-title=Cryostat API (test) cryostat.discovery.jdp.enabled=true cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true +cryostat.discovery.kubernetes.enabled=true +cryostat.k8s.namespaces= +kubernetes.service.host= quarkus.test.env.JAVA_OPTS_APPEND=-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9091 -Dcom.sun.management.jmxremote.rmi.port=9091 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index f27a26dcd..015e70753 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -10,6 +10,7 @@ cryostat.discovery.plugins.jwt.secret.keysize=256 cryostat.discovery.plugins.jwt.signature.algorithm=HS256 cryostat.discovery.plugins.jwt.encryption.algorithm=dir cryostat.discovery.plugins.jwt.encryption.method=A256GCM +cryostat.discovery.kubernetes.enabled=false quarkus.test.integration-test-profile=test cryostat.connections.max-open=0 @@ -31,6 +32,8 @@ cryostat.http.proxy.tls-enabled=false cryostat.http.proxy.host=${quarkus.http.host} cryostat.http.proxy.port=${quarkus.http.port} cryostat.http.proxy.path=/ +cryostat.k8s.namespaces= +kubernetes.service.host= conf-dir=/opt/cryostat.d ssl.truststore=${conf-dir}/truststore.p12 From f28fe8515ea40e611fecedfce171303d6e1f3661 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Thu, 8 Feb 2024 02:29:24 -0800 Subject: [PATCH 02/30] chore(discovery): clean up unused codes and address reviews --- .../cryostat/discovery/KubeApiDiscovery.java | 507 ++++++------------ src/main/resources/application-dev.properties | 3 +- .../resources/application-test.properties | 4 +- src/main/resources/application.properties | 7 +- 4 files changed, 173 insertions(+), 348 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 3b08c5074..dde36b895 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -2,25 +2,30 @@ import java.nio.file.Path; import java.time.Duration; +import java.util.ArrayList; +import 
java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; +import java.util.Objects; +import java.util.Set; import java.util.concurrent.ForkJoinPool; +import java.util.stream.Collectors; import io.cryostat.core.sys.FileSystem; +import io.cryostat.targets.Target.EventKind; import com.google.common.base.Optional; +import io.fabric8.kubernetes.api.model.EndpointAddress; +import io.fabric8.kubernetes.api.model.EndpointPort; +import io.fabric8.kubernetes.api.model.EndpointSubset; import io.fabric8.kubernetes.api.model.Endpoints; -import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.ObjectReference; -import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.KubernetesClientBuilder; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; -import io.fabric8.openshift.api.model.Route; -import io.fabric8.openshift.client.OpenShiftClient; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; import jakarta.enterprise.context.ApplicationScoped; @@ -28,29 +33,12 @@ import jakarta.inject.Inject; import jakarta.transaction.Transactional; import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.concurrent.ConcurrentException; import org.apache.commons.lang3.concurrent.LazyInitializer; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.logging.Logger; -@ApplicationScoped -class OpenShiftDiscovery extends KubeApiDiscovery { - @ConfigProperty(name = "cryostat.discovery.openshift.enabled") - boolean enabled; - - @Override - boolean available() { - return super.available() && client().supports(Route.class); - } - - @Override - OpenShiftClient client() { - return kubeConfig.withOpenShift(super.client()); - } -} - @ApplicationScoped public class KubeApiDiscovery { public static final String REALM = "KubernetesApi"; @@ -64,14 +52,11 @@ public class KubeApiDiscovery { @ConfigProperty(name = "cryostat.discovery.kubernetes.enabled") boolean enabled; - // private Integer memoHash; - // private EnvironmentNode memoTree; - // private final Lazy connectionToolkit; - // private final Logger logger; - private final Map, Pair> - discoveryNodeCache = new ConcurrentHashMap<>(); - // private final Map, Object> queryLocks = - // new ConcurrentHashMap<>(); + @ConfigProperty(name = "cryostat.discovery.k8s.port-names") + Optional> JmxPortNames; + + @ConfigProperty(name = "cryostat.discovery.k8s.port-numbers") + Optional> JmxPortNumbers; private final LazyInitializer>> nsInformers = new LazyInitializer>>() { @@ -89,18 +74,15 @@ protected HashMap> initialize() ns -> { result.put( ns, - kubeConfig - .kubeClient() - .endpoints() + client().endpoints() .inNamespace(ns) .inform( new EndpointsHandler(), ENDPOINTS_INFORMER_RESYNC_PERIOD)); - logger.info( - String.format( - "Started Endpoints SharedInformer for" - + " namespace \"{}\"", - ns)); + logger.infov( + "Started Endpoints SharedInformer for" + + " namespace \"{}\"", + ns); }); return result; } @@ -128,20 +110,17 @@ void onStart(@Observes StartupEvent evt) { universe.persist(); } - logger.info(String.format("Starting %s client", REALM)); + 
logger.infov("Starting %s client", REALM); - try { - nsInformers.get(); // trigger lazy init - } catch (ConcurrentException e) { - throw new IllegalStateException(e); - } + safeGetInformers(); // trigger lazy init } void onStop(@Observes ShutdownEvent evt) { if (!(enabled() && available())) { return; } - logger.info(String.format("Shutting down %s client", REALM)); + + logger.infov("Shutting down %s client", REALM); } boolean enabled() { @@ -162,58 +141,6 @@ KubernetesClient client() { return kubeConfig.kubeClient(); } - // @Override - // public List listDiscoverableServices() { - // try { - // return getAllServiceRefs(); - // } catch (Exception e) { - // logger.warn(e); - // return Collections.emptyList(); - // } - // } - - // @Override - // public EnvironmentNode getDiscoveryTree() { - // int currentHash = 0; - // HashCodeBuilder hcb = new HashCodeBuilder(); - // Map> informers = safeGetInformers(); - // for (var informer : informers.values()) { - // List store = informer.getStore().list(); - // hcb.append(store.hashCode()); - // } - // currentHash = hcb.build(); - // if (Objects.equals(memoHash, currentHash)) { - // logger.trace("Using memoized discovery tree"); - // return new EnvironmentNode(memoTree); - // } - // memoHash = currentHash; - // EnvironmentNode realmNode = - // new EnvironmentNode(REALM, BaseNodeType.REALM, Collections.emptyMap(), Set.of()); - // informers - // .entrySet() - // .forEach( - // entry -> { - // var namespace = entry.getKey(); - // var store = entry.getValue().getStore().list(); - // EnvironmentNode nsNode = - // new EnvironmentNode(namespace, KubernetesNodeType.NAMESPACE); - // try { - // store.stream() - // .flatMap(endpoints -> - // getTargetTuples(endpoints).stream()) - // .forEach(tuple -> buildOwnerChain(nsNode, tuple)); - // } catch (Exception e) { - // logger.warn(e); - // } finally { - // discoveryNodeCache.clear(); - // queryLocks.clear(); - // } - // realmNode.addChildNode(nsNode); - // }); - // memoTree = realmNode; - // return realmNode; - // } - private Map> safeGetInformers() { Map> informers; try { @@ -224,153 +151,34 @@ private Map> safeGetInformers() { return informers; } - // private void buildOwnerChain(EnvironmentNode nsNode, TargetTuple targetTuple) { - // ObjectReference target = targetTuple.addr.getTargetRef(); - // if (target == null) { - // logger.error( - // "Address {} for Endpoint {} had null target reference", - // targetTuple.addr.getIp() != null - // ? targetTuple.addr.getIp() - // : targetTuple.addr.getHostname(), - // targetTuple.objRef.getName()); - // return; - // } - // String targetKind = target.getKind(); - // KubernetesNodeType targetType = KubernetesNodeType.fromKubernetesKind(targetKind); - // if (targetType == KubernetesNodeType.POD) { - // // if the Endpoint points to a Pod, chase the owner chain up as far as possible, then - // // add that to the Namespace - - // Pair pod = - // discoveryNodeCache.computeIfAbsent( - // cacheKey(target.getNamespace(), target), this::queryForNode); - // pod.getRight() - // .addChildNode( - // new TargetNode( - // KubernetesNodeType.ENDPOINT, targetTuple.toServiceRef())); - - // Pair node = pod; - // while (true) { - // Pair owner = getOrCreateOwnerNode(node); - // if (owner == null) { - // break; - // } - // EnvironmentNode ownerNode = owner.getRight(); - // ownerNode.addChildNode(node.getRight()); - // node = owner; - // } - // nsNode.addChildNode(node.getRight()); - // } else { - // // if the Endpoint points to something else(?) 
than a Pod, just add the target - // straight - // // to the Namespace - // nsNode.addChildNode( - // new TargetNode(KubernetesNodeType.ENDPOINT, targetTuple.toServiceRef())); - // } - // } - - // private Pair getOrCreateOwnerNode( - // Pair child) { - // HasMetadata childRef = child.getLeft(); - // if (childRef == null) { - // logger.error( - // "Could not locate node named {} of kind {} while traversing environment", - // child.getRight().getName(), - // child.getRight().getNodeType()); - // return null; - // } - // List owners = childRef.getMetadata().getOwnerReferences(); - // // Take first "expected" owner Kind from NodeTypes, or if none, simply use the first - // owner. - // // If there are no owners then return null to signify this and break the chain - // if (owners.isEmpty()) { - // return null; - // } - // String namespace = childRef.getMetadata().getNamespace(); - // OwnerReference owner = - // owners.stream() - // .filter(o -> KubernetesNodeType.fromKubernetesKind(o.getKind()) != null) - // .findFirst() - // .orElse(owners.get(0)); - // return discoveryNodeCache.computeIfAbsent(cacheKey(namespace, owner), - // this::queryForNode); - // } - - private Triple cacheKey(String ns, OwnerReference resource) { - return Triple.of(ns, resource.getKind(), resource.getName()); + private boolean isCompatiblePort(EndpointPort port) { + return JmxPortNames.or(List.of()).contains(port.getName()) + || JmxPortNumbers.or(List.of()).contains(port.getPort()); + } + + private List getTargetRefsFrom(Endpoints endpoints) { + return TargetRef.fromEndpoints(endpoints).stream() + .filter( + (ref) -> { + return Objects.nonNull(ref) && isCompatiblePort(ref.port()); + }) + .collect(Collectors.toList()); } - // Unfortunately, ObjectReference and OwnerReference both independently implement getKind and - // getName - they don't come from a common base class. 
- private Triple cacheKey(String ns, ObjectReference resource) { - return Triple.of(ns, resource.getKind(), resource.getName()); + @Transactional + public void handleEndpointEvent(TargetRef ref, EventKind eventKind) { + // TODO: Handle endpoint event } - // private Pair queryForNode( - // Triple lookupKey) { - // String namespace = lookupKey.getLeft(); - // KubernetesNodeType nodeType = - // KubernetesNodeType.fromKubernetesKind(lookupKey.getMiddle()); - // String nodeName = lookupKey.getRight(); - // if (nodeType == null) { - // return null; - // } - // synchronized (queryLocks.computeIfAbsent(lookupKey, k -> new Object())) { - // EnvironmentNode node; - // HasMetadata kubeObj = - // - // nodeType.getQueryFunction().apply(k8sClient).apply(namespace).apply(nodeName); - // if (kubeObj != null) { - // node = new EnvironmentNode(nodeName, nodeType, - // kubeObj.getMetadata().getLabels()); - // } else { - // node = new EnvironmentNode(nodeName, nodeType); - // } - // return Pair.of(kubeObj, node); - // } - // } - - // private boolean isCompatiblePort(EndpointPort port) { - // return "jfr-jmx".equals(port.getName()) || 9091 == port.getPort(); - // } - - // private List getAllServiceRefs() { - // return safeGetInformers().values().stream() - // .flatMap(i -> i.getStore().list().stream()) - // .flatMap(endpoints -> getServiceRefs(endpoints).stream()) - // .collect(Collectors.toList()); - // } - - // private List getTargetTuples(Endpoints endpoints) { - // List tts = new ArrayList<>(); - // for (EndpointSubset subset : endpoints.getSubsets()) { - // for (EndpointPort port : subset.getPorts()) { - // if (!isCompatiblePort(port)) { - // continue; - // } - // for (EndpointAddress addr : subset.getAddresses()) { - // tts.add(new TargetTuple(addr.getTargetRef(), addr, port)); - // } - // } - // } - // return tts; - // } - - // private List getServiceRefs(Endpoints endpoints) { - // return getTargetTuples(endpoints).stream() - // .map(TargetTuple::toServiceRef) - // .filter(Objects::nonNull) - // .collect(Collectors.toList()); - // } - - class KubeConfig { + @ApplicationScoped + static final class KubeConfig { public static final String KUBERNETES_NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"; @Inject Logger logger; @Inject FileSystem fs; - @ConfigProperty(name = "cryostat.k8s.namespaces") + @ConfigProperty(name = "cryostat.discovery.k8s.namespaces") Optional> watchNamespaces; @ConfigProperty(name = "kubernetes.service.host") @@ -404,126 +212,137 @@ KubernetesClient kubeClient() { } return kubeClient; } - - OpenShiftClient withOpenShift(KubernetesClient client) { - return client.adapt(OpenShiftClient.class); - } } private final class EndpointsHandler implements ResourceEventHandler { - @Override - public void onAdd(Endpoints obj) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'onAdd'"); + public void onAdd(Endpoints endpoints) { + getTargetRefsFrom(endpoints) + .forEach( + (refs) -> { + handleEndpointEvent(refs, EventKind.FOUND); + }); } @Override - public void onUpdate(Endpoints oldObj, Endpoints newObj) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'onUpdate'"); + public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { + Set previousRefs = new HashSet<>(getTargetRefsFrom(oldEndpoints)); + Set currentRefs = new HashSet<>(getTargetRefsFrom(newEndpoints)); + + if (previousRefs.equals(currentRefs)) { + return; + } + + 
TargetRef.compare(previousRefs).to(currentRefs).updated().stream() + .forEach(ref -> handleEndpointEvent(ref, EventKind.MODIFIED)); + + TargetRef.compare(previousRefs).to(currentRefs).added().stream() + .forEach(ref -> handleEndpointEvent(ref, EventKind.FOUND)); + + TargetRef.compare(previousRefs).to(currentRefs).removed().stream() + .forEach(ref -> handleEndpointEvent(ref, EventKind.LOST)); } @Override - public void onDelete(Endpoints obj, boolean deletedFinalStateUnknown) { - // TODO Auto-generated method stub - throw new UnsupportedOperationException("Unimplemented method 'onDelete'"); + public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { + if (deletedFinalStateUnknown) { + logger.warnv("Deleted final state unknown: {}", endpoints); + return; + } + getTargetRefsFrom(endpoints) + .forEach( + (tt) -> { + handleEndpointEvent(tt, EventKind.LOST); + }); } - // @Override - // public void onAdd(Endpoints endpoints) { - // getServiceRefs(endpoints) - // .forEach(serviceRef -> notifyAsyncTargetDiscovery(EventKind.FOUND, - // serviceRef)); - // } - - // @Override - // public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { - // Set previousRefs = new HashSet<>(getServiceRefs(oldEndpoints)); - // Set currentRefs = new HashSet<>(getServiceRefs(newEndpoints)); - - // if (previousRefs.equals(currentRefs)) { - // return; - // } - - // ServiceRef.compare(previousRefs).to(currentRefs).updated().stream() - // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.MODIFIED, sr)); - - // ServiceRef.compare(previousRefs).to(currentRefs).added().stream() - // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.FOUND, sr)); - // ` - // ServiceRef.compare(previousRefs).to(currentRefs).removed().stream() - // .forEach(sr -> notifyAsyncTargetDiscovery(EventKind.LOST, sr)); - // } - - // @Override - // public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { - // if (deletedFinalStateUnknown) { - // logger.warn("Deleted final state unknown: {}", endpoints); - // return; - // } - // getServiceRefs(endpoints) - // .forEach(serviceRef -> notifyAsyncTargetDiscovery(EventKind.LOST, - // serviceRef)); - // } } - // class TargetTuple { - // ObjectReference objRef; - // EndpointAddress addr; - // EndpointPort port; - - // TargetTuple(ObjectReference objRef, EndpointAddress addr, EndpointPort port) { - // this.objRef = objRef; - // this.addr = addr; - // this.port = port; - // } - - // Target toTarget() { - // Pair node = - // discoveryNodeCache.computeIfAbsent( - // cacheKey(objRef.getNamespace(), objRef), - // this::queryForNode); - // HasMetadata podRef = node.getLeft(); - // if (node.getRight().getNodeType() != KubernetesNodeType.POD) { - // throw new IllegalStateException(); - // } - // if (podRef == null) { - // throw new IllegalStateException(); - // } - // try { - // String targetName = objRef.getName(); - - // String ip = addr.getIp().replaceAll("\\.", "-"); - // String namespace = podRef.getMetadata().getNamespace(); - // String host = String.format("%s.%s.pod", ip, namespace); - - // JMXServiceURL jmxUrl = - // new JMXServiceURL( - // "rmi", - // "", - // 0, - // "/jndi/rmi://" + host + ':' + port.getPort() + "/jmxrmi"); - // ServiceRef serviceRef = - // new ServiceRef(null, URI.create(jmxUrl.toString()), targetName); - // serviceRef.setLabels(podRef.getMetadata().getLabels()); - // serviceRef.setPlatformAnnotations(podRef.getMetadata().getAnnotations()); - // serviceRef.setCryostatAnnotations( - // Map.of( - // AnnotationKey.REALM, - // REALM, - // 
AnnotationKey.HOST, - // addr.getIp(), - // AnnotationKey.PORT, - // Integer.toString(port.getPort()), - // AnnotationKey.NAMESPACE, - // addr.getTargetRef().getNamespace(), - // AnnotationKey.POD_NAME, - // addr.getTargetRef().getName())); - // return serviceRef; - // } catch (Exception e) { - // logger.warn(e); - // return null; - // } - // } - // } + static record TargetRef(ObjectReference objRef, EndpointAddress addr, EndpointPort port) { + TargetRef { + Objects.requireNonNull(objRef); + Objects.requireNonNull(addr); + Objects.requireNonNull(port); + } + + static List fromEndpoints(Endpoints endpoints) { + List tts = new ArrayList<>(); + for (EndpointSubset subset : endpoints.getSubsets()) { + for (EndpointPort port : subset.getPorts()) { + for (EndpointAddress addr : subset.getAddresses()) { + tts.add(new TargetRef(addr.getTargetRef(), addr, port)); + } + } + } + return tts; + } + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (other == this) { + return true; + } + if (!(other instanceof TargetRef)) { + return false; + } + TargetRef sr = (TargetRef) other; + return new EqualsBuilder() + .append(objRef().getName(), objRef().getName()) + .append(addr(), sr.addr()) + .append(port(), sr.port()) + .build(); + } + + public static Compare compare(Collection src) { + return new Compare(src); + } + + public static class Compare { + private Collection previous, current; + + public Compare(Collection previous) { + this.previous = new HashSet<>(previous); + } + + public Compare to(Collection current) { + this.current = new HashSet<>(current); + return this; + } + + public Collection added() { + return removeAllUpdatedRefs(addedOrUpdatedRefs(), updated()); + } + + public Collection removed() { + return removeAllUpdatedRefs(removedOrUpdatedRefs(), updated()); + } + + public Collection updated() { + Collection updated = addedOrUpdatedRefs(); + updated.removeAll(removedOrUpdatedRefs()); + return updated; + } + + private Collection addedOrUpdatedRefs() { + Collection added = new HashSet<>(current); + added.removeAll(previous); + return added; + } + + private Collection removedOrUpdatedRefs() { + Collection removed = new HashSet<>(previous); + removed.removeAll(current); + return removed; + } + + private Collection removeAllUpdatedRefs( + Collection src, Collection updated) { + Collection tnSet = new HashSet<>(src); + tnSet.removeAll(updated); + return tnSet; + } + } + } } diff --git a/src/main/resources/application-dev.properties b/src/main/resources/application-dev.properties index d1f9feaf4..a1e20551a 100644 --- a/src/main/resources/application-dev.properties +++ b/src/main/resources/application-dev.properties @@ -18,7 +18,8 @@ cryostat.discovery.jdp.enabled=true cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true cryostat.discovery.kubernetes.enabled=true -cryostat.k8s.namespaces= + +cryostat.discovery.k8s.namespaces= kubernetes.service.host= quarkus.datasource.devservices.enabled=true diff --git a/src/main/resources/application-test.properties b/src/main/resources/application-test.properties index 7a4bffafc..753f240df 100644 --- a/src/main/resources/application-test.properties +++ b/src/main/resources/application-test.properties @@ -4,7 +4,9 @@ cryostat.discovery.jdp.enabled=true cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true cryostat.discovery.kubernetes.enabled=true -cryostat.k8s.namespaces= +cryostat.discovery.k8s.port-names= +cryostat.discovery.k8s.port-numbers= 
+cryostat.discovery.k8s.namespaces= kubernetes.service.host= quarkus.test.env.JAVA_OPTS_APPEND=-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9091 -Dcom.sun.management.jmxremote.rmi.port=9091 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index 015e70753..c22379797 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -11,6 +11,11 @@ cryostat.discovery.plugins.jwt.signature.algorithm=HS256 cryostat.discovery.plugins.jwt.encryption.algorithm=dir cryostat.discovery.plugins.jwt.encryption.method=A256GCM cryostat.discovery.kubernetes.enabled=false +cryostat.discovery.k8s.port-names= +cryostat.discovery.k8s.port-numbers= +cryostat.discovery.k8s.namespaces= +kubernetes.service.host= + quarkus.test.integration-test-profile=test cryostat.connections.max-open=0 @@ -32,8 +37,6 @@ cryostat.http.proxy.tls-enabled=false cryostat.http.proxy.host=${quarkus.http.host} cryostat.http.proxy.port=${quarkus.http.port} cryostat.http.proxy.path=/ -cryostat.k8s.namespaces= -kubernetes.service.host= conf-dir=/opt/cryostat.d ssl.truststore=${conf-dir}/truststore.p12 From 0661f473040a659c624f1dda3585b0b4cc6aa7be Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Thu, 21 Mar 2024 19:05:08 -0700 Subject: [PATCH 03/30] build(deps): remove unused mvn deps --- pom.xml | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/pom.xml b/pom.xml index 3826a0c0d..169563d2d 100644 --- a/pom.xml +++ b/pom.xml @@ -218,11 +218,6 @@ ${com.google.java-format.version} provided - - io.fabric8 - openshift-client - ${io.fabric8.client.version} - io.fabric8 kubernetes-client @@ -260,19 +255,6 @@ junit-jupiter test - - io.fabric8 - kubernetes-server-mock - ${io.fabric8.client.version} - test - - - io.fabric8 - openshift-server-mock - ${io.fabric8.client.version} - test - - From ca24b07d83bc5fc1cda94dd27bcb450ddcc113ad Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Thu, 21 Mar 2024 22:07:53 -0700 Subject: [PATCH 04/30] feat(discovery): implement discovery logic for FOUND events --- .../cryostat/discovery/KubeApiDiscovery.java | 317 ++++++++++++++---- 1 file changed, 258 insertions(+), 59 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index dde36b895..54be9bf1b 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -1,5 +1,6 @@ package io.cryostat.discovery; +import java.net.URI; import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; @@ -13,7 +14,11 @@ import java.util.concurrent.ForkJoinPool; import java.util.stream.Collectors; +import javax.management.remote.JMXServiceURL; + import io.cryostat.core.sys.FileSystem; +import io.cryostat.targets.Target; +import io.cryostat.targets.Target.Annotations; import io.cryostat.targets.Target.EventKind; import com.google.common.base.Optional; @@ -21,7 +26,9 @@ import io.fabric8.kubernetes.api.model.EndpointPort; import io.fabric8.kubernetes.api.model.EndpointSubset; import io.fabric8.kubernetes.api.model.Endpoints; +import io.fabric8.kubernetes.api.model.HasMetadata; import 
io.fabric8.kubernetes.api.model.ObjectReference; +import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.KubernetesClientBuilder; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; @@ -36,6 +43,7 @@ import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.concurrent.ConcurrentException; import org.apache.commons.lang3.concurrent.LazyInitializer; +import org.apache.commons.lang3.tuple.Pair; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.logging.Logger; @@ -63,10 +71,9 @@ public class KubeApiDiscovery { @Override protected HashMap> initialize() throws ConcurrentException { - // TODO add support for some wildcard indicating a single Informer for any + // TODO: add support for some wildcard indicating a single Informer for any // namespace that Cryostat has permissions to. This will need some restructuring - // of how the - // namespaces within the discovery tree are mapped. + // of how the namespaces within the discovery tree are mapped. var result = new HashMap>(); kubeConfig .getWatchNamespaces() @@ -156,18 +163,135 @@ private boolean isCompatiblePort(EndpointPort port) { || JmxPortNumbers.or(List.of()).contains(port.getPort()); } - private List getTargetRefsFrom(Endpoints endpoints) { - return TargetRef.fromEndpoints(endpoints).stream() + private List getTargetTuplesFrom(Endpoints endpoints) { + return tuplesFromEndpoints(endpoints).stream() .filter( (ref) -> { - return Objects.nonNull(ref) && isCompatiblePort(ref.port()); + return Objects.nonNull(ref) && isCompatiblePort(ref.port); }) .collect(Collectors.toList()); } @Transactional - public void handleEndpointEvent(TargetRef ref, EventKind eventKind) { - // TODO: Handle endpoint event + public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { + DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); + switch (eventKind) { + case FOUND: + DiscoveryNode nsNode = + DiscoveryNode.environment( + tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE); + if (!realm.children.contains(nsNode)) { + realm.children.add(nsNode); + realm.persist(); + } else { + nsNode = + DiscoveryNode.getChild( + realm, n -> tuple.objRef.getNamespace() == n.name) + .orElseThrow(); + } + buildOwnerChain(nsNode, tuple); + break; + case LOST: + break; + case MODIFIED: + break; + default: + } + } + + private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { + ObjectReference targetRef = targetTuple.addr.getTargetRef(); + if (targetRef == null) { + logger.errorv( + "Address {} for Endpoint {} had null target reference", + targetTuple.addr.getIp() != null + ? 
targetTuple.addr.getIp() + : targetTuple.addr.getHostname(), + targetTuple.objRef.getName()); + return; + } + String targetKind = targetRef.getKind(); + KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); + Target target = targetTuple.toTarget(); + if (targetType == KubeDiscoveryNodeType.POD) { + // if the Endpoint points to a Pod, chase the owner chain up as far as possible, then + // add that to the Namespace + + Pair pod = + queryForNode( + targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); + pod.getRight() + .children + .add(DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT)); + pod.getRight().persist(); + + Pair node = pod; + while (true) { + Pair owner = getOrCreateOwnerNode(node); + if (owner == null) { + break; + } + DiscoveryNode ownerNode = owner.getRight(); + ownerNode.children.add(node.getRight()); + ownerNode.persist(); + + node = owner; + } + + nsNode.children.add(node.getRight()); + } else { + // if the Endpoint points to something else(?) than a Pod, just add the target straight + // to the Namespace + nsNode.children.add(DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT)); + } + target.persist(); + nsNode.persist(); + } + + private Pair getOrCreateOwnerNode( + Pair child) { + HasMetadata childRef = child.getLeft(); + if (childRef == null) { + logger.errorv( + "Could not locate node named {} of kind {} while traversing environment", + child.getRight().name, + child.getRight().nodeType); + return null; + } + List owners = childRef.getMetadata().getOwnerReferences(); + // Take first "expected" owner Kind from NodeTypes, or if none, simply use the first owner. + // If there are no owners then return null to signify this and break the chain + if (owners.isEmpty()) { + return null; + } + String namespace = childRef.getMetadata().getNamespace(); + OwnerReference owner = + owners.stream() + .filter(o -> KubeDiscoveryNodeType.fromKubernetesKind(o.getKind()) != null) + .findFirst() + .orElse(owners.get(0)); + Pair pair = + queryForNode(namespace, owner.getName(), owner.getKind()); + pair.getRight().persist(); + return pair; + } + + private Pair queryForNode( + String namespace, String name, String kind) { + KubeDiscoveryNodeType nodeType = KubeDiscoveryNodeType.fromKubernetesKind(kind); + if (nodeType == null) { + return null; + } + + HasMetadata kubeObj = + nodeType.getQueryFunction().apply(client()).apply(namespace).apply(name); + DiscoveryNode node = new DiscoveryNode(); + node.name = name; + node.nodeType = nodeType.getKind(); + node.labels = kubeObj != null ? 
kubeObj.getMetadata().getLabels() : new HashMap<>(); + node.children = new ArrayList<>(); + node.target = null; + return Pair.of(kubeObj, node); } @ApplicationScoped @@ -217,30 +341,30 @@ KubernetesClient kubeClient() { private final class EndpointsHandler implements ResourceEventHandler { @Override public void onAdd(Endpoints endpoints) { - getTargetRefsFrom(endpoints) + getTargetTuplesFrom(endpoints) .forEach( - (refs) -> { - handleEndpointEvent(refs, EventKind.FOUND); + (tuples) -> { + handleEndpointEvent(tuples, EventKind.FOUND); }); } @Override public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { - Set previousRefs = new HashSet<>(getTargetRefsFrom(oldEndpoints)); - Set currentRefs = new HashSet<>(getTargetRefsFrom(newEndpoints)); + Set previousTuples = new HashSet<>(getTargetTuplesFrom(oldEndpoints)); + Set currentTuples = new HashSet<>(getTargetTuplesFrom(newEndpoints)); - if (previousRefs.equals(currentRefs)) { + if (previousTuples.equals(currentTuples)) { return; } - TargetRef.compare(previousRefs).to(currentRefs).updated().stream() - .forEach(ref -> handleEndpointEvent(ref, EventKind.MODIFIED)); + TargetTuple.compare(previousTuples).to(currentTuples).updated().stream() + .forEach(tuple -> handleEndpointEvent(tuple, EventKind.MODIFIED)); - TargetRef.compare(previousRefs).to(currentRefs).added().stream() - .forEach(ref -> handleEndpointEvent(ref, EventKind.FOUND)); + TargetTuple.compare(previousTuples).to(currentTuples).added().stream() + .forEach(tuple -> handleEndpointEvent(tuple, EventKind.FOUND)); - TargetRef.compare(previousRefs).to(currentRefs).removed().stream() - .forEach(ref -> handleEndpointEvent(ref, EventKind.LOST)); + TargetTuple.compare(previousTuples).to(currentTuples).removed().stream() + .forEach(tuple -> handleEndpointEvent(tuple, EventKind.LOST)); } @Override @@ -249,7 +373,7 @@ public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { logger.warnv("Deleted final state unknown: {}", endpoints); return; } - getTargetRefsFrom(endpoints) + getTargetTuplesFrom(endpoints) .forEach( (tt) -> { handleEndpointEvent(tt, EventKind.LOST); @@ -257,23 +381,74 @@ public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { } } - static record TargetRef(ObjectReference objRef, EndpointAddress addr, EndpointPort port) { - TargetRef { - Objects.requireNonNull(objRef); - Objects.requireNonNull(addr); - Objects.requireNonNull(port); + List tuplesFromEndpoints(Endpoints endpoints) { + List tts = new ArrayList<>(); + for (EndpointSubset subset : endpoints.getSubsets()) { + for (EndpointPort port : subset.getPorts()) { + for (EndpointAddress addr : subset.getAddresses()) { + tts.add(new TargetTuple(addr.getTargetRef(), addr, port)); + } + } } + return tts; + } - static List fromEndpoints(Endpoints endpoints) { - List tts = new ArrayList<>(); - for (EndpointSubset subset : endpoints.getSubsets()) { - for (EndpointPort port : subset.getPorts()) { - for (EndpointAddress addr : subset.getAddresses()) { - tts.add(new TargetRef(addr.getTargetRef(), addr, port)); - } - } + private class TargetTuple { + ObjectReference objRef; + EndpointAddress addr; + EndpointPort port; + + TargetTuple(ObjectReference objRef, EndpointAddress addr, EndpointPort port) { + this.objRef = objRef; + this.addr = addr; + this.port = port; + } + + public Target toTarget() { + Pair pair = + queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); + HasMetadata obj = pair.getLeft(); + try { + String targetName = objRef.getName(); + + String ip = 
addr.getIp().replaceAll("\\.", "-"); + String namespace = obj.getMetadata().getNamespace(); + String host = String.format("%s.%s.pod", ip, namespace); + + JMXServiceURL jmxUrl = + new JMXServiceURL( + "rmi", + "", + 0, + "/jndi/rmi://" + host + ':' + port.getPort() + "/jmxrmi"); + + Target target = new Target(); + target.activeRecordings = new ArrayList<>(); + target.connectUrl = URI.create(jmxUrl.toString()); + target.alias = targetName; + target.labels = obj.getMetadata().getLabels(); + target.annotations = new Annotations(); + target.annotations.platform().putAll(obj.getMetadata().getAnnotations()); + target.annotations + .cryostat() + .putAll( + Map.of( + "REALM", // AnnotationKey.REALM, + REALM, + "HOST", // AnnotationKey.HOST, + addr.getIp(), + "PORT", // "AnnotationKey.PORT, + Integer.toString(port.getPort()), + "NAMESPACE", + objRef.getNamespace(), + "POD_NAME", + objRef.getName())); + + return target; + } catch (Exception e) { + logger.warn("Target conversion exception", e); + return null; } - return tts; } @Override @@ -284,65 +459,89 @@ public boolean equals(Object other) { if (other == this) { return true; } - if (!(other instanceof TargetRef)) { + if (!(other instanceof TargetTuple)) { return false; } - TargetRef sr = (TargetRef) other; + TargetTuple sr = (TargetTuple) other; return new EqualsBuilder() - .append(objRef().getName(), objRef().getName()) - .append(addr(), sr.addr()) - .append(port(), sr.port()) + .append(objRef.getApiVersion(), sr.objRef.getApiVersion()) + .append(objRef.getKind(), sr.objRef.getKind()) + .append(objRef.getNamespace(), sr.objRef.getNamespace()) + .append(objRef.getName(), sr.objRef.getName()) + .append(addr, sr.addr) + .append(port, sr.port) .build(); } - public static Compare compare(Collection src) { + public static Compare compare(Collection src) { return new Compare(src); } public static class Compare { - private Collection previous, current; + private Collection previous, current; - public Compare(Collection previous) { + public Compare(Collection previous) { this.previous = new HashSet<>(previous); } - public Compare to(Collection current) { + public Compare to(Collection current) { this.current = new HashSet<>(current); return this; } - public Collection added() { - return removeAllUpdatedRefs(addedOrUpdatedRefs(), updated()); + public Collection added() { + return removeAllUpdatedTuples(addedOrUpdatedTuples(), updated()); } - public Collection removed() { - return removeAllUpdatedRefs(removedOrUpdatedRefs(), updated()); + public Collection removed() { + return removeAllUpdatedTuples(removedOrUpdatedTuples(), updated()); } - public Collection updated() { - Collection updated = addedOrUpdatedRefs(); - updated.removeAll(removedOrUpdatedRefs()); + public Collection updated() { + Collection updated = addedOrUpdatedTuples(); + intersection(removedOrUpdatedTuples(), addedOrUpdatedTuples(), false) + .forEach((ref) -> updated.add(ref)); return updated; } - private Collection addedOrUpdatedRefs() { - Collection added = new HashSet<>(current); + private Collection addedOrUpdatedTuples() { + Collection added = new HashSet<>(current); added.removeAll(previous); return added; } - private Collection removedOrUpdatedRefs() { - Collection removed = new HashSet<>(previous); + private Collection removedOrUpdatedTuples() { + Collection removed = new HashSet<>(previous); removed.removeAll(current); return removed; } - private Collection removeAllUpdatedRefs( - Collection src, Collection updated) { - Collection tnSet = new HashSet<>(src); - 
tnSet.removeAll(updated); + private Collection removeAllUpdatedTuples( + Collection src, Collection updated) { + Collection tnSet = new HashSet<>(src); + intersection(src, updated, true).stream().forEach((ref) -> tnSet.remove(ref)); return tnSet; } + + private Collection intersection( + Collection src, Collection other, boolean keepOld) { + final Collection intersection = new HashSet<>(); + + // A tuple is considered as modified if its target reference is the same + for (TargetTuple srcTuple : src) { + for (TargetTuple otherTuple : other) { + if (Objects.equals( + srcTuple.objRef.getNamespace(), + otherTuple.objRef.getNamespace()) + && Objects.equals( + srcTuple.objRef.getName(), otherTuple.objRef.getName())) { + + intersection.add(keepOld ? srcTuple : otherTuple); + } + } + } + return intersection; + } } } } From d29a798b3859eb0ab65dad914168ef7642761459 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 26 Mar 2024 21:06:55 -0700 Subject: [PATCH 05/30] feat(discovery): clean up and implement LOST events --- .../io/cryostat/discovery/DiscoveryNode.java | 5 + .../cryostat/discovery/KubeApiDiscovery.java | 209 +++++++++++------- 2 files changed, 128 insertions(+), 86 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 6c2b05072..10901025a 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -107,6 +107,11 @@ public static Optional getChild( return node.children.stream().filter(predicate).findFirst(); } + public static Optional getNode(Predicate predicate) { + List nodes = listAll(); + return nodes.stream().filter(predicate).findFirst(); + } + public static DiscoveryNode environment(String name, NodeType nodeType) { DiscoveryNode node = new DiscoveryNode(); node.name = name; diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 54be9bf1b..1f7751948 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -10,6 +10,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ForkJoinPool; import java.util.stream.Collectors; @@ -17,11 +18,11 @@ import javax.management.remote.JMXServiceURL; import io.cryostat.core.sys.FileSystem; +import io.cryostat.discovery.KubeApiDiscovery.KubeConfig; import io.cryostat.targets.Target; import io.cryostat.targets.Target.Annotations; import io.cryostat.targets.Target.EventKind; -import com.google.common.base.Optional; import io.fabric8.kubernetes.api.model.EndpointAddress; import io.fabric8.kubernetes.api.model.EndpointPort; import io.fabric8.kubernetes.api.model.EndpointSubset; @@ -159,8 +160,8 @@ private Map> safeGetInformers() { } private boolean isCompatiblePort(EndpointPort port) { - return JmxPortNames.or(List.of()).contains(port.getName()) - || JmxPortNumbers.or(List.of()).contains(port.getPort()); + return JmxPortNames.orElse(List.of()).contains(port.getName()) + || JmxPortNumbers.orElse(List.of()).contains(port.getPort()); } private List getTargetTuplesFrom(Endpoints endpoints) { @@ -175,33 +176,87 @@ private List getTargetTuplesFrom(Endpoints endpoints) { @Transactional public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); + DiscoveryNode nsNode 
= + DiscoveryNode.environment( + tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE); + if (realm.children.contains(nsNode)) { + nsNode = + DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace() == n.name) + .orElseThrow(); + } + switch (eventKind) { case FOUND: - DiscoveryNode nsNode = - DiscoveryNode.environment( - tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE); + buildOwnerChain(nsNode, tuple); if (!realm.children.contains(nsNode)) { realm.children.add(nsNode); realm.persist(); - } else { - nsNode = - DiscoveryNode.getChild( - realm, n -> tuple.objRef.getNamespace() == n.name) - .orElseThrow(); } - buildOwnerChain(nsNode, tuple); break; case LOST: - break; - case MODIFIED: + pruneOwnerChain(nsNode, tuple); + if (nsNode.children.isEmpty()) { + realm.children.remove(nsNode); + realm.persist(); + } break; default: } } + private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { + ObjectReference TargetTuple = targetTuple.addr.getTargetRef(); + if (TargetTuple == null) { + logger.errorv( + "Address {} for Endpoint {} had null target reference", + targetTuple.addr.getIp() != null + ? targetTuple.addr.getIp() + : targetTuple.addr.getHostname(), + targetTuple.objRef.getName()); + return; + } + + String targetKind = TargetTuple.getKind(); + KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); + + Target target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); + target.delete(); + + DiscoveryNode targetNode = target.discoveryNode; + + if (targetType == KubeDiscoveryNodeType.POD) { + Pair pod = + queryForNode( + TargetTuple.getNamespace(), + TargetTuple.getName(), + TargetTuple.getKind()); + + pod.getRight().children.remove(targetNode); + pod.getRight().persist(); + + Pair node = pod; + while (true) { + Pair owner = getOwnerNode(node); + if (owner == null) { + break; + } + DiscoveryNode ownerNode = owner.getRight(); + if (node.getRight().children.isEmpty()) { + ownerNode.children.remove(node.getRight()); + ownerNode.persist(); + } + node = owner; + } + } else { + nsNode.children.remove(targetNode); + } + + nsNode.persist(); + } + private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference targetRef = targetTuple.addr.getTargetRef(); - if (targetRef == null) { + ObjectReference TargetTuple = targetTuple.addr.getTargetRef(); + if (TargetTuple == null) { logger.errorv( "Address {} for Endpoint {} had null target reference", targetTuple.addr.getIp() != null @@ -210,24 +265,31 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { targetTuple.objRef.getName()); return; } - String targetKind = targetRef.getKind(); + + String targetKind = TargetTuple.getKind(); KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); + Target target = targetTuple.toTarget(); + DiscoveryNode targetNode = DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT); + target.discoveryNode = targetNode; + target.persist(); + if (targetType == KubeDiscoveryNodeType.POD) { // if the Endpoint points to a Pod, chase the owner chain up as far as possible, then // add that to the Namespace Pair pod = queryForNode( - targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); - pod.getRight() - .children - .add(DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT)); + TargetTuple.getNamespace(), + TargetTuple.getName(), + TargetTuple.getKind()); + + pod.getRight().children.add(targetNode); pod.getRight().persist(); 
Pair node = pod; while (true) { - Pair owner = getOrCreateOwnerNode(node); + Pair owner = getOwnerNode(node); if (owner == null) { break; } @@ -242,14 +304,13 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { } else { // if the Endpoint points to something else(?) than a Pod, just add the target straight // to the Namespace - nsNode.children.add(DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT)); + nsNode.children.add(targetNode); } - target.persist(); + nsNode.persist(); } - private Pair getOrCreateOwnerNode( - Pair child) { + private Pair getOwnerNode(Pair child) { HasMetadata childRef = child.getLeft(); if (childRef == null) { logger.errorv( @@ -270,10 +331,7 @@ private Pair getOrCreateOwnerNode( .filter(o -> KubeDiscoveryNodeType.fromKubernetesKind(o.getKind()) != null) .findFirst() .orElse(owners.get(0)); - Pair pair = - queryForNode(namespace, owner.getName(), owner.getKind()); - pair.getRight().persist(); - return pair; + return queryForNode(namespace, owner.getName(), owner.getKind()); } private Pair queryForNode( @@ -285,13 +343,31 @@ private Pair queryForNode( HasMetadata kubeObj = nodeType.getQueryFunction().apply(client()).apply(namespace).apply(name); - DiscoveryNode node = new DiscoveryNode(); - node.name = name; - node.nodeType = nodeType.getKind(); - node.labels = kubeObj != null ? kubeObj.getMetadata().getLabels() : new HashMap<>(); - node.children = new ArrayList<>(); - node.target = null; - return Pair.of(kubeObj, node); + + Optional node = + DiscoveryNode.getNode( + n -> { + return name.equals(n.name) + && namespace.equals(n.labels.get("cryostat.io/namespace")); + }); + + return Pair.of( + kubeObj, + node.orElseGet( + () -> { + DiscoveryNode newNode = new DiscoveryNode(); + newNode.name = name; + newNode.nodeType = nodeType.getKind(); + newNode.children = new ArrayList<>(); + newNode.target = null; + newNode.labels = + kubeObj != null + ? kubeObj.getMetadata().getLabels() + : new HashMap<>(); + // Add namespace to label to retrieve node later + newNode.labels.put("cryostat.io/namespace", namespace); + return newNode; + })); } @ApplicationScoped @@ -311,7 +387,7 @@ static final class KubeConfig { private KubernetesClient kubeClient; List getWatchNamespaces() { - return watchNamespaces.or(List.of()); + return watchNamespaces.orElse(List.of()); } String getOwnNamespace() { @@ -357,9 +433,6 @@ public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { return; } - TargetTuple.compare(previousTuples).to(currentTuples).updated().stream() - .forEach(tuple -> handleEndpointEvent(tuple, EventKind.MODIFIED)); - TargetTuple.compare(previousTuples).to(currentTuples).added().stream() .forEach(tuple -> handleEndpointEvent(tuple, EventKind.FOUND)); @@ -413,7 +486,13 @@ public Target toTarget() { String ip = addr.getIp().replaceAll("\\.", "-"); String namespace = obj.getMetadata().getNamespace(); - String host = String.format("%s.%s.pod", ip, namespace); + + boolean isPod = obj.getKind() == KubeDiscoveryNodeType.POD.getKind(); + + String host = String.format("%s.%s", ip, namespace); + if (isPod) { + host = String.format("%s.pod", host); + } JMXServiceURL jmxUrl = new JMXServiceURL( @@ -441,7 +520,7 @@ public Target toTarget() { Integer.toString(port.getPort()), "NAMESPACE", objRef.getNamespace(), - "POD_NAME", + isPod ? 
"POD_NAME" : "OBJECT_NAME", objRef.getName())); return target; @@ -490,58 +569,16 @@ public Compare to(Collection current) { } public Collection added() { - return removeAllUpdatedTuples(addedOrUpdatedTuples(), updated()); - } - - public Collection removed() { - return removeAllUpdatedTuples(removedOrUpdatedTuples(), updated()); - } - - public Collection updated() { - Collection updated = addedOrUpdatedTuples(); - intersection(removedOrUpdatedTuples(), addedOrUpdatedTuples(), false) - .forEach((ref) -> updated.add(ref)); - return updated; - } - - private Collection addedOrUpdatedTuples() { Collection added = new HashSet<>(current); added.removeAll(previous); return added; } - private Collection removedOrUpdatedTuples() { + public Collection removed() { Collection removed = new HashSet<>(previous); removed.removeAll(current); return removed; } - - private Collection removeAllUpdatedTuples( - Collection src, Collection updated) { - Collection tnSet = new HashSet<>(src); - intersection(src, updated, true).stream().forEach((ref) -> tnSet.remove(ref)); - return tnSet; - } - - private Collection intersection( - Collection src, Collection other, boolean keepOld) { - final Collection intersection = new HashSet<>(); - - // A tuple is considered as modified if its target reference is the same - for (TargetTuple srcTuple : src) { - for (TargetTuple otherTuple : other) { - if (Objects.equals( - srcTuple.objRef.getNamespace(), - otherTuple.objRef.getNamespace()) - && Objects.equals( - srcTuple.objRef.getName(), otherTuple.objRef.getName())) { - - intersection.add(keepOld ? srcTuple : otherTuple); - } - } - } - return intersection; - } } } } From f262aa8ba1d9dc0e7e35cea3360ddc75c637b24a Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 26 Mar 2024 21:30:04 -0700 Subject: [PATCH 06/30] chore(typo): fix typo --- .../java/io/cryostat/discovery/KubeApiDiscovery.java | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 1f7751948..2fb095435 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -255,8 +255,8 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { } private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference TargetTuple = targetTuple.addr.getTargetRef(); - if (TargetTuple == null) { + ObjectReference targetRef = targetTuple.addr.getTargetRef(); + if (targetRef == null) { logger.errorv( "Address {} for Endpoint {} had null target reference", targetTuple.addr.getIp() != null @@ -266,7 +266,7 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { return; } - String targetKind = TargetTuple.getKind(); + String targetKind = targetRef.getKind(); KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); Target target = targetTuple.toTarget(); @@ -280,9 +280,7 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { Pair pod = queryForNode( - TargetTuple.getNamespace(), - TargetTuple.getName(), - TargetTuple.getKind()); + targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); pod.getRight().children.add(targetNode); pod.getRight().persist(); From b7a68a72317dcab86a2f0489cfecabab06a18e72 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 26 Mar 2024 21:32:51 -0700 Subject: [PATCH 07/30] chore(license): add missing license 
headers --- .../io/cryostat/discovery/KubeApiDiscovery.java | 15 +++++++++++++++ src/main/java/io/cryostat/discovery/NodeType.java | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 2fb095435..68c067024 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -1,3 +1,18 @@ +/* + * Copyright The Cryostat Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package io.cryostat.discovery; import java.net.URI; diff --git a/src/main/java/io/cryostat/discovery/NodeType.java b/src/main/java/io/cryostat/discovery/NodeType.java index 032bfcae2..611cbfa11 100644 --- a/src/main/java/io/cryostat/discovery/NodeType.java +++ b/src/main/java/io/cryostat/discovery/NodeType.java @@ -1,3 +1,18 @@ +/* + * Copyright The Cryostat Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package io.cryostat.discovery; import java.util.function.Function; From aba6f8aa8a28aa87af99f597a12261834b9d29c4 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 26 Mar 2024 21:49:05 -0700 Subject: [PATCH 08/30] fix(orm): fix unwrap exception when using enum --- src/main/java/io/cryostat/discovery/DiscoveryNode.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 10901025a..35253e50f 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -92,7 +92,7 @@ public int hashCode() { } public static DiscoveryNode getUniverse() { - return DiscoveryNode.find(NODE_TYPE, BaseNodeType.UNIVERSE) + return DiscoveryNode.find(NODE_TYPE, BaseNodeType.UNIVERSE.getKind()) .singleResultOptional() .orElseGet( () -> environment(BaseNodeType.UNIVERSE.toString(), BaseNodeType.UNIVERSE)); From 0f63fa3f69ec8cf2aff414b7fac0928c8a47ac90 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 26 Mar 2024 22:47:01 -0700 Subject: [PATCH 09/30] chore(logs): fix broken logs format --- .../cryostat/discovery/KubeApiDiscovery.java | 22 +++++++++---------- 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 68c067024..34854e8f5 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -104,7 +104,7 @@ protected HashMap> initialize() ENDPOINTS_INFORMER_RESYNC_PERIOD)); logger.infov( "Started Endpoints SharedInformer for" - + " namespace \"{}\"", + + " namespace \"{0}\"", ns); }); return result; @@ -133,7 +133,7 @@ void onStart(@Observes StartupEvent evt) { universe.persist(); } - logger.infov("Starting %s client", REALM); + logger.infov("Starting {0} client", REALM); safeGetInformers(); // trigger lazy init } @@ -143,7 +143,7 @@ void onStop(@Observes ShutdownEvent evt) { return; } - logger.infov("Shutting down %s client", REALM); + logger.infov("Shutting down {0} client", REALM); } boolean enabled() { @@ -223,7 +223,7 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { ObjectReference TargetTuple = targetTuple.addr.getTargetRef(); if (TargetTuple == null) { logger.errorv( - "Address {} for Endpoint {} had null target reference", + "Address {0} for Endpoint {1} had null target reference", targetTuple.addr.getIp() != null ? targetTuple.addr.getIp() : targetTuple.addr.getHostname(), @@ -235,7 +235,6 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); Target target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); - target.delete(); DiscoveryNode targetNode = target.discoveryNode; @@ -265,7 +264,7 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { } else { nsNode.children.remove(targetNode); } - + target.delete(); nsNode.persist(); } @@ -273,7 +272,7 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { ObjectReference targetRef = targetTuple.addr.getTargetRef(); if (targetRef == null) { logger.errorv( - "Address {} for Endpoint {} had null target reference", + "Address {0} for Endpoint {1} had null target reference", targetTuple.addr.getIp() != null ? 
targetTuple.addr.getIp() : targetTuple.addr.getHostname(), @@ -327,9 +326,8 @@ private Pair getOwnerNode(Pair owners = childRef.getMetadata().getOwnerReferences(); @@ -456,7 +454,7 @@ public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { @Override public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { if (deletedFinalStateUnknown) { - logger.warnv("Deleted final state unknown: {}", endpoints); + logger.warnv("Deleted final state unknown: {0}", endpoints); return; } getTargetTuplesFrom(endpoints) @@ -500,7 +498,7 @@ public Target toTarget() { String ip = addr.getIp().replaceAll("\\.", "-"); String namespace = obj.getMetadata().getNamespace(); - boolean isPod = obj.getKind() == KubeDiscoveryNodeType.POD.getKind(); + boolean isPod = obj.getKind().equals(KubeDiscoveryNodeType.POD.getKind()); String host = String.format("%s.%s", ip, namespace); if (isPod) { From e678a3ce95633b8d8b9e02d71e53d0ca0efb52bd Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Wed, 27 Mar 2024 00:50:38 -0700 Subject: [PATCH 10/30] fix(discovery): ignore when target does not exist in db --- .../cryostat/discovery/KubeApiDiscovery.java | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 34854e8f5..c7b3b674d 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -54,6 +54,7 @@ import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; +import jakarta.persistence.NoResultException; import jakarta.transaction.Transactional; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -216,6 +217,8 @@ public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { } break; default: + logger.warnv("Unknown discovery event {0}", eventKind); + break; } } @@ -234,7 +237,12 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { String targetKind = TargetTuple.getKind(); KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); - Target target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); + Target target; + try { + target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); + } catch (NoResultException e) { + return; + } DiscoveryNode targetNode = target.discoveryNode; @@ -444,11 +452,11 @@ public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { return; } - TargetTuple.compare(previousTuples).to(currentTuples).added().stream() - .forEach(tuple -> handleEndpointEvent(tuple, EventKind.FOUND)); - TargetTuple.compare(previousTuples).to(currentTuples).removed().stream() .forEach(tuple -> handleEndpointEvent(tuple, EventKind.LOST)); + + TargetTuple.compare(previousTuples).to(currentTuples).added().stream() + .forEach(tuple -> handleEndpointEvent(tuple, EventKind.FOUND)); } @Override @@ -523,11 +531,11 @@ public Target toTarget() { .cryostat() .putAll( Map.of( - "REALM", // AnnotationKey.REALM, + "REALM", REALM, - "HOST", // AnnotationKey.HOST, + "HOST", addr.getIp(), - "PORT", // "AnnotationKey.PORT, + "PORT", Integer.toString(port.getPort()), "NAMESPACE", objRef.getNamespace(), @@ -558,8 +566,8 @@ public boolean equals(Object other) { .append(objRef.getKind(), sr.objRef.getKind()) .append(objRef.getNamespace(), sr.objRef.getNamespace()) 
.append(objRef.getName(), sr.objRef.getName()) - .append(addr, sr.addr) - .append(port, sr.port) + .append(addr.getIp(), sr.addr.getIp()) + .append(port.getPort(), sr.port.getPort()) .build(); } From 06ee05b818f90469cd90bc29c20fe2beb6dfd633 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 2 Apr 2024 13:51:55 -0700 Subject: [PATCH 11/30] fix(discovery): fix DELETE event handler --- .../cryostat/discovery/KubeApiDiscovery.java | 39 ++++++------------- 1 file changed, 12 insertions(+), 27 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index c7b3b674d..0c6e67bf5 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -223,8 +223,8 @@ public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { } private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference TargetTuple = targetTuple.addr.getTargetRef(); - if (TargetTuple == null) { + ObjectReference targetRef = targetTuple.addr.getTargetRef(); + if (targetRef == null) { logger.errorv( "Address {0} for Endpoint {1} had null target reference", targetTuple.addr.getIp() != null @@ -234,29 +234,11 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { return; } - String targetKind = TargetTuple.getKind(); - KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); - - Target target; try { - target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); - } catch (NoResultException e) { - return; - } - - DiscoveryNode targetNode = target.discoveryNode; + Target target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); + DiscoveryNode targetNode = target.discoveryNode; - if (targetType == KubeDiscoveryNodeType.POD) { - Pair pod = - queryForNode( - TargetTuple.getNamespace(), - TargetTuple.getName(), - TargetTuple.getKind()); - - pod.getRight().children.remove(targetNode); - pod.getRight().persist(); - - Pair node = pod; + Pair node = Pair.of(null, targetNode); while (true) { Pair owner = getOwnerNode(node); if (owner == null) { @@ -269,11 +251,14 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { } node = owner; } - } else { - nsNode.children.remove(targetNode); + + target.delete(); + nsNode.persist(); + } catch (NoResultException e) { + logger.infov( + "Target with service URL {0} does not exist", + targetTuple.toTarget().connectUrl); } - target.delete(); - nsNode.persist(); } private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { From 0adbc41e5ed315a9ab1360702d1a3ee9fe3afdf1 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 2 Apr 2024 13:52:58 -0700 Subject: [PATCH 12/30] chore(spotless): add missing spotless fix --- src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 1 - 1 file changed, 1 deletion(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 0c6e67bf5..be7528053 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -33,7 +33,6 @@ import javax.management.remote.JMXServiceURL; import io.cryostat.core.sys.FileSystem; -import io.cryostat.discovery.KubeApiDiscovery.KubeConfig; import io.cryostat.targets.Target; import io.cryostat.targets.Target.Annotations; import io.cryostat.targets.Target.EventKind; From 
010909ea44a742d5e30adf3ce371acbd6fbefb77 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Wed, 3 Apr 2024 19:59:16 -0700 Subject: [PATCH 13/30] chore(k8s): correct labels --- src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index be7528053..849843840 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -351,7 +351,7 @@ private Pair queryForNode( DiscoveryNode.getNode( n -> { return name.equals(n.name) - && namespace.equals(n.labels.get("cryostat.io/namespace")); + && namespace.equals(n.labels.get("io.cryostat/namespace")); }); return Pair.of( @@ -368,7 +368,7 @@ private Pair queryForNode( ? kubeObj.getMetadata().getLabels() : new HashMap<>(); // Add namespace to label to retrieve node later - newNode.labels.put("cryostat.io/namespace", namespace); + newNode.labels.put("io.cryostat/namespace", namespace); return newNode; })); } From 8ff1804ceafdbb62b48f2b253cf964813c2432e7 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 9 Apr 2024 04:37:43 -0700 Subject: [PATCH 14/30] fixup(k8s): add missing changes for watch namespaces from Andrew --- .../cryostat/discovery/KubeApiDiscovery.java | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 849843840..adfa76006 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -42,6 +42,7 @@ import io.fabric8.kubernetes.api.model.EndpointSubset; import io.fabric8.kubernetes.api.model.Endpoints; import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.api.model.ObjectReference; import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.client.KubernetesClient; @@ -378,6 +379,8 @@ static final class KubeConfig { public static final String KUBERNETES_NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"; + private static final String OWN_NAMESPACE = "."; + @Inject Logger logger; @Inject FileSystem fs; @@ -389,8 +392,17 @@ static final class KubeConfig { private KubernetesClient kubeClient; - List getWatchNamespaces() { - return watchNamespaces.orElse(List.of()); + Collection getWatchNamespaces() { + return watchNamespaces.orElse(List.of()).stream() + .map( + n -> { + if (OWN_NAMESPACE.equals(n)) { + return getOwnNamespace(); + } + return n; + }) + .filter(Objects::nonNull) + .collect(Collectors.toSet()); } String getOwnNamespace() { @@ -485,10 +497,11 @@ public Target toTarget() { queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); HasMetadata obj = pair.getLeft(); try { - String targetName = objRef.getName(); + ObjectMeta metadata = obj.getMetadata(); + String targetName = metadata.getName(); String ip = addr.getIp().replaceAll("\\.", "-"); - String namespace = obj.getMetadata().getNamespace(); + String namespace = metadata.getNamespace(); boolean isPod = obj.getKind().equals(KubeDiscoveryNodeType.POD.getKind()); @@ -508,9 +521,9 @@ public Target toTarget() { target.activeRecordings = new ArrayList<>(); target.connectUrl = URI.create(jmxUrl.toString()); target.alias = targetName; - target.labels = 
obj.getMetadata().getLabels(); + target.labels = metadata.getLabels(); target.annotations = new Annotations(); - target.annotations.platform().putAll(obj.getMetadata().getAnnotations()); + target.annotations.platform().putAll(metadata.getAnnotations()); target.annotations .cryostat() .putAll( From 2fc59af444c3255201f780487581a43fc19a8584 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 9 Apr 2024 05:27:41 -0700 Subject: [PATCH 15/30] fix(k8s): fix string comparison --- src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index adfa76006..27cacb990 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -197,7 +197,7 @@ public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE); if (realm.children.contains(nsNode)) { nsNode = - DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace() == n.name) + DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace().equals(n.name)) .orElseThrow(); } From e3fc083590b2102f965826e7f76b2bbd482ef99c Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 9 Apr 2024 06:22:03 -0700 Subject: [PATCH 16/30] fix(k8s): fix duplicate namespace nodes --- .../java/io/cryostat/discovery/KubeApiDiscovery.java | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 27cacb990..92e6695c3 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -193,13 +193,11 @@ private List getTargetTuplesFrom(Endpoints endpoints) { public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); DiscoveryNode nsNode = - DiscoveryNode.environment( - tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE); - if (realm.children.contains(nsNode)) { - nsNode = - DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace().equals(n.name)) - .orElseThrow(); - } + DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace().equals(n.name)) + .orElse( + DiscoveryNode.environment( + tuple.objRef.getNamespace(), + KubeDiscoveryNodeType.NAMESPACE)); switch (eventKind) { case FOUND: From 8746ff59e3423e63fcd05f03d318a5babaab3016 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 9 Apr 2024 15:49:40 -0700 Subject: [PATCH 17/30] fix(k8s): fix realm node not found and deletion constraint violation --- .../io/cryostat/discovery/DiscoveryNode.java | 4 + .../cryostat/discovery/KubeApiDiscovery.java | 167 ++++++++++-------- 2 files changed, 93 insertions(+), 78 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 35253e50f..1a8942761 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -91,6 +91,10 @@ public int hashCode() { return Objects.hash(id, name, nodeType, labels, children, target); } + public boolean hasChildren() { + return !children.isEmpty(); + } + public static DiscoveryNode getUniverse() { return DiscoveryNode.find(NODE_TYPE, BaseNodeType.UNIVERSE.getKind()) 
.singleResultOptional() diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 92e6695c3..3339a772b 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -42,7 +42,6 @@ import io.fabric8.kubernetes.api.model.EndpointSubset; import io.fabric8.kubernetes.api.model.Endpoints; import io.fabric8.kubernetes.api.model.HasMetadata; -import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.api.model.ObjectReference; import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.client.KubernetesClient; @@ -51,6 +50,7 @@ import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; +import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; import jakarta.inject.Inject; @@ -70,6 +70,8 @@ public class KubeApiDiscovery { public static final long ENDPOINTS_INFORMER_RESYNC_PERIOD = Duration.ofSeconds(30).toMillis(); + public static final String NAMESPACE_LABEL_KEY = "io.cryostat/namespace"; + @Inject Logger logger; @Inject KubeConfig kubeConfig; @@ -113,7 +115,7 @@ protected HashMap> initialize() }; @Transactional - void onStart(@Observes StartupEvent evt) { + void onStart(@Observes @Priority(1) StartupEvent evt) { if (!(enabled())) { return; } @@ -135,7 +137,9 @@ void onStart(@Observes StartupEvent evt) { } logger.infov("Starting {0} client", REALM); + } + void onAfterStart(@Observes StartupEvent evt) { safeGetInformers(); // trigger lazy init } @@ -145,6 +149,13 @@ void onStop(@Observes ShutdownEvent evt) { } logger.infov("Shutting down {0} client", REALM); + safeGetInformers() + .forEach( + (ns, informer) -> { + informer.close(); + logger.infov( + "Closed Endpoints SharedInformer for namespace \"{0}\"", ns); + }); } boolean enabled() { @@ -165,16 +176,6 @@ KubernetesClient client() { return kubeConfig.kubeClient(); } - private Map> safeGetInformers() { - Map> informers; - try { - informers = nsInformers.get(); - } catch (ConcurrentException e) { - throw new IllegalStateException(e); - } - return informers; - } - private boolean isCompatiblePort(EndpointPort port) { return JmxPortNames.orElse(List.of()).contains(port.getName()) || JmxPortNumbers.orElse(List.of()).contains(port.getPort()); @@ -189,6 +190,16 @@ private List getTargetTuplesFrom(Endpoints endpoints) { .collect(Collectors.toList()); } + private Map> safeGetInformers() { + Map> informers; + try { + informers = nsInformers.get(); + } catch (ConcurrentException e) { + throw new IllegalStateException(e); + } + return informers; + } + @Transactional public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); @@ -199,29 +210,26 @@ public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { tuple.objRef.getNamespace(), KubeDiscoveryNodeType.NAMESPACE)); - switch (eventKind) { - case FOUND: + try { + if (eventKind == EventKind.FOUND) { buildOwnerChain(nsNode, tuple); if (!realm.children.contains(nsNode)) { realm.children.add(nsNode); - realm.persist(); } - break; - case LOST: + } else if (eventKind == EventKind.LOST) { pruneOwnerChain(nsNode, tuple); - if (nsNode.children.isEmpty()) { + if (!nsNode.hasChildren()) { realm.children.remove(nsNode); - realm.persist(); } - break; - default: - 
logger.warnv("Unknown discovery event {0}", eventKind); - break; + } + realm.persist(); + } catch (Exception e) { + logger.warn("Endpoint handler exception", e); } } private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference targetRef = targetTuple.addr.getTargetRef(); + ObjectReference targetRef = targetTuple.objRef; if (targetRef == null) { logger.errorv( "Address {0} for Endpoint {1} had null target reference", @@ -233,34 +241,40 @@ private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { } try { - Target target = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); - DiscoveryNode targetNode = target.discoveryNode; - - Pair node = Pair.of(null, targetNode); - while (true) { - Pair owner = getOwnerNode(node); - if (owner == null) { - break; - } - DiscoveryNode ownerNode = owner.getRight(); - if (node.getRight().children.isEmpty()) { - ownerNode.children.remove(node.getRight()); - ownerNode.persist(); + Target t = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); + DiscoveryNode targetNode = t.discoveryNode; + + if (nsNode.children.contains(targetNode)) { + nsNode.children.remove(targetNode); + } else { + Pair pod = + queryForNode( + targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); + pod.getRight().children.remove(targetNode); + pod.getRight().persist(); + + Pair node = pod; + while (true) { + Pair owner = getOwnerNode(node); + if (owner == null) { + break; + } + DiscoveryNode ownerNode = owner.getRight(); + if (!node.getRight().hasChildren()) { + ownerNode.children.remove(pod.getRight()); + ownerNode.persist(); + } + node = owner; } - node = owner; } - - target.delete(); nsNode.persist(); + t.delete(); } catch (NoResultException e) { - logger.infov( - "Target with service URL {0} does not exist", - targetTuple.toTarget().connectUrl); } } private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference targetRef = targetTuple.addr.getTargetRef(); + ObjectReference targetRef = targetTuple.objRef; if (targetRef == null) { logger.errorv( "Address {0} for Endpoint {1} had null target reference", @@ -316,9 +330,6 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { private Pair getOwnerNode(Pair child) { HasMetadata childRef = child.getLeft(); if (childRef == null) { - logger.errorv( - "Could not locate node named {0} of kind {1} while traversing environment", - child.getRight().name, child.getRight().nodeType); return null; } List owners = childRef.getMetadata().getOwnerReferences(); @@ -346,30 +357,29 @@ private Pair queryForNode( HasMetadata kubeObj = nodeType.getQueryFunction().apply(client()).apply(namespace).apply(name); - Optional node = + DiscoveryNode node = DiscoveryNode.getNode( - n -> { - return name.equals(n.name) - && namespace.equals(n.labels.get("io.cryostat/namespace")); - }); - - return Pair.of( - kubeObj, - node.orElseGet( - () -> { - DiscoveryNode newNode = new DiscoveryNode(); - newNode.name = name; - newNode.nodeType = nodeType.getKind(); - newNode.children = new ArrayList<>(); - newNode.target = null; - newNode.labels = - kubeObj != null - ? 
kubeObj.getMetadata().getLabels() - : new HashMap<>(); - // Add namespace to label to retrieve node later - newNode.labels.put("io.cryostat/namespace", namespace); - return newNode; - })); + n -> { + return name.equals(n.name) + && namespace.equals(n.labels.get(NAMESPACE_LABEL_KEY)); + }) + .orElseGet( + () -> { + DiscoveryNode newNode = new DiscoveryNode(); + newNode.name = name; + newNode.nodeType = nodeType.getKind(); + newNode.children = new ArrayList<>(); + newNode.target = null; + newNode.labels = + kubeObj != null + ? kubeObj.getMetadata().getLabels() + : new HashMap<>(); + // Add namespace to label to retrieve node later + newNode.labels.put(NAMESPACE_LABEL_KEY, namespace); + return newNode; + }); + + return Pair.of(kubeObj, node); } @ApplicationScoped @@ -461,8 +471,8 @@ public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { } getTargetTuplesFrom(endpoints) .forEach( - (tt) -> { - handleEndpointEvent(tt, EventKind.LOST); + (tuple) -> { + handleEndpointEvent(tuple, EventKind.LOST); }); } } @@ -495,13 +505,12 @@ public Target toTarget() { queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); HasMetadata obj = pair.getLeft(); try { - ObjectMeta metadata = obj.getMetadata(); - String targetName = metadata.getName(); + String targetName = objRef.getName(); String ip = addr.getIp().replaceAll("\\.", "-"); - String namespace = metadata.getNamespace(); + String namespace = objRef.getNamespace(); - boolean isPod = obj.getKind().equals(KubeDiscoveryNodeType.POD.getKind()); + boolean isPod = objRef.getKind().equals(KubeDiscoveryNodeType.POD.getKind()); String host = String.format("%s.%s", ip, namespace); if (isPod) { @@ -519,9 +528,11 @@ public Target toTarget() { target.activeRecordings = new ArrayList<>(); target.connectUrl = URI.create(jmxUrl.toString()); target.alias = targetName; - target.labels = metadata.getLabels(); + target.labels = obj != null ? obj.getMetadata().getLabels() : new HashMap<>(); target.annotations = new Annotations(); - target.annotations.platform().putAll(metadata.getAnnotations()); + target.annotations + .platform() + .putAll(obj != null ? 
obj.getMetadata().getAnnotations() : Map.of()); target.annotations .cryostat() .putAll( From 24bf1b5bb71582a6901634272aba97d402dc232b Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Sun, 21 Apr 2024 19:39:51 -0700 Subject: [PATCH 18/30] fix(k8s): rebuild tree on endpoint events --- .../io/cryostat/discovery/DiscoveryNode.java | 4 + .../cryostat/discovery/KubeApiDiscovery.java | 266 +++++++++--------- src/main/java/io/cryostat/targets/Target.java | 69 +++++ 3 files changed, 207 insertions(+), 132 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 1a8942761..0e7a29a16 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -116,6 +116,10 @@ public static Optional getNode(Predicate predicate return nodes.stream().filter(predicate).findFirst(); } + public static List findAllByNodeType(NodeType nodeType) { + return DiscoveryNode.find(DiscoveryNode.NODE_TYPE, nodeType.getKind()).list(); + } + public static DiscoveryNode environment(String name, NodeType nodeType) { DiscoveryNode node = new DiscoveryNode(); node.name = name; diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 3339a772b..a981587c2 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -35,7 +34,6 @@ import io.cryostat.core.sys.FileSystem; import io.cryostat.targets.Target; import io.cryostat.targets.Target.Annotations; -import io.cryostat.targets.Target.EventKind; import io.fabric8.kubernetes.api.model.EndpointAddress; import io.fabric8.kubernetes.api.model.EndpointPort; @@ -70,7 +68,8 @@ public class KubeApiDiscovery { public static final long ENDPOINTS_INFORMER_RESYNC_PERIOD = Duration.ofSeconds(30).toMillis(); - public static final String NAMESPACE_LABEL_KEY = "io.cryostat/namespace"; + public static final String DISCOVERY_NAMESPACE_LABEL_KEY = "discovery.cryostat.io/namespace"; + public static final String DISCOVERY_PARENT_LABEL_KEY = "discovery.cryostat.io/parent"; @Inject Logger logger; @@ -139,8 +138,9 @@ void onStart(@Observes @Priority(1) StartupEvent evt) { logger.infov("Starting {0} client", REALM); } + @Transactional void onAfterStart(@Observes StartupEvent evt) { - safeGetInformers(); // trigger lazy init + safeGetInformers(); } void onStop(@Observes ShutdownEvent evt) { @@ -201,26 +201,45 @@ private Map> safeGetInformers() { } @Transactional - public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { + public void handleEndpointEvent(String namespace) { DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); DiscoveryNode nsNode = - DiscoveryNode.getChild(realm, n -> tuple.objRef.getNamespace().equals(n.name)) + DiscoveryNode.getChild(realm, n -> n.name.equals(namespace)) .orElse( DiscoveryNode.environment( - tuple.objRef.getNamespace(), - KubeDiscoveryNodeType.NAMESPACE)); + namespace, KubeDiscoveryNodeType.NAMESPACE)); try { - if (eventKind == EventKind.FOUND) { - buildOwnerChain(nsNode, tuple); - if (!realm.children.contains(nsNode)) { - realm.children.add(nsNode); - } - } else if (eventKind == EventKind.LOST) { - pruneOwnerChain(nsNode, tuple); - if 
(!nsNode.hasChildren()) { - realm.children.remove(nsNode); - } + List targetNodes = + DiscoveryNode.findAllByNodeType(KubeDiscoveryNodeType.ENDPOINT); + Map targetRefMap = new HashMap<>(); + safeGetInformers().get(namespace).getStore().list().stream() + .map((endpoint) -> getTargetTuplesFrom(endpoint)) + .flatMap(List::stream) + .filter((tuple) -> Objects.nonNull(tuple.objRef)) + .collect(Collectors.toList()) + .forEach((tuple) -> targetRefMap.put(tuple.toTarget(), tuple.objRef)); + + Set persistedTargets = + targetNodes.stream().map(node -> node.target).collect(Collectors.toSet()); + Set observedTargets = targetRefMap.keySet(); + + // Add newly added endpoints + Target.compare(persistedTargets) + .to(observedTargets) + .added() + .forEach((t) -> buildOwnerChain(nsNode, t, targetRefMap.get(t))); + + // Prune deleted endpoints + Target.compare(persistedTargets) + .to(observedTargets) + .removed() + .forEach((t) -> pruneOwnerChain(nsNode, t)); + + if (!nsNode.hasChildren()) { + realm.children.remove(nsNode); + } else if (!realm.children.contains(nsNode)) { + realm.children.add(nsNode); } realm.persist(); } catch (Exception e) { @@ -228,67 +247,59 @@ public void handleEndpointEvent(TargetTuple tuple, EventKind eventKind) { } } - private void pruneOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference targetRef = targetTuple.objRef; - if (targetRef == null) { - logger.errorv( - "Address {0} for Endpoint {1} had null target reference", - targetTuple.addr.getIp() != null - ? targetTuple.addr.getIp() - : targetTuple.addr.getHostname(), - targetTuple.objRef.getName()); + private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { + if (!target.isPersistent()) { + logger.infov( + "Target with serviceURL {0} does not exist in discovery tree. 
Skipped deleting", + target.connectUrl); return; } + DiscoveryNode targetNode = target.discoveryNode; - try { - Target t = Target.getTargetByConnectUrl(targetTuple.toTarget().connectUrl); - DiscoveryNode targetNode = t.discoveryNode; - - if (nsNode.children.contains(targetNode)) { - nsNode.children.remove(targetNode); - } else { - Pair pod = - queryForNode( - targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); - pod.getRight().children.remove(targetNode); - pod.getRight().persist(); - - Pair node = pod; - while (true) { - Pair owner = getOwnerNode(node); - if (owner == null) { - break; - } - DiscoveryNode ownerNode = owner.getRight(); - if (!node.getRight().hasChildren()) { - ownerNode.children.remove(pod.getRight()); - ownerNode.persist(); - } - node = owner; + if (nsNode.children.contains(targetNode)) { + nsNode.children.remove(targetNode); + } else { + final String namespace = nsNode.name; + DiscoveryNode child = targetNode; + while (true) { + String[] parentInfo = child.labels.get(DISCOVERY_PARENT_LABEL_KEY).split("/"); + Optional owner = + DiscoveryNode.getNode( + n -> { + return n.name.equals(parentInfo[1]) + && n.nodeType.equals(parentInfo[0]) + && namespace.equals( + n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY)); + }); + + if (owner.isEmpty()) { + break; } + logger.debugv("Found owner {0}", owner.get().name); + + owner.get().children.remove(child); + if (owner.get().hasChildren() + || owner.get().nodeType.equals(KubeDiscoveryNodeType.NAMESPACE.getKind())) { + break; + } + + child = owner.get(); } - nsNode.persist(); - t.delete(); - } catch (NoResultException e) { } + nsNode.persist(); + target.delete(); } - private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { - ObjectReference targetRef = targetTuple.objRef; - if (targetRef == null) { - logger.errorv( - "Address {0} for Endpoint {1} had null target reference", - targetTuple.addr.getIp() != null - ? targetTuple.addr.getIp() - : targetTuple.addr.getHostname(), - targetTuple.objRef.getName()); + private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReference targetRef) { + if (target.isPersistent()) { + logger.infov( + "Target with serviceURL {0} already exist in discovery tree. 
Skipped adding", + target.connectUrl); return; } - String targetKind = targetRef.getKind(); KubeDiscoveryNodeType targetType = KubeDiscoveryNodeType.fromKubernetesKind(targetKind); - Target target = targetTuple.toTarget(); DiscoveryNode targetNode = DiscoveryNode.target(target, KubeDiscoveryNodeType.ENDPOINT); target.discoveryNode = targetNode; target.persist(); @@ -302,22 +313,45 @@ private void buildOwnerChain(DiscoveryNode nsNode, TargetTuple targetTuple) { targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); pod.getRight().children.add(targetNode); + targetNode.labels.put( + DISCOVERY_PARENT_LABEL_KEY, + String.format( + "%s/%s", + pod.getRight().nodeType, + pod.getRight().name)); // Add a reference to parent node pod.getRight().persist(); - Pair node = pod; + Pair child = pod; while (true) { - Pair owner = getOwnerNode(node); + Pair owner = getOwnerNode(child); if (owner == null) { break; } + DiscoveryNode ownerNode = owner.getRight(); - ownerNode.children.add(node.getRight()); + DiscoveryNode childNode = child.getRight(); + + ownerNode.children.add(childNode); + childNode.labels.put( + DISCOVERY_PARENT_LABEL_KEY, + String.format( + "%s/%s", + ownerNode.nodeType, + ownerNode.name)); // Add a reference to parent node ownerNode.persist(); - node = owner; + child = owner; } - nsNode.children.add(node.getRight()); + nsNode.children.add(child.getRight()); + child.getRight() + .labels + .put( + DISCOVERY_PARENT_LABEL_KEY, + String.format( + "%s/%s", + nsNode.nodeType, + nsNode.name)); // Add a reference to parent node } else { // if the Endpoint points to something else(?) than a Pod, just add the target straight // to the Namespace @@ -361,7 +395,8 @@ private Pair queryForNode( DiscoveryNode.getNode( n -> { return name.equals(n.name) - && namespace.equals(n.labels.get(NAMESPACE_LABEL_KEY)); + && namespace.equals( + n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY)); }) .orElseGet( () -> { @@ -375,7 +410,7 @@ private Pair queryForNode( ? 
kubeObj.getMetadata().getLabels() : new HashMap<>(); // Add namespace to label to retrieve node later - newNode.labels.put(NAMESPACE_LABEL_KEY, namespace); + newNode.labels.put(DISCOVERY_NAMESPACE_LABEL_KEY, namespace); return newNode; }); @@ -409,7 +444,7 @@ Collection getWatchNamespaces() { } return n; }) - .filter(Objects::nonNull) + .filter((n) -> !n.isBlank()) .collect(Collectors.toSet()); } @@ -440,40 +475,31 @@ KubernetesClient kubeClient() { private final class EndpointsHandler implements ResourceEventHandler { @Override public void onAdd(Endpoints endpoints) { - getTargetTuplesFrom(endpoints) - .forEach( - (tuples) -> { - handleEndpointEvent(tuples, EventKind.FOUND); - }); + logger.infov( + "Endpoint {0} created in namespace {1}", + endpoints.getMetadata().getName(), endpoints.getMetadata().getNamespace()); + handleEndpointEvent(endpoints.getMetadata().getNamespace()); } @Override public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { - Set previousTuples = new HashSet<>(getTargetTuplesFrom(oldEndpoints)); - Set currentTuples = new HashSet<>(getTargetTuplesFrom(newEndpoints)); - - if (previousTuples.equals(currentTuples)) { - return; - } - - TargetTuple.compare(previousTuples).to(currentTuples).removed().stream() - .forEach(tuple -> handleEndpointEvent(tuple, EventKind.LOST)); - - TargetTuple.compare(previousTuples).to(currentTuples).added().stream() - .forEach(tuple -> handleEndpointEvent(tuple, EventKind.FOUND)); + logger.infov( + "Endpoint {0} modified in namespace {1}", + newEndpoints.getMetadata().getName(), + newEndpoints.getMetadata().getNamespace()); + handleEndpointEvent(newEndpoints.getMetadata().getNamespace()); } @Override public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { + logger.infov( + "Endpoint {0} deleted in namespace {1}", + endpoints.getMetadata().getName(), endpoints.getMetadata().getNamespace()); if (deletedFinalStateUnknown) { logger.warnv("Deleted final state unknown: {0}", endpoints); return; } - getTargetTuplesFrom(endpoints) - .forEach( - (tuple) -> { - handleEndpointEvent(tuple, EventKind.LOST); - }); + handleEndpointEvent(endpoints.getMetadata().getNamespace()); } } @@ -501,12 +527,7 @@ private class TargetTuple { } public Target toTarget() { - Pair pair = - queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); - HasMetadata obj = pair.getLeft(); try { - String targetName = objRef.getName(); - String ip = addr.getIp().replaceAll("\\.", "-"); String namespace = objRef.getNamespace(); @@ -524,10 +545,20 @@ public Target toTarget() { 0, "/jndi/rmi://" + host + ':' + port.getPort() + "/jmxrmi"); - Target target = new Target(); + Target target; + try { + return Target.getTargetByConnectUrl(URI.create(jmxUrl.toString())); + } catch (NoResultException e) { + } + + Pair pair = + queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); + HasMetadata obj = pair.getLeft(); + + target = new Target(); target.activeRecordings = new ArrayList<>(); target.connectUrl = URI.create(jmxUrl.toString()); - target.alias = targetName; + target.alias = objRef.getName(); target.labels = obj != null ? 
obj.getMetadata().getLabels() : new HashMap<>(); target.annotations = new Annotations(); target.annotations @@ -576,34 +607,5 @@ public boolean equals(Object other) { .append(port.getPort(), sr.port.getPort()) .build(); } - - public static Compare compare(Collection src) { - return new Compare(src); - } - - public static class Compare { - private Collection previous, current; - - public Compare(Collection previous) { - this.previous = new HashSet<>(previous); - } - - public Compare to(Collection current) { - this.current = new HashSet<>(current); - return this; - } - - public Collection added() { - Collection added = new HashSet<>(current); - added.removeAll(previous); - return added; - } - - public Collection removed() { - Collection removed = new HashSet<>(previous); - removed.removeAll(current); - return removed; - } - } } } diff --git a/src/main/java/io/cryostat/targets/Target.java b/src/main/java/io/cryostat/targets/Target.java index 2abb66f2e..66ed275d3 100644 --- a/src/main/java/io/cryostat/targets/Target.java +++ b/src/main/java/io/cryostat/targets/Target.java @@ -20,7 +20,9 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -185,6 +187,73 @@ public boolean equals(Object obj) { && Objects.equals(labels, other.labels); } + public static Compare compare(Collection src) { + return new Compare(src); + } + + public static class Compare { + private Collection previous, current; + + public Compare(Collection previous) { + this.previous = new HashSet<>(previous); + } + + public Compare to(Collection current) { + this.current = new HashSet<>(current); + return this; + } + + public Collection added() { + return removeAllUpdatedRefs(addedOrUpdatedRefs(), updated()); + } + + public Collection removed() { + return removeAllUpdatedRefs(removedOrUpdatedRefs(), updated()); + } + + public Collection updated() { + Collection updated = new HashSet<>(); + intersection(removedOrUpdatedRefs(), addedOrUpdatedRefs(), false) + .forEach((ref) -> updated.add(ref)); + return updated; + } + + private Collection addedOrUpdatedRefs() { + Collection added = new HashSet<>(current); + added.removeAll(previous); + return added; + } + + private Collection removedOrUpdatedRefs() { + Collection removed = new HashSet<>(previous); + removed.removeAll(current); + return removed; + } + + private Collection removeAllUpdatedRefs( + Collection src, Collection updated) { + Collection tnSet = new HashSet<>(src); + intersection(src, updated, true).stream().forEach((ref) -> tnSet.remove(ref)); + return tnSet; + } + + private Collection intersection( + Collection src, Collection other, boolean keepOld) { + final Collection intersection = new HashSet<>(); + + // Manual removal since Target also compares jvmId + for (Target srcTarget : src) { + for (Target otherTarget : other) { + if (Objects.equals(srcTarget.connectUrl, otherTarget.connectUrl)) { + intersection.add(keepOld ? 
srcTarget : otherTarget); + } + } + } + + return intersection; + } + } + public enum EventKind { FOUND, MODIFIED, From 4bddb2aaca0871d7839972b1329c75a0dd07b05a Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Sun, 21 Apr 2024 22:31:44 -0700 Subject: [PATCH 19/30] feat(discovery): define parent reference for discovery node --- .../io/cryostat/discovery/DiscoveryNode.java | 10 ++- .../cryostat/discovery/KubeApiDiscovery.java | 69 ++++++------------- 2 files changed, 29 insertions(+), 50 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 0e7a29a16..815941811 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -25,6 +25,7 @@ import io.cryostat.targets.Target; +import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.annotation.JsonView; @@ -38,6 +39,8 @@ import jakarta.persistence.Entity; import jakarta.persistence.EntityListeners; import jakarta.persistence.FetchType; +import jakarta.persistence.JoinColumn; +import jakarta.persistence.ManyToOne; import jakarta.persistence.OneToMany; import jakarta.persistence.OneToOne; import jakarta.persistence.PostPersist; @@ -71,11 +74,16 @@ public class DiscoveryNode extends PanacheEntity { @JsonView(Views.Flat.class) public Map labels = new HashMap<>(); - @OneToMany(fetch = FetchType.LAZY, orphanRemoval = true) + @OneToMany(fetch = FetchType.LAZY, orphanRemoval = true, mappedBy = "parent") @JsonView(Views.Nested.class) @Nullable public List children = new ArrayList<>(); + @ManyToOne + @JsonIgnore + @JoinColumn(name = "parentNode") + public DiscoveryNode parent; + @OneToOne( mappedBy = "discoveryNode", cascade = {CascadeType.ALL}, diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index a981587c2..25c2ac141 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -69,7 +69,6 @@ public class KubeApiDiscovery { public static final long ENDPOINTS_INFORMER_RESYNC_PERIOD = Duration.ofSeconds(30).toMillis(); public static final String DISCOVERY_NAMESPACE_LABEL_KEY = "discovery.cryostat.io/namespace"; - public static final String DISCOVERY_PARENT_LABEL_KEY = "discovery.cryostat.io/parent"; @Inject Logger logger; @@ -131,6 +130,7 @@ void onStart(@Observes @Priority(1) StartupEvent evt) { plugin.realm = node; plugin.builtin = true; universe.children.add(node); + node.parent = universe; plugin.persist(); universe.persist(); } @@ -240,6 +240,7 @@ public void handleEndpointEvent(String namespace) { realm.children.remove(nsNode); } else if (!realm.children.contains(nsNode)) { realm.children.add(nsNode); + nsNode.parent = realm; } realm.persist(); } catch (Exception e) { @@ -256,36 +257,22 @@ private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { } DiscoveryNode targetNode = target.discoveryNode; - if (nsNode.children.contains(targetNode)) { - nsNode.children.remove(targetNode); - } else { - final String namespace = nsNode.name; - DiscoveryNode child = targetNode; - while (true) { - String[] parentInfo = child.labels.get(DISCOVERY_PARENT_LABEL_KEY).split("/"); - Optional owner = - DiscoveryNode.getNode( - n -> { - return n.name.equals(parentInfo[1]) - && n.nodeType.equals(parentInfo[0]) - && 
namespace.equals( - n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY)); - }); - - if (owner.isEmpty()) { - break; - } - logger.debugv("Found owner {0}", owner.get().name); - - owner.get().children.remove(child); - if (owner.get().hasChildren() - || owner.get().nodeType.equals(KubeDiscoveryNodeType.NAMESPACE.getKind())) { - break; - } + DiscoveryNode child = targetNode; + while (true) { + DiscoveryNode parent = targetNode.parent; + if (parent == null) { + break; + } - child = owner.get(); + parent.children.remove(child); + if (parent.hasChildren() + || parent.nodeType.equals(KubeDiscoveryNodeType.NAMESPACE.getKind())) { + break; } + + child = parent; } + nsNode.persist(); target.delete(); } @@ -313,12 +300,7 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); pod.getRight().children.add(targetNode); - targetNode.labels.put( - DISCOVERY_PARENT_LABEL_KEY, - String.format( - "%s/%s", - pod.getRight().nodeType, - pod.getRight().name)); // Add a reference to parent node + targetNode.parent = pod.getRight(); pod.getRight().persist(); Pair child = pod; @@ -332,30 +314,19 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc DiscoveryNode childNode = child.getRight(); ownerNode.children.add(childNode); - childNode.labels.put( - DISCOVERY_PARENT_LABEL_KEY, - String.format( - "%s/%s", - ownerNode.nodeType, - ownerNode.name)); // Add a reference to parent node - ownerNode.persist(); + childNode.parent = ownerNode; + ownerNode.persist(); child = owner; } nsNode.children.add(child.getRight()); - child.getRight() - .labels - .put( - DISCOVERY_PARENT_LABEL_KEY, - String.format( - "%s/%s", - nsNode.nodeType, - nsNode.name)); // Add a reference to parent node + child.getRight().parent = nsNode; } else { // if the Endpoint points to something else(?) 
than a Pod, just add the target straight // to the Namespace nsNode.children.add(targetNode); + targetNode.parent = nsNode; } nsNode.persist(); From a1ea98a25c40d96197fc6b0b18aeb14342322b1b Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Sun, 21 Apr 2024 23:17:11 -0700 Subject: [PATCH 20/30] feat(discovery): cache discovery queries --- .../cryostat/discovery/KubeApiDiscovery.java | 95 ++++++++++++------- 1 file changed, 62 insertions(+), 33 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 25c2ac141..b6df51efe 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -26,6 +26,7 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ForkJoinPool; import java.util.stream.Collectors; @@ -59,6 +60,7 @@ import org.apache.commons.lang3.concurrent.ConcurrentException; import org.apache.commons.lang3.concurrent.LazyInitializer; import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.logging.Logger; @@ -83,6 +85,11 @@ public class KubeApiDiscovery { @ConfigProperty(name = "cryostat.discovery.k8s.port-numbers") Optional> JmxPortNumbers; + private final Map, Pair> + discoveryNodeCache = new ConcurrentHashMap<>(); + private final Map, Object> queryLocks = + new ConcurrentHashMap<>(); + private final LazyInitializer>> nsInformers = new LazyInitializer>>() { @Override @@ -245,6 +252,9 @@ public void handleEndpointEvent(String namespace) { realm.persist(); } catch (Exception e) { logger.warn("Endpoint handler exception", e); + } finally { + discoveryNodeCache.clear(); + queryLocks.clear(); } } @@ -296,8 +306,8 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc // add that to the Namespace Pair pod = - queryForNode( - targetRef.getNamespace(), targetRef.getName(), targetRef.getKind()); + discoveryNodeCache.computeIfAbsent( + cacheKey(targetRef.getNamespace(), targetRef), this::queryForNode); pod.getRight().children.add(targetNode); targetNode.parent = pod.getRight(); @@ -349,43 +359,60 @@ private Pair getOwnerNode(Pair KubeDiscoveryNodeType.fromKubernetesKind(o.getKind()) != null) .findFirst() .orElse(owners.get(0)); - return queryForNode(namespace, owner.getName(), owner.getKind()); + return discoveryNodeCache.computeIfAbsent(cacheKey(namespace, owner), this::queryForNode); + } + + private Triple cacheKey(String ns, OwnerReference resource) { + return Triple.of(ns, resource.getKind(), resource.getName()); + } + + // Unfortunately, ObjectReference and OwnerReference both independently implement getKind and + // getName - they don't come from a common base class. 
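+    // Editor's note -- illustrative aside, not part of the original change: both cacheKey
+    // overloads normalize a reference into the same Triple of (namespace, kind, name), so a
+    // Pod resolved while walking an owner chain (OwnerReference) and the same Pod resolved
+    // from an Endpoints target reference (ObjectReference) share one cache entry. A rough
+    // usage sketch, assuming a hypothetical "my-ns"/"Pod"/"my-pod" reference:
+    //
+    //   Triple<String, String, String> key = Triple.of("my-ns", "Pod", "my-pod");
+    //   Pair<HasMetadata, DiscoveryNode> cached =
+    //           discoveryNodeCache.computeIfAbsent(key, this::queryForNode);
+    //
+    // ConcurrentHashMap.computeIfAbsent runs queryForNode at most once per key per endpoint
+    // event, since handleEndpointEvent clears discoveryNodeCache and queryLocks in its
+    // finally block.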
+ private Triple cacheKey(String ns, ObjectReference resource) { + return Triple.of(ns, resource.getKind(), resource.getName()); } private Pair queryForNode( - String namespace, String name, String kind) { - KubeDiscoveryNodeType nodeType = KubeDiscoveryNodeType.fromKubernetesKind(kind); + Triple lookupKey) { + + String namespace = lookupKey.getLeft(); + String name = lookupKey.getRight(); + KubeDiscoveryNodeType nodeType = + KubeDiscoveryNodeType.fromKubernetesKind(lookupKey.getMiddle()); if (nodeType == null) { return null; } - HasMetadata kubeObj = - nodeType.getQueryFunction().apply(client()).apply(namespace).apply(name); - - DiscoveryNode node = - DiscoveryNode.getNode( - n -> { - return name.equals(n.name) - && namespace.equals( - n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY)); - }) - .orElseGet( - () -> { - DiscoveryNode newNode = new DiscoveryNode(); - newNode.name = name; - newNode.nodeType = nodeType.getKind(); - newNode.children = new ArrayList<>(); - newNode.target = null; - newNode.labels = - kubeObj != null - ? kubeObj.getMetadata().getLabels() - : new HashMap<>(); - // Add namespace to label to retrieve node later - newNode.labels.put(DISCOVERY_NAMESPACE_LABEL_KEY, namespace); - return newNode; - }); - - return Pair.of(kubeObj, node); + synchronized (queryLocks.computeIfAbsent(lookupKey, k -> new Object())) { + HasMetadata kubeObj = + nodeType.getQueryFunction().apply(client()).apply(namespace).apply(name); + + DiscoveryNode node = + DiscoveryNode.getNode( + n -> { + return name.equals(n.name) + && namespace.equals( + n.labels.get( + DISCOVERY_NAMESPACE_LABEL_KEY)); + }) + .orElseGet( + () -> { + DiscoveryNode newNode = new DiscoveryNode(); + newNode.name = name; + newNode.nodeType = nodeType.getKind(); + newNode.children = new ArrayList<>(); + newNode.target = null; + newNode.labels = + kubeObj != null + ? 
kubeObj.getMetadata().getLabels() + : new HashMap<>(); + // Add namespace to label to retrieve node later + newNode.labels.put( + DISCOVERY_NAMESPACE_LABEL_KEY, namespace); + return newNode; + }); + return Pair.of(kubeObj, node); + } } @ApplicationScoped @@ -523,7 +550,9 @@ public Target toTarget() { } Pair pair = - queryForNode(objRef.getNamespace(), objRef.getName(), objRef.getKind()); + discoveryNodeCache.computeIfAbsent( + cacheKey(namespace, objRef), KubeApiDiscovery.this::queryForNode); + HasMetadata obj = pair.getLeft(); target = new Target(); From 3029fe3312003e4448b1bc37ee03f20561cbdf78 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Mon, 22 Apr 2024 00:02:30 -0700 Subject: [PATCH 21/30] fix(discovery): fix bugs causing infinite loop in worker thread --- src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index b6df51efe..434fcbbec 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -27,7 +27,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ForkJoinPool; import java.util.stream.Collectors; import javax.management.remote.JMXServiceURL; @@ -49,6 +48,7 @@ import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; +import io.smallrye.mutiny.infrastructure.Infrastructure; import jakarta.annotation.Priority; import jakarta.enterprise.context.ApplicationScoped; import jakarta.enterprise.event.Observes; @@ -265,11 +265,12 @@ private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { target.connectUrl); return; } + DiscoveryNode targetNode = target.discoveryNode; DiscoveryNode child = targetNode; while (true) { - DiscoveryNode parent = targetNode.parent; + DiscoveryNode parent = child.parent; if (parent == null) { break; } @@ -463,7 +464,7 @@ KubernetesClient kubeClient() { if (kubeClient == null) { kubeClient = new KubernetesClientBuilder() - .withTaskExecutor(ForkJoinPool.commonPool()) + .withTaskExecutor(Infrastructure.getDefaultWorkerPool()) .build(); } return kubeClient; From 42521668d4fd6c2abab4f1eebc07ceb99417c602 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Mon, 22 Apr 2024 00:16:24 -0700 Subject: [PATCH 22/30] fix(discovery): ensure cascade remove when removing nodes --- .../java/io/cryostat/discovery/DiscoveryNode.java | 2 +- .../io/cryostat/discovery/KubeApiDiscovery.java | 14 +++++++++----- src/main/java/io/cryostat/targets/Target.java | 8 ++++---- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 815941811..1dfe2aab1 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -79,7 +79,7 @@ public class DiscoveryNode extends PanacheEntity { @Nullable public List children = new ArrayList<>(); - @ManyToOne + @ManyToOne(cascade = {CascadeType.ALL}) @JsonIgnore @JoinColumn(name = "parentNode") public DiscoveryNode parent; diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 434fcbbec..2941bbbbf 100644 --- 
a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -231,13 +231,13 @@ public void handleEndpointEvent(String namespace) { targetNodes.stream().map(node -> node.target).collect(Collectors.toSet()); Set observedTargets = targetRefMap.keySet(); - // Add newly added endpoints + // Add new targets Target.compare(persistedTargets) .to(observedTargets) .added() .forEach((t) -> buildOwnerChain(nsNode, t, targetRefMap.get(t))); - // Prune deleted endpoints + // Prune deleted targets Target.compare(persistedTargets) .to(observedTargets) .removed() @@ -266,16 +266,17 @@ private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { return; } - DiscoveryNode targetNode = target.discoveryNode; - - DiscoveryNode child = targetNode; + DiscoveryNode child = target.discoveryNode; while (true) { DiscoveryNode parent = child.parent; + if (parent == null) { break; } parent.children.remove(child); + parent.persist(); + if (parent.hasChildren() || parent.nodeType.equals(KubeDiscoveryNodeType.NAMESPACE.getKind())) { break; @@ -328,6 +329,8 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc childNode.parent = ownerNode; ownerNode.persist(); + childNode.persist(); + child = owner; } @@ -338,6 +341,7 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc // to the Namespace nsNode.children.add(targetNode); targetNode.parent = nsNode; + targetNode.persist(); } nsNode.persist(); diff --git a/src/main/java/io/cryostat/targets/Target.java b/src/main/java/io/cryostat/targets/Target.java index 66ed275d3..130ebcc93 100644 --- a/src/main/java/io/cryostat/targets/Target.java +++ b/src/main/java/io/cryostat/targets/Target.java @@ -204,16 +204,16 @@ public Compare to(Collection current) { } public Collection added() { - return removeAllUpdatedRefs(addedOrUpdatedRefs(), updated()); + return removeAllUpdatedRefs(addedOrUpdatedRefs(), updated(false)); } public Collection removed() { - return removeAllUpdatedRefs(removedOrUpdatedRefs(), updated()); + return removeAllUpdatedRefs(removedOrUpdatedRefs(), updated(true)); } - public Collection updated() { + public Collection updated(boolean keepOld) { Collection updated = new HashSet<>(); - intersection(removedOrUpdatedRefs(), addedOrUpdatedRefs(), false) + intersection(removedOrUpdatedRefs(), addedOrUpdatedRefs(), keepOld) .forEach((ref) -> updated.add(ref)); return updated; } From 2d4a4cf45c9dfc332491db812c10cbbba3bb3427 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Mon, 22 Apr 2024 01:21:12 -0700 Subject: [PATCH 23/30] fix(discovery): ensure all nodes have parent ref --- src/main/java/io/cryostat/discovery/ContainerDiscovery.java | 5 +++++ src/main/java/io/cryostat/discovery/CustomDiscovery.java | 2 ++ src/main/java/io/cryostat/discovery/DiscoveryNode.java | 5 +++-- src/main/java/io/cryostat/discovery/JDPDiscovery.java | 2 ++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java index fd2b32833..4a253c362 100644 --- a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java +++ b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java @@ -172,6 +172,7 @@ void onStart(@Observes StartupEvent evt) { plugin.realm = node; plugin.builtin = true; universe.children.add(node); + node.parent = universe; plugin.persist(); universe.persist(); } @@ -361,7 +362,9 @@ public void handleContainerEvent(ContainerSpec desc, 
EventKind evtKind) { DiscoveryNode.environment(podName, ContainerDiscoveryNodeType.POD); if (!realm.children.contains(pod)) { pod.children.add(node); + node.parent = pod; realm.children.add(pod); + pod.parent = realm; } else { pod = DiscoveryNode.getChild( @@ -373,10 +376,12 @@ public void handleContainerEvent(ContainerSpec desc, EventKind evtKind) { .equals(n.nodeType)) .orElseThrow(); pod.children.add(node); + node.parent = pod; } pod.persist(); } else { realm.children.add(node); + node.parent = realm; } target.persist(); node.persist(); diff --git a/src/main/java/io/cryostat/discovery/CustomDiscovery.java b/src/main/java/io/cryostat/discovery/CustomDiscovery.java index 831de5ee4..38c522f72 100644 --- a/src/main/java/io/cryostat/discovery/CustomDiscovery.java +++ b/src/main/java/io/cryostat/discovery/CustomDiscovery.java @@ -82,6 +82,7 @@ void onStart(@Observes StartupEvent evt) { plugin.realm = node; plugin.builtin = true; universe.children.add(node); + node.parent = universe; plugin.persist(); universe.persist(); } @@ -172,6 +173,7 @@ Response doV2Create( DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); realm.children.add(node); + node.parent = realm; target.persist(); node.persist(); realm.persist(); diff --git a/src/main/java/io/cryostat/discovery/DiscoveryNode.java b/src/main/java/io/cryostat/discovery/DiscoveryNode.java index 1dfe2aab1..828eae17e 100644 --- a/src/main/java/io/cryostat/discovery/DiscoveryNode.java +++ b/src/main/java/io/cryostat/discovery/DiscoveryNode.java @@ -79,9 +79,10 @@ public class DiscoveryNode extends PanacheEntity { @Nullable public List children = new ArrayList<>(); - @ManyToOne(cascade = {CascadeType.ALL}) - @JsonIgnore + @Nullable + @ManyToOne(fetch = FetchType.LAZY) @JoinColumn(name = "parentNode") + @JsonIgnore public DiscoveryNode parent; @OneToOne( diff --git a/src/main/java/io/cryostat/discovery/JDPDiscovery.java b/src/main/java/io/cryostat/discovery/JDPDiscovery.java index 4012ea7b3..45edbe89e 100644 --- a/src/main/java/io/cryostat/discovery/JDPDiscovery.java +++ b/src/main/java/io/cryostat/discovery/JDPDiscovery.java @@ -74,6 +74,7 @@ void onStart(@Observes StartupEvent evt) { plugin.realm = node; plugin.builtin = true; universe.children.add(node); + node.parent = universe; plugin.persist(); universe.persist(); } @@ -143,6 +144,7 @@ public synchronized void handleJdpEvent(JvmDiscoveryEvent evt) { target.discoveryNode = node; realm.children.add(node); + node.parent = realm; target.persist(); node.persist(); realm.persist(); From 543e103d9a7e180e274fdb43200ef0b14cf2e5c6 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Mon, 22 Apr 2024 02:33:28 -0700 Subject: [PATCH 24/30] fix(discovery): properly delete nodes when targets disappear --- src/main/java/io/cryostat/discovery/ContainerDiscovery.java | 5 +++-- src/main/java/io/cryostat/discovery/CustomDiscovery.java | 1 + src/main/java/io/cryostat/discovery/JDPDiscovery.java | 1 + src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 2 ++ 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java index 4a253c362..254cf6146 100644 --- a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java +++ b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java @@ -177,7 +177,7 @@ void onStart(@Observes StartupEvent evt) { universe.persist(); } - logger.info(String.format("Starting %s client", getRealm())); + logger.infov("Starting {0} client", getRealm()); 
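        // Descriptive note (editorial, not part of the original change): the realm node above
        // is created at startup only if it does not already exist; container discovery then
        // performs one immediate query and re-queries on a fixed interval via
        // vertx.setPeriodic using the configured poll period.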
queryContainers(); this.timerId = vertx.setPeriodic(pollPeriod.toMillis(), unused -> queryContainers()); @@ -187,7 +187,7 @@ void onStop(@Observes ShutdownEvent evt) { if (!enabled()) { return; } - logger.info(String.format("Shutting down %s client", getRealm())); + logger.infov("Shutting down {0} client", getRealm()); vertx.cancelTimer(timerId); } @@ -396,6 +396,7 @@ public void handleContainerEvent(ContainerSpec desc, EventKind evtKind) { } else { realm.children.remove(t.discoveryNode); } + t.discoveryNode.parent = null; realm.persist(); t.delete(); } diff --git a/src/main/java/io/cryostat/discovery/CustomDiscovery.java b/src/main/java/io/cryostat/discovery/CustomDiscovery.java index 38c522f72..f3b013942 100644 --- a/src/main/java/io/cryostat/discovery/CustomDiscovery.java +++ b/src/main/java/io/cryostat/discovery/CustomDiscovery.java @@ -214,6 +214,7 @@ public Response delete(@RestPath long id) throws URISyntaxException { Target target = Target.find("id", id).singleResult(); DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); realm.children.remove(target.discoveryNode); + target.discoveryNode = null; realm.persist(); target.delete(); return Response.noContent().build(); diff --git a/src/main/java/io/cryostat/discovery/JDPDiscovery.java b/src/main/java/io/cryostat/discovery/JDPDiscovery.java index 45edbe89e..4f057d959 100644 --- a/src/main/java/io/cryostat/discovery/JDPDiscovery.java +++ b/src/main/java/io/cryostat/discovery/JDPDiscovery.java @@ -152,6 +152,7 @@ public synchronized void handleJdpEvent(JvmDiscoveryEvent evt) { case LOST: Target t = Target.getTargetByConnectUrl(connectUrl); realm.children.remove(t.discoveryNode); + t.discoveryNode.parent = null; realm.persist(); t.delete(); break; diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 2941bbbbf..318a9b1cc 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -245,6 +245,7 @@ public void handleEndpointEvent(String namespace) { if (!nsNode.hasChildren()) { realm.children.remove(nsNode); + nsNode.parent = null; } else if (!realm.children.contains(nsNode)) { realm.children.add(nsNode); nsNode.parent = realm; @@ -275,6 +276,7 @@ private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { } parent.children.remove(child); + child.parent = null; parent.persist(); if (parent.hasChildren() From 4221626e07ef3b0265a34ebf6ae60e1cba9688ba Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Mon, 22 Apr 2024 03:54:28 -0700 Subject: [PATCH 25/30] fix(discovery): only fetch target in namespace of interest --- .../io/cryostat/discovery/KubeApiDiscovery.java | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 318a9b1cc..75bb128fb 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -218,7 +218,12 @@ public void handleEndpointEvent(String namespace) { try { List targetNodes = - DiscoveryNode.findAllByNodeType(KubeDiscoveryNodeType.ENDPOINT); + DiscoveryNode.findAllByNodeType(KubeDiscoveryNodeType.ENDPOINT).stream() + .filter( + (n) -> + namespace.equals( + n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY))) + .collect(Collectors.toList()); Map targetRefMap = new HashMap<>(); safeGetInformers().get(namespace).getStore().list().stream() 
.map((endpoint) -> getTargetTuplesFrom(endpoint)) @@ -549,10 +554,10 @@ public Target toTarget() { "", 0, "/jndi/rmi://" + host + ':' + port.getPort() + "/jmxrmi"); + URI connectUrl = URI.create(jmxUrl.toString()); - Target target; try { - return Target.getTargetByConnectUrl(URI.create(jmxUrl.toString())); + return Target.getTargetByConnectUrl(connectUrl); } catch (NoResultException e) { } @@ -562,9 +567,9 @@ public Target toTarget() { HasMetadata obj = pair.getLeft(); - target = new Target(); + Target target = new Target(); target.activeRecordings = new ArrayList<>(); - target.connectUrl = URI.create(jmxUrl.toString()); + target.connectUrl = connectUrl; target.alias = objRef.getName(); target.labels = obj != null ? obj.getMetadata().getLabels() : new HashMap<>(); target.annotations = new Annotations(); From 559a53d6ac9b3df64fe4516532ce712baa5629c2 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 23 Apr 2024 18:22:25 -0700 Subject: [PATCH 26/30] chore(discovery): clean up and address reviews --- .../discovery/ContainerDiscovery.java | 22 +++ .../cryostat/discovery/KubeApiDiscovery.java | 147 +++++++++++++----- .../java/io/cryostat/discovery/NodeType.java | 91 ----------- src/main/resources/application-dev.properties | 3 - .../resources/application-test.properties | 4 - src/main/resources/application.properties | 8 +- 6 files changed, 139 insertions(+), 136 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java index 254cf6146..ff8085c31 100644 --- a/src/main/java/io/cryostat/discovery/ContainerDiscovery.java +++ b/src/main/java/io/cryostat/discovery/ContainerDiscovery.java @@ -431,3 +431,25 @@ static record ContainerDetails(Config Config) {} static record Config(String Hostname) {} } + +enum ContainerDiscoveryNodeType implements NodeType { + // represents a container pod managed by Podman + POD("Pod"), + ; + + private final String kind; + + ContainerDiscoveryNodeType(String kind) { + this.kind = kind; + } + + @Override + public String getKind() { + return kind; + } + + @Override + public String toString() { + return getKind(); + } +} diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 75bb128fb..99c3d8673 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -24,9 +24,9 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; import java.util.stream.Collectors; import javax.management.remote.JMXServiceURL; @@ -68,8 +68,6 @@ public class KubeApiDiscovery { public static final String REALM = "KubernetesApi"; - public static final long ENDPOINTS_INFORMER_RESYNC_PERIOD = Duration.ofSeconds(30).toMillis(); - public static final String DISCOVERY_NAMESPACE_LABEL_KEY = "discovery.cryostat.io/namespace"; @Inject Logger logger; @@ -79,11 +77,14 @@ public class KubeApiDiscovery { @ConfigProperty(name = "cryostat.discovery.kubernetes.enabled") boolean enabled; - @ConfigProperty(name = "cryostat.discovery.k8s.port-names") - Optional> JmxPortNames; + @ConfigProperty(name = "cryostat.discovery.kubernetes.port-names") + List jmxPortNames; + + @ConfigProperty(name = "cryostat.discovery.kubernetes.port-numbers") + List jmxPortNumbers; - @ConfigProperty(name = "cryostat.discovery.k8s.port-numbers") - 
Optional> JmxPortNumbers; + @ConfigProperty(name = "cryostat.discovery.kubernetes.resync-period") + Duration informerResyncPeriod; private final Map, Pair> discoveryNodeCache = new ConcurrentHashMap<>(); @@ -109,7 +110,7 @@ protected HashMap> initialize() .inNamespace(ns) .inform( new EndpointsHandler(), - ENDPOINTS_INFORMER_RESYNC_PERIOD)); + informerResyncPeriod.toMillis())); logger.infov( "Started Endpoints SharedInformer for" + " namespace \"{0}\"", @@ -119,9 +120,11 @@ protected HashMap> initialize() } }; + // Priority is set higher than default 0 such that onStart is called first before onAfterStart + // This ensures realm node is persisted before initializing informers @Transactional void onStart(@Observes @Priority(1) StartupEvent evt) { - if (!(enabled())) { + if (!enabled()) { return; } @@ -172,7 +175,7 @@ boolean enabled() { boolean available() { try { boolean hasNamespace = StringUtils.isNotBlank(kubeConfig.getOwnNamespace()); - return kubeConfig.kubeApiAvailable() || hasNamespace; + return kubeConfig.kubeApiAvailable() && hasNamespace; } catch (Exception e) { logger.info(e); } @@ -184,8 +187,7 @@ KubernetesClient client() { } private boolean isCompatiblePort(EndpointPort port) { - return JmxPortNames.orElse(List.of()).contains(port.getName()) - || JmxPortNumbers.orElse(List.of()).contains(port.getPort()); + return jmxPortNames.contains(port.getName()) || jmxPortNumbers.contains(port.getPort()); } private List getTargetTuplesFrom(Endpoints endpoints) { @@ -332,7 +334,9 @@ private void buildOwnerChain(DiscoveryNode nsNode, Target target, ObjectReferenc DiscoveryNode ownerNode = owner.getRight(); DiscoveryNode childNode = child.getRight(); - ownerNode.children.add(childNode); + if (!ownerNode.children.contains(childNode)) { + ownerNode.children.add(childNode); + } childNode.parent = ownerNode; ownerNode.persist(); @@ -402,7 +406,8 @@ private Pair queryForNode( DiscoveryNode node = DiscoveryNode.getNode( n -> { - return name.equals(n.name) + return nodeType.getKind().equals(n.nodeType) + && name.equals(n.name) && namespace.equals( n.labels.get( DISCOVERY_NAMESPACE_LABEL_KEY)); @@ -429,24 +434,27 @@ private Pair queryForNode( @ApplicationScoped static final class KubeConfig { - public static final String KUBERNETES_NAMESPACE_PATH = - "/var/run/secrets/kubernetes.io/serviceaccount/namespace"; - private static final String OWN_NAMESPACE = "."; @Inject Logger logger; @Inject FileSystem fs; - @ConfigProperty(name = "cryostat.discovery.k8s.namespaces") - Optional> watchNamespaces; + @ConfigProperty(name = "cryostat.discovery.kubernetes.namespaces") + List watchNamespaces; @ConfigProperty(name = "kubernetes.service.host") - Optional serviceHost; + String serviceHost; - private KubernetesClient kubeClient; + @ConfigProperty(name = "cryostat.discovery.kubernetes.namespace-path") + String namespacePath; + + private final KubernetesClient kubeClient = + new KubernetesClientBuilder() + .withTaskExecutor(Infrastructure.getDefaultWorkerPool()) + .build(); Collection getWatchNamespaces() { - return watchNamespaces.orElse(List.of()).stream() + return watchNamespaces.stream() .map( n -> { if (OWN_NAMESPACE.equals(n)) { @@ -454,13 +462,13 @@ Collection getWatchNamespaces() { } return n; }) - .filter((n) -> !n.isBlank()) + .filter(StringUtils::isNotBlank) .collect(Collectors.toSet()); } String getOwnNamespace() { try { - return fs.readString(Path.of(KUBERNETES_NAMESPACE_PATH)); + return fs.readString(Path.of(namespacePath)); } catch (Exception e) { logger.trace(e); return null; @@ -468,16 +476,10 @@ 
String getOwnNamespace() { } boolean kubeApiAvailable() { - return serviceHost.isPresent(); + return StringUtils.isNotBlank(serviceHost); } KubernetesClient kubeClient() { - if (kubeClient == null) { - kubeClient = - new KubernetesClientBuilder() - .withTaskExecutor(Infrastructure.getDefaultWorkerPool()) - .build(); - } return kubeClient; } } @@ -485,7 +487,7 @@ KubernetesClient kubeClient() { private final class EndpointsHandler implements ResourceEventHandler { @Override public void onAdd(Endpoints endpoints) { - logger.infov( + logger.debugv( "Endpoint {0} created in namespace {1}", endpoints.getMetadata().getName(), endpoints.getMetadata().getNamespace()); handleEndpointEvent(endpoints.getMetadata().getNamespace()); @@ -493,7 +495,7 @@ public void onAdd(Endpoints endpoints) { @Override public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { - logger.infov( + logger.debugv( "Endpoint {0} modified in namespace {1}", newEndpoints.getMetadata().getName(), newEndpoints.getMetadata().getNamespace()); @@ -502,7 +504,7 @@ public void onUpdate(Endpoints oldEndpoints, Endpoints newEndpoints) { @Override public void onDelete(Endpoints endpoints, boolean deletedFinalStateUnknown) { - logger.infov( + logger.debugv( "Endpoint {0} deleted in namespace {1}", endpoints.getMetadata().getName(), endpoints.getMetadata().getNamespace()); if (deletedFinalStateUnknown) { @@ -557,7 +559,18 @@ public Target toTarget() { URI connectUrl = URI.create(jmxUrl.toString()); try { - return Target.getTargetByConnectUrl(connectUrl); + Target persistedTarget = Target.getTargetByConnectUrl(connectUrl); + DiscoveryNode targetNode = persistedTarget.discoveryNode; + if (!targetNode.nodeType.equals(KubeDiscoveryNodeType.ENDPOINT.getKind())) { + logger.warnv( + "Expected persisted target with serviceURL {0} to have node type" + + " {1} but found {2} ", + persistedTarget.connectUrl, + KubeDiscoveryNodeType.ENDPOINT.getKind(), + targetNode.nodeType); + throw new IllegalStateException(); + } + return persistedTarget; } catch (NoResultException e) { } @@ -621,3 +634,67 @@ public boolean equals(Object other) { } } } + +enum KubeDiscoveryNodeType implements NodeType { + NAMESPACE("Namespace"), + STATEFULSET( + "StatefulSet", + c -> ns -> n -> c.apps().statefulSets().inNamespace(ns).withName(n).get()), + DAEMONSET("DaemonSet", c -> ns -> n -> c.apps().daemonSets().inNamespace(ns).withName(n).get()), + DEPLOYMENT( + "Deployment", c -> ns -> n -> c.apps().deployments().inNamespace(ns).withName(n).get()), + REPLICASET( + "ReplicaSet", c -> ns -> n -> c.apps().replicaSets().inNamespace(ns).withName(n).get()), + REPLICATIONCONTROLLER( + "ReplicationController", + c -> ns -> n -> c.replicationControllers().inNamespace(ns).withName(n).get()), + POD("Pod", c -> ns -> n -> c.pods().inNamespace(ns).withName(n).get()), + ENDPOINT("Endpoint", c -> ns -> n -> c.endpoints().inNamespace(ns).withName(n).get()), + // OpenShift resources + DEPLOYMENTCONFIG("DeploymentConfig"), + ; + + private final String kubernetesKind; + private final transient Function< + KubernetesClient, Function>> + getFn; + + KubeDiscoveryNodeType(String kubernetesKind) { + this(kubernetesKind, client -> namespace -> name -> null); + } + + KubeDiscoveryNodeType( + String kubernetesKind, + Function>> + getFn) { + this.kubernetesKind = kubernetesKind; + this.getFn = getFn; + } + + @Override + public String getKind() { + return kubernetesKind; + } + + public Function>> + getQueryFunction() { + return getFn; + } + + public static KubeDiscoveryNodeType 
fromKubernetesKind(String kubernetesKind) { + if (kubernetesKind == null) { + return null; + } + for (KubeDiscoveryNodeType nt : values()) { + if (kubernetesKind.equalsIgnoreCase(nt.kubernetesKind)) { + return nt; + } + } + return null; + } + + @Override + public String toString() { + return getKind(); + } +} diff --git a/src/main/java/io/cryostat/discovery/NodeType.java b/src/main/java/io/cryostat/discovery/NodeType.java index 611cbfa11..cd96d729e 100644 --- a/src/main/java/io/cryostat/discovery/NodeType.java +++ b/src/main/java/io/cryostat/discovery/NodeType.java @@ -15,11 +15,6 @@ */ package io.cryostat.discovery; -import java.util.function.Function; - -import io.fabric8.kubernetes.api.model.HasMetadata; -import io.fabric8.kubernetes.client.KubernetesClient; - public interface NodeType { String getKind(); @@ -55,89 +50,3 @@ public String toString() { return getKind(); } } - -enum KubeDiscoveryNodeType implements NodeType { - NAMESPACE("Namespace"), - STATEFULSET( - "StatefulSet", - c -> ns -> n -> c.apps().statefulSets().inNamespace(ns).withName(n).get()), - DAEMONSET("DaemonSet", c -> ns -> n -> c.apps().daemonSets().inNamespace(ns).withName(n).get()), - DEPLOYMENT( - "Deployment", c -> ns -> n -> c.apps().deployments().inNamespace(ns).withName(n).get()), - REPLICASET( - "ReplicaSet", c -> ns -> n -> c.apps().replicaSets().inNamespace(ns).withName(n).get()), - REPLICATIONCONTROLLER( - "ReplicationController", - c -> ns -> n -> c.replicationControllers().inNamespace(ns).withName(n).get()), - POD("Pod", c -> ns -> n -> c.pods().inNamespace(ns).withName(n).get()), - ENDPOINT("Endpoint", c -> ns -> n -> c.endpoints().inNamespace(ns).withName(n).get()), - // OpenShift resources - DEPLOYMENTCONFIG("DeploymentConfig"), - ; - - private final String kubernetesKind; - private final transient Function< - KubernetesClient, Function>> - getFn; - - KubeDiscoveryNodeType(String kubernetesKind) { - this(kubernetesKind, client -> namespace -> name -> null); - } - - KubeDiscoveryNodeType( - String kubernetesKind, - Function>> - getFn) { - this.kubernetesKind = kubernetesKind; - this.getFn = getFn; - } - - @Override - public String getKind() { - return kubernetesKind; - } - - public Function>> - getQueryFunction() { - return getFn; - } - - public static KubeDiscoveryNodeType fromKubernetesKind(String kubernetesKind) { - if (kubernetesKind == null) { - return null; - } - for (KubeDiscoveryNodeType nt : values()) { - if (kubernetesKind.equalsIgnoreCase(nt.kubernetesKind)) { - return nt; - } - } - return null; - } - - @Override - public String toString() { - return getKind(); - } -} - -enum ContainerDiscoveryNodeType implements NodeType { - // represents a container pod managed by Podman - POD("Pod"), - ; - - private final String kind; - - ContainerDiscoveryNodeType(String kind) { - this.kind = kind; - } - - @Override - public String getKind() { - return kind; - } - - @Override - public String toString() { - return getKind(); - } -} diff --git a/src/main/resources/application-dev.properties b/src/main/resources/application-dev.properties index a1e20551a..d08f95bab 100644 --- a/src/main/resources/application-dev.properties +++ b/src/main/resources/application-dev.properties @@ -19,9 +19,6 @@ cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true cryostat.discovery.kubernetes.enabled=true -cryostat.discovery.k8s.namespaces= -kubernetes.service.host= - quarkus.datasource.devservices.enabled=true quarkus.datasource.devservices.image-name=quay.io/cryostat/cryostat-db diff --git 
a/src/main/resources/application-test.properties b/src/main/resources/application-test.properties index 753f240df..28d180f32 100644 --- a/src/main/resources/application-test.properties +++ b/src/main/resources/application-test.properties @@ -4,10 +4,6 @@ cryostat.discovery.jdp.enabled=true cryostat.discovery.podman.enabled=true cryostat.discovery.docker.enabled=true cryostat.discovery.kubernetes.enabled=true -cryostat.discovery.k8s.port-names= -cryostat.discovery.k8s.port-numbers= -cryostat.discovery.k8s.namespaces= -kubernetes.service.host= quarkus.test.env.JAVA_OPTS_APPEND=-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9091 -Dcom.sun.management.jmxremote.rmi.port=9091 -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties index c22379797..745458ec6 100644 --- a/src/main/resources/application.properties +++ b/src/main/resources/application.properties @@ -11,9 +11,11 @@ cryostat.discovery.plugins.jwt.signature.algorithm=HS256 cryostat.discovery.plugins.jwt.encryption.algorithm=dir cryostat.discovery.plugins.jwt.encryption.method=A256GCM cryostat.discovery.kubernetes.enabled=false -cryostat.discovery.k8s.port-names= -cryostat.discovery.k8s.port-numbers= -cryostat.discovery.k8s.namespaces= +cryostat.discovery.kubernetes.port-names= +cryostat.discovery.kubernetes.port-numbers= +cryostat.discovery.kubernetes.namespaces= +cryostat.discovery.kubernetes.namespace-path=/var/run/secrets/kubernetes.io/serviceaccount/namespace +cryostat.discovery.kubernetes.resync-period=30s kubernetes.service.host= quarkus.test.integration-test-profile=test From fea439175a24ab26d69c416fa20143cbead3b5d8 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Tue, 23 Apr 2024 20:40:04 -0700 Subject: [PATCH 27/30] fixup(discovery): use optional for injected list --- .../java/io/cryostat/discovery/KubeApiDiscovery.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 99c3d8673..50986dded 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.function.Function; @@ -78,10 +79,10 @@ public class KubeApiDiscovery { boolean enabled; @ConfigProperty(name = "cryostat.discovery.kubernetes.port-names") - List jmxPortNames; + Optional> jmxPortNames; @ConfigProperty(name = "cryostat.discovery.kubernetes.port-numbers") - List jmxPortNumbers; + Optional> jmxPortNumbers; @ConfigProperty(name = "cryostat.discovery.kubernetes.resync-period") Duration informerResyncPeriod; @@ -187,7 +188,8 @@ KubernetesClient client() { } private boolean isCompatiblePort(EndpointPort port) { - return jmxPortNames.contains(port.getName()) || jmxPortNumbers.contains(port.getPort()); + return jmxPortNames.orElse(List.of()).contains(port.getName()) + || jmxPortNumbers.orElse(List.of()).contains(port.getPort()); } private List getTargetTuplesFrom(Endpoints endpoints) { @@ -440,7 +442,7 @@ static final class KubeConfig 
{ @Inject FileSystem fs; @ConfigProperty(name = "cryostat.discovery.kubernetes.namespaces") - List watchNamespaces; + Optional> watchNamespaces; @ConfigProperty(name = "kubernetes.service.host") String serviceHost; @@ -454,7 +456,7 @@ static final class KubeConfig { .build(); Collection getWatchNamespaces() { - return watchNamespaces.stream() + return watchNamespaces.orElse(List.of()).stream() .map( n -> { if (OWN_NAMESPACE.equals(n)) { From 5f1d89e27fc9a3507326de3268c771bf8d6c9230 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Wed, 24 Apr 2024 09:31:50 -0700 Subject: [PATCH 28/30] fixup(transaction): run handler in correct transaction context --- .../cryostat/discovery/KubeApiDiscovery.java | 124 ++++++++++-------- 1 file changed, 70 insertions(+), 54 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 50986dded..157edaad0 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -47,6 +47,7 @@ import io.fabric8.kubernetes.client.KubernetesClientBuilder; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.quarkus.narayana.jta.QuarkusTransaction; import io.quarkus.runtime.ShutdownEvent; import io.quarkus.runtime.StartupEvent; import io.smallrye.mutiny.infrastructure.Infrastructure; @@ -211,61 +212,76 @@ private Map> safeGetInformers() { return informers; } - @Transactional public void handleEndpointEvent(String namespace) { - DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); - DiscoveryNode nsNode = - DiscoveryNode.getChild(realm, n -> n.name.equals(namespace)) - .orElse( - DiscoveryNode.environment( - namespace, KubeDiscoveryNodeType.NAMESPACE)); - - try { - List targetNodes = - DiscoveryNode.findAllByNodeType(KubeDiscoveryNodeType.ENDPOINT).stream() - .filter( - (n) -> - namespace.equals( - n.labels.get(DISCOVERY_NAMESPACE_LABEL_KEY))) - .collect(Collectors.toList()); - Map targetRefMap = new HashMap<>(); - safeGetInformers().get(namespace).getStore().list().stream() - .map((endpoint) -> getTargetTuplesFrom(endpoint)) - .flatMap(List::stream) - .filter((tuple) -> Objects.nonNull(tuple.objRef)) - .collect(Collectors.toList()) - .forEach((tuple) -> targetRefMap.put(tuple.toTarget(), tuple.objRef)); - - Set persistedTargets = - targetNodes.stream().map(node -> node.target).collect(Collectors.toSet()); - Set observedTargets = targetRefMap.keySet(); - - // Add new targets - Target.compare(persistedTargets) - .to(observedTargets) - .added() - .forEach((t) -> buildOwnerChain(nsNode, t, targetRefMap.get(t))); - - // Prune deleted targets - Target.compare(persistedTargets) - .to(observedTargets) - .removed() - .forEach((t) -> pruneOwnerChain(nsNode, t)); - - if (!nsNode.hasChildren()) { - realm.children.remove(nsNode); - nsNode.parent = null; - } else if (!realm.children.contains(nsNode)) { - realm.children.add(nsNode); - nsNode.parent = realm; - } - realm.persist(); - } catch (Exception e) { - logger.warn("Endpoint handler exception", e); - } finally { - discoveryNodeCache.clear(); - queryLocks.clear(); - } + QuarkusTransaction.joiningExisting() + .run( + () -> { + DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); + DiscoveryNode nsNode = + DiscoveryNode.getChild(realm, n -> n.name.equals(namespace)) + .orElse( + DiscoveryNode.environment( + namespace, + KubeDiscoveryNodeType.NAMESPACE)); + + try 
{ + List targetNodes = + DiscoveryNode.findAllByNodeType( + KubeDiscoveryNodeType.ENDPOINT) + .stream() + .filter( + (n) -> + namespace.equals( + n.labels.get( + DISCOVERY_NAMESPACE_LABEL_KEY))) + .collect(Collectors.toList()); + Map targetRefMap = new HashMap<>(); + safeGetInformers().get(namespace).getStore().list().stream() + .map((endpoint) -> getTargetTuplesFrom(endpoint)) + .flatMap(List::stream) + .filter((tuple) -> Objects.nonNull(tuple.objRef)) + .collect(Collectors.toList()) + .forEach( + (tuple) -> + targetRefMap.put( + tuple.toTarget(), tuple.objRef)); + + Set persistedTargets = + targetNodes.stream() + .map(node -> node.target) + .collect(Collectors.toSet()); + Set observedTargets = targetRefMap.keySet(); + + // Add new targets + Target.compare(persistedTargets) + .to(observedTargets) + .added() + .forEach( + (t) -> + buildOwnerChain( + nsNode, t, targetRefMap.get(t))); + + // Prune deleted targets + Target.compare(persistedTargets) + .to(observedTargets) + .removed() + .forEach((t) -> pruneOwnerChain(nsNode, t)); + + if (!nsNode.hasChildren()) { + realm.children.remove(nsNode); + nsNode.parent = null; + } else if (!realm.children.contains(nsNode)) { + realm.children.add(nsNode); + nsNode.parent = realm; + } + realm.persist(); + } catch (Exception e) { + logger.warn("Endpoint handler exception", e); + } finally { + discoveryNodeCache.clear(); + queryLocks.clear(); + } + }); } private void pruneOwnerChain(DiscoveryNode nsNode, Target target) { From eddd41e3a5b5e43a3b1455ba4a06d614d9b4a978 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Wed, 24 Apr 2024 10:50:24 -0700 Subject: [PATCH 29/30] test(integration): fix broken integration tests --- src/main/java/io/cryostat/discovery/CustomDiscovery.java | 2 +- src/main/java/io/cryostat/discovery/KubeApiDiscovery.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/CustomDiscovery.java b/src/main/java/io/cryostat/discovery/CustomDiscovery.java index f3b013942..2f7055cdb 100644 --- a/src/main/java/io/cryostat/discovery/CustomDiscovery.java +++ b/src/main/java/io/cryostat/discovery/CustomDiscovery.java @@ -214,7 +214,7 @@ public Response delete(@RestPath long id) throws URISyntaxException { Target target = Target.find("id", id).singleResult(); DiscoveryNode realm = DiscoveryNode.getRealm(REALM).orElseThrow(); realm.children.remove(target.discoveryNode); - target.discoveryNode = null; + target.discoveryNode.parent = null; realm.persist(); target.delete(); return Response.noContent().build(); diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 157edaad0..86f66491c 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -461,7 +461,7 @@ static final class KubeConfig { Optional> watchNamespaces; @ConfigProperty(name = "kubernetes.service.host") - String serviceHost; + Optional serviceHost; @ConfigProperty(name = "cryostat.discovery.kubernetes.namespace-path") String namespacePath; @@ -494,7 +494,7 @@ String getOwnNamespace() { } boolean kubeApiAvailable() { - return StringUtils.isNotBlank(serviceHost); + return StringUtils.isNotBlank(serviceHost.orElse("")); } KubernetesClient kubeClient() { From 978cc5ad5c26715ad3ae24414331867ed198cb56 Mon Sep 17 00:00:00 2001 From: Thuan Vo Date: Wed, 24 Apr 2024 11:22:08 -0700 Subject: [PATCH 30/30] fix(spotbugs): remove unused equals override --- 
.../cryostat/discovery/KubeApiDiscovery.java | 23 ------------------- 1 file changed, 23 deletions(-) diff --git a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java index 86f66491c..87ecfd356 100644 --- a/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java +++ b/src/main/java/io/cryostat/discovery/KubeApiDiscovery.java @@ -58,7 +58,6 @@ import jakarta.persistence.NoResultException; import jakarta.transaction.Transactional; import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.concurrent.ConcurrentException; import org.apache.commons.lang3.concurrent.LazyInitializer; import org.apache.commons.lang3.tuple.Pair; @@ -628,28 +627,6 @@ public Target toTarget() { return null; } } - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (other == this) { - return true; - } - if (!(other instanceof TargetTuple)) { - return false; - } - TargetTuple sr = (TargetTuple) other; - return new EqualsBuilder() - .append(objRef.getApiVersion(), sr.objRef.getApiVersion()) - .append(objRef.getKind(), sr.objRef.getKind()) - .append(objRef.getNamespace(), sr.objRef.getNamespace()) - .append(objRef.getName(), sr.objRef.getName()) - .append(addr.getIp(), sr.addr.getIp()) - .append(port.getPort(), sr.port.getPort()) - .build(); - } } }
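Editor's note (illustrative aside, not part of the patch series): the Target.compare(...).to(...)
helper added in patch 18 and adjusted in patch 22 is what handleEndpointEvent uses to reconcile
the persisted ENDPOINT nodes for a namespace against the Endpoints reported by the informer. A
minimal sketch of the call pattern, with made-up connect URLs:

    // Sketch only; Target fields follow the entity shown above, the URLs are invented.
    Target previous = new Target();
    previous.connectUrl = URI.create("service:jmx:rmi:///jndi/rmi://10-0-0-1.myns.pod:9091/jmxrmi");

    Target current = new Target();
    current.connectUrl = URI.create("service:jmx:rmi:///jndi/rmi://10-0-0-2.myns.pod:9091/jmxrmi");

    Set<Target> persisted = Set.of(previous);
    Set<Target> observed = Set.of(current);

    // "added" = observed but not yet persisted; "removed" = persisted but no longer observed.
    Collection<Target> toBuild = Target.compare(persisted).to(observed).added();   // {current}
    Collection<Target> toPrune = Target.compare(persisted).to(observed).removed(); // {previous}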