From 1025301f1b2bd4773d6f5bccc03b678286c9c50b Mon Sep 17 00:00:00 2001 From: djin Date: Mon, 23 Jul 2018 14:01:19 -0700 Subject: [PATCH 01/27] Alerts pagination web service endpoints --- .../salesforce/dva/argus/entity/Alert.java | 432 +++++++++++++++++- .../dva/argus/service/AlertService.java | 50 ++ .../service/alert/DefaultAlertService.java | 52 ++- .../dva/argus/service/AlertServiceTest.java | 246 ++++++++++ .../dva/argus/ws/dto/ItemsCountDto.java | 107 +++++ .../argus/ws/resources/AlertResources.java | 83 ++++ 6 files changed, 968 insertions(+), 2 deletions(-) create mode 100644 ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/dto/ItemsCountDto.java diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java index 80f38147e..536944ded 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java @@ -149,6 +149,31 @@ @NamedQuery( name = "Alert.getSharedAlertsByOwner", query = "SELECT a from Alert a where a.owner = :owner AND a.shared = true AND a.id not in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = true)" + ), + // Paginated queries + @NamedQuery( + name = "Alert.findByOwnerPaged", + query = "SELECT a FROM Alert a WHERE a.owner = :owner AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false) order by a.id asc" + ), + @NamedQuery( + name = "Alert.countByOwner", + query = "SELECT count(a) FROM Alert a WHERE a.owner = :owner AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" + ), + @NamedQuery( + name = "Alert.getSharedAlertsPaged", + query = "SELECT a from Alert a where a.shared = true AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false) order by a.id asc" + ), + @NamedQuery( + name = "Alert.countSharedAlerts", + query = "SELECT count(a) from Alert a where a.shared = true AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" + ), + @NamedQuery( + name = "Alert.getPrivateAlertsForPrivilegedUserPaged", + query = "SELECT a from Alert a where a.shared = false AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false) order by a.id asc" + ), + @NamedQuery( + name = "Alert.countPrivateAlertsForPrivilegedUser", + query = "SELECT count(a) from Alert a where a.shared = false AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" ) } ) @@ -189,6 +214,10 @@ public class Alert extends JPAEntity implements Serializable, CronJob { @Metadata private boolean shared; + // Default values for page limit and page offset + private static int DEFAULT_PAGE_LIMIT = 10; + private static int DEFAULT_PAGE_OFFSET = 0; + //~ Constructors ********************************************************************************************************************************* /** @@ -284,6 +313,68 @@ public static List findByOwner(EntityManager em, PrincipalUser owner) { return new ArrayList<>(0); } } + + /** + * Finds all alerts for the given owner with given limit and offset. + * + * @param em + * The entity manager to user. Cannot be null. + * @param owner + * The owner to retrieve alerts for. Cannot be null. + * @param limit + * The limit of return to return. + * @param offset + * The starting offset of the result. + * + * @return The list of alerts for the owner. 
+ */ + public static List<Alert> findByOwnerPaged(EntityManager em, PrincipalUser owner, Integer limit, Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + requireArgument(owner != null, "Owner cannot be null."); + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + TypedQuery<Alert> query = em.createNamedQuery("Alert.findByOwnerPaged", Alert.class); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + try { + query.setParameter("owner", owner); + query.setMaxResults(limit); + query.setFirstResult(offset); + return query.getResultList(); + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } + + /** + * Counts the number of alerts for the given owner. + * + * @param em + * The entity manager to use. Cannot be null. + * @param owner + * The owner to count alerts for. Cannot be null. + * + * @return The total number of alerts for the owner. + */ + public static int countByOwner(EntityManager em, PrincipalUser owner) { + requireArgument(em != null, "Entity manager can not be null."); + requireArgument(owner != null, "Owner cannot be null."); + + TypedQuery<Long> query = em.createNamedQuery("Alert.countByOwner", Long.class); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + try { + query.setParameter("owner", owner); + return query.getSingleResult().intValue(); + } catch (NoResultException ex) { + return 0; + } + } public static List<Alert> findByOwnerMeta(EntityManager em, PrincipalUser owner) { requireArgument(em != null, "Entity manager can not be null."); @@ -326,6 +417,74 @@ public static List findByOwnerMeta(EntityManager em, PrincipalUser owner) return new ArrayList<>(0); } } + + /** + * Finds all alerts' metadata for the given owner, with the given limit and offset. + * + * @param em + * The entity manager to use. Cannot be null. + * @param owner + * The owner to retrieve alerts for. Cannot be null. + * @param limit + * The maximum number of alerts to return. + * @param offset + * The starting offset of the result. + * + * @return The list of alerts for the owner.
+ */ + public static List findByOwnerMetaPaged(EntityManager em, PrincipalUser owner, Integer limit, + Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + try { + CriteriaBuilder cb = em.getCriteriaBuilder(); + CriteriaQuery cq = cb.createTupleQuery(); + Root e = cq.from(Alert.class); + + List> fieldsToSelect = new ArrayList<>(); + for (Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { + fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); + } + cq.multiselect(fieldsToSelect); + cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("owner"), owner)); + cq.orderBy(cb.asc(e.get("id"))); + + TypedQuery query = em.createQuery(cq); + + // Set limit and offset for pagination + query.setMaxResults(limit); + query.setFirstResult(offset); + + List result = query.getResultList(); + + List alerts = new ArrayList<>(); + for (Tuple tuple : result) { + + Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), + PrincipalUser.class.cast(tuple.get("owner")), String.class.cast(tuple.get("name")), + String.class.cast(tuple.get("expression")), String.class.cast(tuple.get("cronEntry"))); + + a.id = BigInteger.class.cast(tuple.get("id")); + a.enabled = Boolean.class.cast(tuple.get("enabled")); + a.createdDate = Date.class.cast(tuple.get("createdDate")); + a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); + a.shared = Boolean.class.cast(tuple.get("shared")); + a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); + + alerts.add(a); + } + + return alerts; + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } /** * Finds all alerts. @@ -579,12 +738,70 @@ public static List findSharedAlerts(EntityManager em, PrincipalUser owner return new ArrayList<>(0); } } + + /** + * Find all shared alerts with given limit and offset. + * + * @param em + * The entity manager to user. Cannot be null. + * @param owner + * The owner of shared alerts to filter on + * @param limit + * The maximum number of rows to return. + * @param offset + * The starting offset of the result. + * + * @return The list of shared alerts with given limit and offset. + */ + public static List findSharedAlertsPaged(EntityManager em, Integer limit, Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + TypedQuery query; + query = em.createNamedQuery("Alert.getSharedAlertsPaged", Alert.class); + query.setMaxResults(limit); + query.setFirstResult(offset); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + + try { + return query.getResultList(); + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } + + /** + * Count the total number of all shared alerts. + * + * @param em + * The entity manager to user. Cannot be null. + * + * @return The count of all shared alerts. 
+ */ + public static int countSharedAlerts(EntityManager em) { + requireArgument(em != null, "Entity manager can not be null."); + + TypedQuery query = em.createNamedQuery("Alert.countSharedAlerts", Long.class); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + try { + return query.getSingleResult().intValue(); + } catch (NoResultException ex) { + return 0; + } + } /** * Gets all meta information of shared alerts with filtering. * * @param em The entity manager to user. Cannot be null. - * @param owner The owner of shared alerts to filter on + * @param owner The owner to filter on * @param limit The maximum number of rows to return. * * @return The list of all shared alerts with meta information only. Will never be null but may be empty. @@ -640,6 +857,219 @@ public static List findSharedAlertsMeta(EntityManager em, PrincipalUser o return new ArrayList<>(0); } } + + /** + * Find all shared alerts meta with given limit and offset. + * + * @param em + * The entity manager to user. Cannot be null. + * @param owner + * The owner to filter on + * @param limit + * The maximum number of rows to return. + * @param offset + * The starting offset of the result. + * + * @return The list of shared alerts with given limit and offset. + */ + public static List findSharedAlertsMetaPaged(EntityManager em, Integer limit, Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + try { + CriteriaBuilder cb = em.getCriteriaBuilder(); + CriteriaQuery cq = cb.createTupleQuery(); + Root e = cq.from(Alert.class); + + List> fieldsToSelect = new ArrayList<>(); + for (Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { + fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); + } + cq.multiselect(fieldsToSelect); + cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("shared"), true)); + cq.orderBy(cb.asc(e.get("id"))); + + TypedQuery query = em.createQuery(cq); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + + // Set limit and offset for pagination + query.setMaxResults(limit); + query.setFirstResult(offset); + + List result = query.getResultList(); + + List alerts = new ArrayList<>(); + for (Tuple tuple : result) { + + Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), + PrincipalUser.class.cast(tuple.get("owner")), String.class.cast(tuple.get("name")), + String.class.cast(tuple.get("expression")), String.class.cast(tuple.get("cronEntry"))); + + a.id = BigInteger.class.cast(tuple.get("id")); + a.enabled = Boolean.class.cast(tuple.get("enabled")); + a.createdDate = Date.class.cast(tuple.get("createdDate")); + a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); + a.shared = Boolean.class.cast(tuple.get("shared")); + a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); + + alerts.add(a); + } + + return alerts; + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } + + /** + * Find all private alerts (non-shared alerts) for given privileged user + * with given limit and offset. + * + * @param em + * The entity manager to user. Cannot be null. + * @param owner + * The owner to filter on + * @param limit + * The maximum number of rows to return. + * @param offset + * The starting offset of the result. 
+ * + * @return The list of private alerts with given limit and offset. + */ + public static List findPrivateAlertsForPrivilegedUserPaged(EntityManager em, PrincipalUser owner, + Integer limit, Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + // Invalid user nor non-privileged user shall not view other's + // non-shared alerts, thus immediately return empty list + if (owner == null || !owner.isPrivileged()) { + return new ArrayList<>(0); + } + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + TypedQuery query = em.createNamedQuery("Alert.getPrivateAlertsForPrivilegedUserPaged", Alert.class); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + try { + query.setParameter("owner", owner); + query.setMaxResults(limit); + query.setFirstResult(offset); + return query.getResultList(); + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } + + /** + * Find all private alerts (non-shared alerts) meta for given privileged user with given limit and offset. + * + * @param em The entity manager to user. Cannot be null. + * @param owner The owner to filter on + * @param limit The maximum number of rows to return. + * @param offset The starting offset of the result. + * + * @return The list of private alerts' meta with given limit and offset. + */ + public static List findPrivateAlertsForPrivilegedUserMetaPaged(EntityManager em, PrincipalUser owner, Integer limit, Integer offset) { + requireArgument(em != null, "Entity manager can not be null."); + if (limit == null || limit <= 0) { + limit = DEFAULT_PAGE_LIMIT; + } + if (offset == null || offset < 0) { + offset = DEFAULT_PAGE_OFFSET; + } + + // Invalid user nor non-privileged user shall not view other's non-shared alerts, thus immediately return empty list + if (owner == null || !owner.isPrivileged()) { + return new ArrayList<>(0); + } + + try { + CriteriaBuilder cb = em.getCriteriaBuilder(); + CriteriaQuery cq = cb.createTupleQuery(); + Root e = cq.from(Alert.class); + + List> fieldsToSelect = new ArrayList<>(); + for(Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { + fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); + } + cq.multiselect(fieldsToSelect); + + // Query for alerts that are not marked as deleted, non-shared, owned by others + cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("shared"), false)); + cq.orderBy(cb.asc(e.get("id"))); + + TypedQuery query = em.createQuery(cq); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + + // Set limit and offset for pagination + query.setMaxResults(limit); + query.setFirstResult(offset); + + List result = query.getResultList(); + + List alerts = new ArrayList<>(); + for(Tuple tuple : result) { + + Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), PrincipalUser.class.cast(tuple.get("owner")), + String.class.cast(tuple.get("name")), String.class.cast(tuple.get("expression")), + String.class.cast(tuple.get("cronEntry"))); + + a.id = BigInteger.class.cast(tuple.get("id")); + a.enabled = Boolean.class.cast(tuple.get("enabled")); + a.createdDate = Date.class.cast(tuple.get("createdDate")); + a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); + a.shared = Boolean.class.cast(tuple.get("shared")); + a.modifiedBy = 
PrincipalUser.class.cast(tuple.get("modifiedBy")); + + alerts.add(a); + } + + return alerts; + } catch (NoResultException ex) { + return new ArrayList<>(0); + } + } + + /** + * Count the total number of private alerts (non-shared alerts) for + * privileged user. + * + * @param em + * The entity manager to user. Cannot be null. + * @param owner + * The owner to filter on. + * + * @return The total number of private alerts for privileged user. + */ + public static int countPrivateAlertsForPrivilegedUser(EntityManager em, PrincipalUser owner) { + requireArgument(em != null, "Entity manager can not be null."); + requireArgument(owner != null, "Owner cannot be null."); + + if (!owner.isPrivileged()) { + return 0; + } + + TypedQuery query = em.createNamedQuery("Alert.countPrivateAlertsForPrivilegedUser", Long.class); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + try { + return query.getSingleResult().intValue(); + } catch (NoResultException ex) { + return 0; + } + } /** * Finds all alerts whose name starts with the given prefix. diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java index e184c5a86..f746dbc65 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java @@ -136,6 +136,24 @@ public interface AlertService extends Service { * @return The list of alerts. Will never be null, but may be empty. */ List findAlertsByOwner(PrincipalUser owner, boolean metadataOnly); + + /** + * Returns a list of alerts for an owner with given limit and offset. + * + * @param owner The owner to return alerts for. Cannot be null. + * + * @return The list of alerts. + */ + List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataOnly, Integer limit, Integer offset); + + /** + * Count the total number of alerts for the owner. + * + * @param owner The owner of the alerts. Cannot be null. + * + * @return The total number of owner's alert. + */ + int countAlertsByOwner(PrincipalUser owner); /** * Returns a list of alerts that have been marked for deletion. @@ -267,6 +285,20 @@ public interface AlertService extends Service { * @return The list of all alerts. Will never be null, but may be empty. */ List findSharedAlerts(boolean metadataOnly, PrincipalUser owner, Integer limit); + + /** + * Return a list of shared alerts. + * + * @return The list of shared alerts. + */ + List findSharedAlertsPaged(boolean metadataOnly, Integer limit, Integer offset); + + /** + * Count the total number of shared alerts. + * + * @return The total number of shared alerts. + */ + int countSharedAlerts(); /** * Returns the list of supported notifiers. @@ -290,6 +322,24 @@ public interface AlertService extends Service { * @param notifications The notifications to update. */ void updateNotificationsActiveStatusAndCooldown(List notifications); + + /** + * Find a list of private alerts (non-shared alerts) for the given privileged user. + * + * @param owner The owner to filter on. + * + * @return The list of private alerts if privileged user. + */ + List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset); + + /** + * Count the total number of private alerts (non-shared alerts) for the privileged user. + * + * @param owner The owner to filter on. 
+ * + * @return The total number of private alerts for privileged user. + */ + int countPrivateAlertsForPrivilegedUser(PrincipalUser owner); //~ Enums **************************************************************************************************************************************** diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java index cc0c2a387..339f29a8b 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java @@ -275,6 +275,21 @@ public List findAlertsByOwner(PrincipalUser owner, boolean metadataOnly) return metadataOnly ? Alert.findByOwnerMeta(_emProvider.get(), owner) : Alert.findByOwner(_emProvider.get(), owner); } + + @Override + public List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataOnly, Integer limit, Integer offset) { + requireNotDisposed(); + requireArgument(owner != null, "Owner cannot be null."); + + return metadataOnly ? Alert.findByOwnerMetaPaged(_emProvider.get(), owner, limit, offset) : Alert.findByOwnerPaged(_emProvider.get(), owner, limit, offset); + } + + @Override + public int countAlertsByOwner(PrincipalUser owner) { + requireNotDisposed(); + requireArgument(owner != null, "Owner cannot be null."); + return Alert.countByOwner(_emProvider.get(), owner); + } @Override public Alert findAlertByPrimaryKey(BigInteger id) { @@ -827,6 +842,42 @@ public List findSharedAlerts(boolean metadataOnly, PrincipalUser owner, I requireNotDisposed(); return metadataOnly ? Alert.findSharedAlertsMeta(_emProvider.get(), owner, limit) : Alert.findSharedAlerts(_emProvider.get(), owner, limit); } + + @Override + public List findSharedAlertsPaged(boolean metadataOnly, Integer limit, Integer offset) { + requireNotDisposed(); + return metadataOnly ? Alert.findSharedAlertsMetaPaged(_emProvider.get(), limit, offset) : Alert.findSharedAlertsPaged(_emProvider.get(), limit, offset); + } + + @Override + public int countSharedAlerts() { + requireNotDisposed(); + return Alert.countSharedAlerts(_emProvider.get()); + } + + @Override + public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset) { + requireNotDisposed(); + + // Invalid user nor non-privileged user shall not view other's non-shared alerts, thus immediately return empty list + if (owner == null || !owner.isPrivileged()) { + return new ArrayList<>(0); + } + + return metadataOnly ? Alert.findPrivateAlertsForPrivilegedUserMetaPaged(_emProvider.get(), owner, limit, offset) : Alert.findPrivateAlertsForPrivilegedUserPaged(_emProvider.get(), owner, limit, offset); + } + + @Override + public int countPrivateAlertsForPrivilegedUser(PrincipalUser owner) { + requireNotDisposed(); + + // Invalid user nor non-privileged user shall not view other's non-shared alerts, thus immediately return 0 + if (owner == null || !owner.isPrivileged()) { + return 0; + } + + return Alert.countPrivateAlertsForPrivilegedUser(_emProvider.get(), owner); + } /** * Returns an instance of a supported notifier. @@ -1154,6 +1205,5 @@ public void setTriggeredMetric(Metric triggeredMetric) { this.triggeredMetric = triggeredMetric; } } - } /* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved. 
*/ \ No newline at end of file diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java index ab943ed03..229607fd4 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java @@ -216,6 +216,73 @@ public void testfindAlertsByOwnerMeta() { assertTrue(actualSet.contains(alert)); } } + + @Test + public void testFindAlertsByOwnerPaged() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + String userName = createRandomName(); + int alertsCount = 25; + PrincipalUser user = new PrincipalUser(admin, userName, userName + "@testcompany.com"); + + user = userService.updateUser(user); + + List expectedAlerts = new ArrayList<>(); + + for (int i = 0; i < alertsCount; i++) { + expectedAlerts.add(alertService.updateAlert(new Alert(user, user, "alert_" + i, EXPRESSION, "* * * * *"))); + } + + int limit = 10; // Page size + List actualAlerts = new ArrayList<>(); + + // Fetch first page + List page = alertService.findAlertsByOwnerPaged(user, true, limit, 0); + assertEquals(page.size(), limit); + actualAlerts.addAll(page); + + // Fetch second page + page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + assertEquals(page.size(), limit); + actualAlerts.addAll(page); + + // Fetch remaining alerts (less than a page) + page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + assertEquals(page.size(), expectedAlerts.size() - actualAlerts.size()); + actualAlerts.addAll(page); + + // Try to fetch again should be empty result + page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + assertEquals(0, page.size()); + + Set actualSet = new HashSet<>(); + + actualSet.addAll(actualAlerts); + for (Alert alert : expectedAlerts) { + assertTrue(actualSet.contains(alert)); + } + } + + @Test + public void testCountAlertsByOwner() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + String userName = createRandomName(); + int alertsCount = random.nextInt(20) + 1; + PrincipalUser user = new PrincipalUser(admin, userName, userName + "@testcompany.com"); + + user = userService.updateUser(user); + + List expectedAlerts = new ArrayList<>(); + + for (int i = 0; i < alertsCount; i++) { + expectedAlerts.add(alertService.updateAlert(new Alert(user, user, "alert_" + i, EXPRESSION, "* * * * *"))); + } + + int cnt = alertService.countAlertsByOwner(user); + + assertEquals(cnt, expectedAlerts.size()); + } @Test public void findAllAlerts() { @@ -555,6 +622,66 @@ public void testFindSharedAlertsMeta() { assertEquals(expectedSharedResult, alertService.findSharedAlerts(true, null, null)); } + + @Test + public void testFindSharedAlertsMetaPaged() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + Alert alert1 = alertService.updateAlert(new Alert(user1, user1, "alert1", EXPRESSION, "* * * * *")); + Alert alert2 = 
alertService.updateAlert(new Alert(user2, user2, "alert2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert3", EXPRESSION, "* * * * *")); + + + alert1.setShared(true); + alertService.updateAlert(alert1); + alert2.setShared(true); + alertService.updateAlert(alert2); + alert3.setShared(false); + alertService.updateAlert(alert3); + + Set sharedAlerts = new HashSet<>(); + sharedAlerts.add("alert1"); + sharedAlerts.add("alert2"); + + // First page + List page = alertService.findSharedAlertsPaged(true, 1, 0); + assertEquals(1, page.size()); + assertTrue(sharedAlerts.contains(page.get(0).getName())); + + // Second page + page = alertService.findSharedAlertsPaged(true, 1, 1); + assertEquals(1, page.size()); + assertTrue(sharedAlerts.contains(page.get(0).getName())); + + // Thrid page should be zero + page = alertService.findSharedAlertsPaged(true, 1, 2); + assertEquals(0, page.size()); + } + + @Test + public void testCountSharedAlertsMetaPaged() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + Alert alert1 = alertService.updateAlert(new Alert(user1, user1, "alert1", EXPRESSION, "* * * * *")); + Alert alert2 = alertService.updateAlert(new Alert(user2, user2, "alert2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert3", EXPRESSION, "* * * * *")); + + + alert1.setShared(true); + alertService.updateAlert(alert1); + alert2.setShared(true); + alertService.updateAlert(alert2); + alert3.setShared(false); + alertService.updateAlert(alert3); + + assertEquals(2, alertService.countSharedAlerts()); + } @Test public void testFindSharedAlertsByOwner() { @@ -639,6 +766,125 @@ public void testFindSharedAlertsMetaByOwner() { alertService.updateAlert(alertSharedAdmin); assertEquals(new ArrayList(), alertService.findSharedAlerts(true, admin, null)); } + + @Test + public void testFindPrivateAlertsPagedForNonPrivilegedUser() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + + // By default user is not privileged + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + + Alert alert1 = alertService.updateAlert(new Alert(user1, user1, "alert-name_private1", EXPRESSION, "* * * * *")); + Alert alert2 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private3", EXPRESSION, "* * * * *")); + + alert1.setShared(false); + alertService.updateAlert(alert1); + alert2.setShared(false); + alertService.updateAlert(alert2); + alert3.setShared(false); + alertService.updateAlert(alert3); + + // Assert result is empty for non-privileged user + assertEquals(0, alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 100, 0).size()); + } + + @Test + public void testCountPrivateAlertsForNonPrivilegedUser() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = 
system.getServiceFactory().getAlertService(); + + // By default user is not privileged + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + Alert alert1 = alertService.updateAlert(new Alert(user1, user1, "alert-name_private1", EXPRESSION, "* * * * *")); + Alert alert2 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private3", EXPRESSION, "* * * * *")); + + alert1.setShared(false); + alertService.updateAlert(alert1); + alert2.setShared(false); + alertService.updateAlert(alert2); + alert3.setShared(false); + alertService.updateAlert(alert3); + + // Assert non-privileged user see zero private alerts + assertEquals(0, alertService.countPrivateAlertsForPrivilegedUser(user1)); + } + + @Test + public void testFindPrivateAlertsPagedForPrivilegedUser() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + ManagementService managementService = system.getServiceFactory().getManagementService(); + + // By default user is not privileged + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + managementService.setAdministratorPrivilege(user1, true); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + + Alert alert1 = alertService.updateAlert(new Alert(user1, user1, "alert-name_private1", EXPRESSION, "* * * * *")); + Alert alert2 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert-name-shared3", EXPRESSION, "* * * * *")); + + alert1.setShared(false); + alertService.updateAlert(alert1); + alert2.setShared(false); + alertService.updateAlert(alert2); + alert3.setShared(true); + alertService.updateAlert(alert3); + + Set alertNames = new HashSet<>(); + + // Fetch first page + List page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 0); + assertEquals(1, page.size()); + alertNames.add(page.get(0).getName()); + + // Fetch second page + page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 1); + assertEquals(1, page.size()); + alertNames.add(page.get(0).getName()); + + // Fetch third page, should be empty + page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 2); + assertEquals(0, page.size()); + + // Assert all private alerts are fetched + assertTrue(alertNames.contains("alert-name_private1")); + assertTrue(alertNames.contains("alert-name-private2")); + } + + @Test + public void testCountPrivateAlertsForPrivilegedUser() { + UserService userService = system.getServiceFactory().getUserService(); + AlertService alertService = system.getServiceFactory().getAlertService(); + ManagementService managementService = system.getServiceFactory().getManagementService(); + + // By default user is not privileged + PrincipalUser user1 = userService.updateUser(new PrincipalUser(admin, "test1", "test1@salesforce.com")); + managementService.setAdministratorPrivilege(user1, true); + PrincipalUser user2 = userService.updateUser(new PrincipalUser(admin, "test2", "test2@salesforce.com")); + + + Alert alert1 = alertService.updateAlert(new 
Alert(user1, user1, "alert-name_private1", EXPRESSION, "* * * * *")); + Alert alert2 = alertService.updateAlert(new Alert(user2, user2, "alert-name-private2", EXPRESSION, "* * * * *")); + Alert alert3 = alertService.updateAlert(new Alert(user2, user2, "alert-name-shared3", EXPRESSION, "* * * * *")); + + alert1.setShared(false); + alertService.updateAlert(alert1); + alert2.setShared(false); + alertService.updateAlert(alert2); + alert3.setShared(true); + alertService.updateAlert(alert3); + + assertEquals(2, alertService.countPrivateAlertsForPrivilegedUser(user1)); + } @Test public void testAlertSerDes() { diff --git a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/dto/ItemsCountDto.java b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/dto/ItemsCountDto.java new file mode 100644 index 000000000..86cbe4564 --- /dev/null +++ b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/dto/ItemsCountDto.java @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016, Salesforce.com, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of Salesforce.com nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +package com.salesforce.dva.argus.ws.dto; + +import java.io.Serializable; + +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.core.Response.Status; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; + +/** + * Items count DTO. + * + * @author Dongpu Jin (djin@salesforce.com) + */ +@SuppressWarnings("serial") +@JsonIgnoreProperties(ignoreUnknown = true) +public class ItemsCountDto extends BaseDto implements Serializable { + + // ~ Instance fields + // ****************************************************************************************************************************** + + private int value; + + // ~ Methods + // ************************************************************************************************************************************** + + /** + * Converts an integer to ItemsCountDto instance. + * + * @param value + * The items count. + * + * @return An itemsCountDto object. 
+ * + * @throws WebApplicationException + * If an error occurs. + */ + public static ItemsCountDto transformToDto(int value) { + if (value < 0) { + throw new WebApplicationException("Items count cannot be negative", Status.INTERNAL_SERVER_ERROR); + } + + ItemsCountDto result = new ItemsCountDto(); + result.setValue(value); + return result; + } + + /** + * Returns the items count. + * + * @return The items count. + */ + public int getValue() { + return this.value; + } + + /** + * Specifies the items count. + * + * @param itemsCount + * The items count. + */ + public void setValue(int value) { + this.value = value; + } + + @Override + public Object createExample() { + ItemsCountDto result = new ItemsCountDto(); + + result.setValue(0); + + return result; + } +} +/* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved. */ diff --git a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java index aebe57563..a65a03e5f 100644 --- a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java +++ b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java @@ -38,6 +38,7 @@ import com.salesforce.dva.argus.service.AlertService; import com.salesforce.dva.argus.ws.annotation.Description; import com.salesforce.dva.argus.ws.dto.AlertDto; +import com.salesforce.dva.argus.ws.dto.ItemsCountDto; import com.salesforce.dva.argus.ws.dto.NotificationDto; import com.salesforce.dva.argus.ws.dto.TriggerDto; import java.math.BigInteger; @@ -155,6 +156,88 @@ public List getAlertsMeta(@Context HttpServletRequest req, @QueryParam List result = getAlertsObj(alertName, owner, shared, true, limit); return AlertDto.transformToDto(result); } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/user") + @Description("Returns user's alerts' metadata. This endpoint is paginated.") + public List getAlertsMetaByOwner(@Context HttpServletRequest req, + @QueryParam("ownername") String ownerName, + @QueryParam("pagesize") Integer pagesize, + @QueryParam("pagenumber") Integer pagenumber) { + + PrincipalUser owner = validateAndGetOwner(req, ownerName); + List result = new ArrayList<>(); + result = alertService.findAlertsByOwnerPaged(owner, true, pagesize, (pagenumber - 1) * pagesize); + return AlertDto.transformToDto(result); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/user/count") + @Description("Returns user's alerts' metadata count.") + public ItemsCountDto countAlertsMetaByOwner(@Context HttpServletRequest req, + @QueryParam("ownername") String ownerName) { + PrincipalUser owner = validateAndGetOwner(req, ownerName); + int result = alertService.countAlertsByOwner(owner); + return ItemsCountDto.transformToDto(result); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/shared") + @Description("Returns all shared alerts' metadata. 
This endpoint is paginated.") + public List getSharedAlertsMeta(@Context HttpServletRequest req, + @QueryParam("pagesize") Integer pagesize, + @QueryParam("pagenumber") Integer pagenumber) { + + List result = new ArrayList<>(); + result = alertService.findSharedAlertsPaged(true, pagesize, (pagenumber - 1) * pagesize); + return AlertDto.transformToDto(result); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/shared/count") + @Description("Returns all shared alerts' metadata count.") + public ItemsCountDto countSharedAlertsMeta(@Context HttpServletRequest req) { + int result = alertService.countSharedAlerts(); + return ItemsCountDto.transformToDto(result); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/privileged") + @Description("Returns all private (non-shared) alerts's meta for given priviledged user. This endpoint is paginated.") + public List getAlertsMetaForPrivilegedUser(@Context HttpServletRequest req, + @QueryParam("ownername") String ownerName, + @QueryParam("pagesize") Integer pagesize, + @QueryParam("pagenumber") Integer pagenumber) { + + PrincipalUser owner = validateAndGetOwner(req, ownerName); + if (owner == null || !owner.isPrivileged()) { + return new ArrayList<>(0); + } + + List result = new ArrayList<>(); + result = alertService.findPrivateAlertsForPrivilegedUserPaged(true, owner, pagesize, pagenumber); + return AlertDto.transformToDto(result); + } + + @GET + @Produces(MediaType.APPLICATION_JSON) + @Path("/meta/privileged/count") + @Description("Returns all private (non-shared) alerts's meta for given priviledged user.") + public ItemsCountDto countAlertsMetaForPrivilegedUser(@Context HttpServletRequest req, + @QueryParam("ownername") String ownerName) { + PrincipalUser owner = validateAndGetOwner(req, ownerName); + if (owner == null || !owner.isPrivileged()) { + return ItemsCountDto.transformToDto(0); + } + + int result = alertService.countPrivateAlertsForPrivilegedUser(owner); + return ItemsCountDto.transformToDto(result); + } /** * Returns the list of alerts created by the user. From 22f9294de0219b21cc4cde6d3a66f29afa3fa9a6 Mon Sep 17 00:00:00 2001 From: djin Date: Tue, 24 Jul 2018 13:21:15 -0700 Subject: [PATCH 02/27] Factor out alerts query common logic --- .../salesforce/dva/argus/entity/Alert.java | 344 +++++++----------- 1 file changed, 122 insertions(+), 222 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java index 536944ded..0181e9892 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java @@ -40,7 +40,9 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.Objects; import javax.persistence.Basic; @@ -62,6 +64,7 @@ import javax.persistence.UniqueConstraint; import javax.persistence.criteria.CriteriaBuilder; import javax.persistence.criteria.CriteriaQuery; +import javax.persistence.criteria.Predicate; import javax.persistence.criteria.Root; import javax.persistence.criteria.Selection; @@ -80,7 +83,6 @@ import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.module.SimpleModule; import com.salesforce.dva.argus.service.metric.MetricReader; -import com.salesforce.dva.argus.util.Cron; /** * The entity which encapsulates information about a Dashboard. 
@@ -217,6 +219,11 @@ public class Alert extends JPAEntity implements Serializable, CronJob { // Default values for page limit and page offset private static int DEFAULT_PAGE_LIMIT = 10; private static int DEFAULT_PAGE_OFFSET = 0; + + // WHERE condition key names used in criteria queries + private static String DELETED_KEY = "deleted"; + private static String SHARED_KEY = "shared"; + private static String OWNER_KEY = "owner"; //~ Constructors ********************************************************************************************************************************* @@ -378,41 +385,15 @@ public static int countByOwner(EntityManager em, PrincipalUser owner) { public static List findByOwnerMeta(EntityManager em, PrincipalUser owner) { requireArgument(em != null, "Entity manager can not be null."); + requireArgument(owner != null, "Owner can not be null."); try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for(Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("owner"), owner)); - - TypedQuery query = em.createQuery(cq); - - List result = query.getResultList(); - - List alerts = new ArrayList<>(); - for(Tuple tuple : result) { + Map whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); + whereParams.put(OWNER_KEY, owner); - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), PrincipalUser.class.cast(tuple.get("owner")), - String.class.cast(tuple.get("name")), String.class.cast(tuple.get("expression")), - String.class.cast(tuple.get("cronEntry"))); - - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); - - alerts.add(a); - } - - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, null, null, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -435,6 +416,8 @@ public static List findByOwnerMeta(EntityManager em, PrincipalUser owner) public static List findByOwnerMetaPaged(EntityManager em, PrincipalUser owner, Integer limit, Integer offset) { requireArgument(em != null, "Entity manager can not be null."); + requireArgument(owner != null, "Owner cannot be null"); + if (limit == null || limit <= 0) { limit = DEFAULT_PAGE_LIMIT; } @@ -443,44 +426,12 @@ public static List findByOwnerMetaPaged(EntityManager em, PrincipalUser o } try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for (Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("owner"), owner)); - cq.orderBy(cb.asc(e.get("id"))); - - TypedQuery query = em.createQuery(cq); - - // Set limit and offset for pagination - query.setMaxResults(limit); - query.setFirstResult(offset); - - List result = query.getResultList(); + Map 
whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); + whereParams.put(OWNER_KEY, owner); - List alerts = new ArrayList<>(); - for (Tuple tuple : result) { - - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), - PrincipalUser.class.cast(tuple.get("owner")), String.class.cast(tuple.get("name")), - String.class.cast(tuple.get("expression")), String.class.cast(tuple.get("cronEntry"))); - - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); - - alerts.add(a); - } - - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, limit, offset, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -517,39 +468,11 @@ public static List findAllMeta(EntityManager em) { requireArgument(em != null, "Entity manager can not be null."); try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for(Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - - cq.where(cb.equal(e.get("deleted"), false)); - TypedQuery query = em.createQuery(cq); - - List result = query.getResultList(); - - List alerts = new ArrayList<>(); - for(Tuple tuple : result) { - - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), PrincipalUser.class.cast(tuple.get("owner")), - String.class.cast(tuple.get("name")), String.class.cast(tuple.get("expression")), - String.class.cast(tuple.get("cronEntry"))); - - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); + Map whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); - alerts.add(a); - } - - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, null, null, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -810,49 +733,16 @@ public static List findSharedAlertsMeta(EntityManager em, PrincipalUser o requireArgument(em != null, "Entity manager can not be null."); try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for(Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - - if(owner != null){ - cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("shared"), true), cb.equal(e.get("owner"), owner)); - } else{ - cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("shared"), true)); - } - - TypedQuery query = em.createQuery(cq); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); - if (limit != null) { - query.setMaxResults(limit); - } - - List result = 
query.getResultList(); - - List alerts = new ArrayList<>(); - for(Tuple tuple : result) { - - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), PrincipalUser.class.cast(tuple.get("owner")), - String.class.cast(tuple.get("name")), String.class.cast(tuple.get("expression")), - String.class.cast(tuple.get("cronEntry"))); - - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); - - alerts.add(a); + Map whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); + whereParams.put(SHARED_KEY, true); + + if (owner != null) { + whereParams.put(OWNER_KEY, owner); } - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, limit, null, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -882,46 +772,13 @@ public static List findSharedAlertsMetaPaged(EntityManager em, Integer li } try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for (Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - cq.where(cb.equal(e.get("deleted"), false), cb.equal(e.get("shared"), true)); - cq.orderBy(cb.asc(e.get("id"))); - - TypedQuery query = em.createQuery(cq); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); + // Get shared alerts + Map whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); + whereParams.put(SHARED_KEY, true); - // Set limit and offset for pagination - query.setMaxResults(limit); - query.setFirstResult(offset); - - List result = query.getResultList(); - - List alerts = new ArrayList<>(); - for (Tuple tuple : result) { - - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), - PrincipalUser.class.cast(tuple.get("owner")), String.class.cast(tuple.get("name")), - String.class.cast(tuple.get("expression")), String.class.cast(tuple.get("cronEntry"))); - - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); - - alerts.add(a); - } - - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, limit, offset, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -995,48 +852,13 @@ public static List findPrivateAlertsForPrivilegedUserMetaPaged(EntityMana } try { - CriteriaBuilder cb = em.getCriteriaBuilder(); - CriteriaQuery cq = cb.createTupleQuery(); - Root e = cq.from(Alert.class); - - List> fieldsToSelect = new ArrayList<>(); - for(Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { - fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); - } - cq.multiselect(fieldsToSelect); - // Query for alerts that are not marked as deleted, non-shared, owned by others - cq.where(cb.equal(e.get("deleted"), false), 
cb.equal(e.get("shared"), false)); - cq.orderBy(cb.asc(e.get("id"))); - - TypedQuery query = em.createQuery(cq); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); - - // Set limit and offset for pagination - query.setMaxResults(limit); - query.setFirstResult(offset); - - List result = query.getResultList(); - - List alerts = new ArrayList<>(); - for(Tuple tuple : result) { - - Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), PrincipalUser.class.cast(tuple.get("owner")), - String.class.cast(tuple.get("name")), String.class.cast(tuple.get("expression")), - String.class.cast(tuple.get("cronEntry"))); + Map whereParams = new HashMap<>(); + whereParams.put(DELETED_KEY, false); + whereParams.put(SHARED_KEY, false); - a.id = BigInteger.class.cast(tuple.get("id")); - a.enabled = Boolean.class.cast(tuple.get("enabled")); - a.createdDate = Date.class.cast(tuple.get("createdDate")); - a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); - a.shared = Boolean.class.cast(tuple.get("shared")); - a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); - - alerts.add(a); - } - - return alerts; + // Get alerts meta + return getAlertsMetaPaged(em, limit, offset, whereParams); } catch (NoResultException ex) { return new ArrayList<>(0); } @@ -1095,7 +917,85 @@ public static List findByPrefix(EntityManager em, String prefix) { } //~ Methods ************************************************************************************************************************************** + + /* + * A helper method executes the criteria query to get alerts meta with given + * limit and offset. + */ + private static List getAlertsMetaPaged(EntityManager em, Integer limit, Integer offset, + Map whereParams) { + CriteriaBuilder cb = em.getCriteriaBuilder(); + CriteriaQuery cq = cb.createTupleQuery(); + Root e = cq.from(Alert.class); + + // Set fields to select + List> fieldsToSelect = new ArrayList<>(); + for (Field field : FieldUtils.getFieldsListWithAnnotation(Alert.class, Metadata.class)) { + fieldsToSelect.add(e.get(field.getName()).alias(field.getName())); + } + cq.multiselect(fieldsToSelect); + + // Set where conditions, so far we only use boolean and PrincipalUser + // type conditions. New types can be easily added here on demand. 
+ if (whereParams != null && whereParams.size() > 0) { + List predicates = new ArrayList<>(); + + for (String key : whereParams.keySet()) { + Object value = whereParams.get(key); + if (value instanceof Boolean) { + // Boolean type condition + boolean val = (boolean) value; + predicates.add(cb.equal(e.get(key), val)); + } else if (value instanceof PrincipalUser) { + // PrincipalUser type condition + predicates.add(cb.equal(e.get(key), (PrincipalUser) value)); + } + } + + if (predicates.size() > 0) { + cq.where(predicates.toArray(new Predicate[predicates.size()])); + } + } + + // Sort result by alert id + cq.orderBy(cb.asc(e.get("id"))); + + TypedQuery query = em.createQuery(cq); + query.setHint("javax.persistence.cache.storeMode", "REFRESH"); + query.setHint(QueryHints.REFRESH, HintValues.TRUE); + + // Set limit for pagination + if (limit != null && limit > 0) { + query.setMaxResults(limit); + } + // Set offset for pagination + if (offset != null && offset >= 0) { + query.setFirstResult(offset); + } + + List result = query.getResultList(); + List alerts = new ArrayList<>(); + + for (Tuple tuple : result) { + + Alert a = new Alert(PrincipalUser.class.cast(tuple.get("createdBy")), + PrincipalUser.class.cast(tuple.get("owner")), String.class.cast(tuple.get("name")), + String.class.cast(tuple.get("expression")), String.class.cast(tuple.get("cronEntry"))); + + a.id = BigInteger.class.cast(tuple.get("id")); + a.enabled = Boolean.class.cast(tuple.get("enabled")); + a.createdDate = Date.class.cast(tuple.get("createdDate")); + a.modifiedDate = Date.class.cast(tuple.get("modifiedDate")); + a.shared = Boolean.class.cast(tuple.get("shared")); + a.modifiedBy = PrincipalUser.class.cast(tuple.get("modifiedBy")); + + alerts.add(a); + } + + return alerts; + } + /** * Returns the CRON entry for the alert. * From 2685810bdc0d9ba821dbdac2a2fd70be527b0a6f Mon Sep 17 00:00:00 2001 From: djin Date: Tue, 24 Jul 2018 16:22:53 -0700 Subject: [PATCH 03/27] Consolidate count APIs --- .../dva/argus/service/AlertService.java | 37 ++++++--------- .../service/alert/DefaultAlertService.java | 45 ++++++++++--------- .../dva/argus/service/AlertServiceTest.java | 8 ++-- .../argus/ws/resources/AlertResources.java | 6 +-- 4 files changed, 45 insertions(+), 51 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java index f746dbc65..4c9547b99 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java @@ -145,15 +145,6 @@ public interface AlertService extends Service { * @return The list of alerts. */ List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataOnly, Integer limit, Integer offset); - - /** - * Count the total number of alerts for the owner. - * - * @param owner The owner of the alerts. Cannot be null. - * - * @return The total number of owner's alert. - */ - int countAlertsByOwner(PrincipalUser owner); /** * Returns a list of alerts that have been marked for deletion. @@ -292,13 +283,6 @@ public interface AlertService extends Service { * @return The list of shared alerts. */ List findSharedAlertsPaged(boolean metadataOnly, Integer limit, Integer offset); - - /** - * Count the total number of shared alerts. - * - * @return The total number of shared alerts. - */ - int countSharedAlerts(); /** * Returns the list of supported notifiers. 
@@ -333,14 +317,19 @@ public interface AlertService extends Service { List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset); /** - * Count the total number of private alerts (non-shared alerts) for the privileged user. - * - * @param owner The owner to filter on. - * - * @return The total number of private alerts for privileged user. - */ - int countPrivateAlertsForPrivilegedUser(PrincipalUser owner); - + * Count alerts. If both countSharedAlertsOnly and countPrivateAlertsOnly + * are false, it counts the owner's alerts. + * + * @param countSharedAlertsOnly + * The flag of whether to count only shared alerts. + * @param countPrivateAlertsOnly + * The flag of whether to count only private (non-shared) + * alerts; countSharedAlertsOnly must be false. + * @param owner + * The owner whose alerts are counted. + * @return The alerts count. + */ + int countAlerts(boolean countSharedAlertsOnly, boolean countPrivateAlertsOnly, PrincipalUser owner); //~ Enums **************************************************************************************************************************************** diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java index 339f29a8b..1bdf80419 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java @@ -284,13 +284,6 @@ public List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataO return metadataOnly ? Alert.findByOwnerMetaPaged(_emProvider.get(), owner, limit, offset) : Alert.findByOwnerPaged(_emProvider.get(), owner, limit, offset); } - @Override - public int countAlertsByOwner(PrincipalUser owner) { - requireNotDisposed(); - requireArgument(owner != null, "Owner cannot be null."); - return Alert.countByOwner(_emProvider.get(), owner); - } - @Override public Alert findAlertByPrimaryKey(BigInteger id) { requireNotDisposed(); @@ -848,12 +841,6 @@ public List findSharedAlertsPaged(boolean metadataOnly, Integer limit, In requireNotDisposed(); return metadataOnly ?
Alert.findSharedAlertsMetaPaged(_emProvider.get(), limit, offset) : Alert.findSharedAlertsPaged(_emProvider.get(), limit, offset); } - - @Override - public int countSharedAlerts() { - requireNotDisposed(); - return Alert.countSharedAlerts(_emProvider.get()); - } @Override public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset) { @@ -868,15 +855,32 @@ public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, } @Override - public int countPrivateAlertsForPrivilegedUser(PrincipalUser owner) { + public int countAlerts(boolean countSharedAlertsOnly, boolean countPrivateAlertsOnly, PrincipalUser owner) { requireNotDisposed(); - - // Invalid user nor non-privileged user shall not view other's non-shared alerts, thus immediately return 0 - if (owner == null || !owner.isPrivileged()) { - return 0; + + // Count total number of shared alerts for the shared alerts tab + if (countSharedAlertsOnly) { + return Alert.countSharedAlerts(_emProvider.get()); } - - return Alert.countPrivateAlertsForPrivilegedUser(_emProvider.get(), owner); + + // Count total number of private alerts (non-shared alerts) if user is + // privileged user, otherwise return 0 + if (countPrivateAlertsOnly) { + // Neither an invalid user nor a non-privileged user may view others' + // non-shared alerts, so immediately return 0 + if (owner == null || !owner.isPrivileged()) { + return 0; + } + + return Alert.countPrivateAlertsForPrivilegedUser(_emProvider.get(), owner); + } + + // Count total number of user alerts + if (owner != null) { + return Alert.countByOwner(_emProvider.get(), owner); + } + + return 0; } /** @@ -1205,5 +1209,6 @@ public void setTriggeredMetric(Metric triggeredMetric) { this.triggeredMetric = triggeredMetric; } } + } /* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved.
*/ \ No newline at end of file diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java index 229607fd4..5ba15d21f 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java @@ -279,7 +279,7 @@ public void testCountAlertsByOwner() { expectedAlerts.add(alertService.updateAlert(new Alert(user, user, "alert_" + i, EXPRESSION, "* * * * *"))); } - int cnt = alertService.countAlertsByOwner(user); + int cnt = alertService.countAlerts(false, false, user); assertEquals(cnt, expectedAlerts.size()); } @@ -680,7 +680,7 @@ public void testCountSharedAlertsMetaPaged() { alert3.setShared(false); alertService.updateAlert(alert3); - assertEquals(2, alertService.countSharedAlerts()); + assertEquals(2, alertService.countAlerts(true, false, null)); } @Test @@ -813,7 +813,7 @@ public void testCountPrivateAlertsForNonPrivilegedUser() { alertService.updateAlert(alert3); // Assert non-privileged user see zero private alerts - assertEquals(0, alertService.countPrivateAlertsForPrivilegedUser(user1)); + assertEquals(0, alertService.countAlerts(false, true, user1)); } @Test @@ -883,7 +883,7 @@ public void testCountPrivateAlertsForPrivilegedUser() { alert3.setShared(true); alertService.updateAlert(alert3); - assertEquals(2, alertService.countPrivateAlertsForPrivilegedUser(user1)); + assertEquals(2, alertService.countAlerts(false, true, user1)); } @Test diff --git a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java index 63642eed4..943db715a 100644 --- a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java +++ b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java @@ -179,7 +179,7 @@ public List getAlertsMetaByOwner(@Context HttpServletRequest req, public ItemsCountDto countAlertsMetaByOwner(@Context HttpServletRequest req, @QueryParam("ownername") String ownerName) { PrincipalUser owner = validateAndGetOwner(req, ownerName); - int result = alertService.countAlertsByOwner(owner); + int result = alertService.countAlerts(false, false, owner); return ItemsCountDto.transformToDto(result); } @@ -201,7 +201,7 @@ public List getSharedAlertsMeta(@Context HttpServletRequest req, @Path("/meta/shared/count") @Description("Returns all shared alerts' metadata count.") public ItemsCountDto countSharedAlertsMeta(@Context HttpServletRequest req) { - int result = alertService.countSharedAlerts(); + int result = alertService.countAlerts(true, false, null); return ItemsCountDto.transformToDto(result); } @@ -235,7 +235,7 @@ public ItemsCountDto countAlertsMetaForPrivilegedUser(@Context HttpServletReques return ItemsCountDto.transformToDto(0); } - int result = alertService.countPrivateAlertsForPrivilegedUser(owner); + int result = alertService.countAlerts(false, true, owner); return ItemsCountDto.transformToDto(result); } From 73e7e7888127164dab03e5be269632c0dcdcc08d Mon Sep 17 00:00:00 2001 From: djin Date: Thu, 26 Jul 2018 09:51:59 -0700 Subject: [PATCH 04/27] Count with context --- .../dva/argus/service/AlertService.java | 15 +-- .../service/alert/AlertsCountContext.java | 111 ++++++++++++++++++ .../service/alert/DefaultAlertService.java | 12 +- .../dva/argus/service/AlertServiceTest.java | 13 +- 
.../argus/ws/resources/AlertResources.java | 12 +- 5 files changed, 143 insertions(+), 20 deletions(-) create mode 100644 ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java index 4c9547b99..960c50bc7 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java @@ -36,6 +36,7 @@ import com.salesforce.dva.argus.entity.Notification; import com.salesforce.dva.argus.entity.PrincipalUser; import com.salesforce.dva.argus.entity.Trigger; +import com.salesforce.dva.argus.service.alert.AlertsCountContext; import com.salesforce.dva.argus.service.alert.DefaultAlertService.NotificationContext; import com.salesforce.dva.argus.service.alert.notifier.*; import com.salesforce.dva.argus.service.warden.WardenApiNotifier; @@ -317,19 +318,13 @@ public interface AlertService extends Service { List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset); /** - * Count alerts. If both countSharedAlertsOnly and countPrivateAlertsOnly - * are false, it counts owner's alerts. + * Count alerts with the given AlertsCountContext. * - * @param countSharedAlertsOnly - * The flag of whether to only count all shared alerts. - * @param countPrivateAlertsOnly - * The flag of whether only count all private (non-shared) - * alerts. countSharedAlertsOnly must be false. - * @param owner - * The owner + * @param context + * The context of counting alerts. * @return Alerts count. */ - int countAlerts(boolean countSharedAlertsOnly, boolean countPrivateAlertsOnly, PrincipalUser owner); + int countAlerts(AlertsCountContext context); //~ Enums **************************************************************************************************************************************** diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java new file mode 100644 index 000000000..bc50c498c --- /dev/null +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016, Salesforce.com, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of Salesforce.com nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +package com.salesforce.dva.argus.service.alert; + +import com.salesforce.dva.argus.entity.PrincipalUser; + +/** + * A class that represents the context of counting alerts. + * + * @author Dongpu Jin (djin@salesforce.com) + */ +public class AlertsCountContext { + + // Whether to count user alerts + private boolean countUserAlerts; + + // Whether to count shared alerts + private boolean countSharedAlerts; + + // Whether to count private (non-shared) alerts for a + // privileged user + private boolean countPrivateAlerts; + + // Current owner + private PrincipalUser owner; + + // Constructor + private AlertsCountContext(AlertsCountContextBuilder builder) { + this.countUserAlerts = builder.userAlerts; + this.countSharedAlerts = builder.sharedAlerts; + this.countPrivateAlerts = builder.privateAlerts; + this.owner = builder.owner; + } + + public boolean isCountUserAlerts() { + return countUserAlerts; + } + + public boolean isCountSharedAlerts() { + return countSharedAlerts; + } + + public boolean isCountPrivateAlerts() { + return countPrivateAlerts; + } + + public PrincipalUser getPrincipalUser() { + return owner; + } + + // Builder for the context. + public static class AlertsCountContextBuilder { + private boolean userAlerts = false; + private boolean sharedAlerts = false; + private boolean privateAlerts = false; + private PrincipalUser owner = null; + + public AlertsCountContextBuilder countUserAlerts() { + this.userAlerts = true; + return this; + } + + public AlertsCountContextBuilder countSharedAlerts() { + this.sharedAlerts = true; + return this; + } + + public AlertsCountContextBuilder countPrivateAlerts() { + this.privateAlerts = true; + return this; + } + + public AlertsCountContextBuilder setPrincipalUser(PrincipalUser owner) { + this.owner = owner; + return this; + } + + public AlertsCountContext build() { + return new AlertsCountContext(this); + } + } +} diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java index 1bdf80419..e23618ecf 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java @@ -855,17 +855,23 @@ public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, } @Override - public int countAlerts(boolean countSharedAlertsOnly, boolean countPrivateAlertsOnly, PrincipalUser owner) { + public int countAlerts(AlertsCountContext context) { requireNotDisposed(); + + if (context == null) { + return 0; + } // Count total number of shared alerts for the shared alerts tab - if (countSharedAlertsOnly) { + if (context.isCountSharedAlerts()) { return Alert.countSharedAlerts(_emProvider.get()); } + PrincipalUser owner = context.getPrincipalUser(); + // Count total number of private alerts (non-shared alerts) if user is //
privileged user, otherwise return 0 - if (countPrivateAlertsOnly) { + if (context.isCountPrivateAlerts()) { // Neither an invalid user nor a non-privileged user may view others' // non-shared alerts, so immediately return 0 if (owner == null || !owner.isPrivileged()) { diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java index 5ba15d21f..0670cc18c 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java @@ -57,6 +57,7 @@ import com.salesforce.dva.argus.entity.PrincipalUser; import com.salesforce.dva.argus.entity.Trigger; import com.salesforce.dva.argus.entity.Trigger.TriggerType; +import com.salesforce.dva.argus.service.alert.AlertsCountContext; import com.salesforce.dva.argus.service.alert.DefaultAlertService.AlertWithTimestamp; public class AlertServiceTest extends AbstractTest { @@ -279,7 +280,8 @@ public void testCountAlertsByOwner() { expectedAlerts.add(alertService.updateAlert(new Alert(user, user, "alert_" + i, EXPRESSION, "* * * * *"))); } - int cnt = alertService.countAlerts(false, false, user); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countUserAlerts().setPrincipalUser(user).build(); + int cnt = alertService.countAlerts(context); assertEquals(cnt, expectedAlerts.size()); } @@ -680,7 +682,8 @@ public void testCountSharedAlertsMetaPaged() { alert3.setShared(false); alertService.updateAlert(alert3); - assertEquals(2, alertService.countAlerts(true, false, null)); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countSharedAlerts().build(); + assertEquals(2, alertService.countAlerts(context)); } @Test @@ -813,7 +816,8 @@ public void testCountPrivateAlertsForNonPrivilegedUser() { alertService.updateAlert(alert3); // Assert the non-privileged user sees zero private alerts - assertEquals(0, alertService.countAlerts(false, true, user1)); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts().setPrincipalUser(user1).build(); + assertEquals(0, alertService.countAlerts(context)); } @Test @@ -883,7 +887,8 @@ public void testCountPrivateAlertsForPrivilegedUser() { alert3.setShared(true); alertService.updateAlert(alert3); - assertEquals(2, alertService.countAlerts(false, true, user1)); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts().setPrincipalUser(user1).build(); + assertEquals(2, alertService.countAlerts(context)); } @Test diff --git a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java index 943db715a..d4a55dc24 100644 --- a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java +++ b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java @@ -36,6 +36,7 @@ import com.salesforce.dva.argus.entity.PrincipalUser; import com.salesforce.dva.argus.entity.Trigger; import com.salesforce.dva.argus.service.AlertService; +import com.salesforce.dva.argus.service.alert.AlertsCountContext; import com.salesforce.dva.argus.ws.annotation.Description; import com.salesforce.dva.argus.ws.dto.AlertDto; import com.salesforce.dva.argus.ws.dto.ItemsCountDto; @@ -179,7 +180,9 @@ public List getAlertsMetaByOwner(@Context
HttpServletRequest req, public ItemsCountDto countAlertsMetaByOwner(@Context HttpServletRequest req, @QueryParam("ownername") String ownerName) { PrincipalUser owner = validateAndGetOwner(req, ownerName); - int result = alertService.countAlerts(false, false, owner); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countUserAlerts() + .setPrincipalUser(owner).build(); + int result = alertService.countAlerts(context); return ItemsCountDto.transformToDto(result); } @@ -201,7 +204,8 @@ public List getSharedAlertsMeta(@Context HttpServletRequest req, @Path("/meta/shared/count") @Description("Returns all shared alerts' metadata count.") public ItemsCountDto countSharedAlertsMeta(@Context HttpServletRequest req) { - int result = alertService.countAlerts(true, false, null); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countSharedAlerts().build(); + int result = alertService.countAlerts(context); return ItemsCountDto.transformToDto(result); } @@ -235,7 +239,9 @@ public ItemsCountDto countAlertsMetaForPrivilegedUser(@Context HttpServletReques return ItemsCountDto.transformToDto(0); } - int result = alertService.countAlerts(false, true, owner); + AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts() + .setPrincipalUser(owner).build(); + int result = alertService.countAlerts(context); return ItemsCountDto.transformToDto(result); } From 930f2e2fed0ca85a92d4121abc7c839e6e0da570 Mon Sep 17 00:00:00 2001 From: djin Date: Fri, 27 Jul 2018 15:00:07 -0700 Subject: [PATCH 05/27] Code clean up --- .../salesforce/dva/argus/entity/Alert.java | 140 ++---------------- .../dva/argus/service/AlertService.java | 13 +- .../service/alert/AlertsCountContext.java | 9 +- .../service/alert/DefaultAlertService.java | 12 +- .../dva/argus/service/AlertServiceTest.java | 69 +++++++-- .../argus/ws/resources/AlertResources.java | 6 +- 6 files changed, 93 insertions(+), 156 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java index 0181e9892..ff4ca074c 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/Alert.java @@ -152,27 +152,15 @@ name = "Alert.getSharedAlertsByOwner", query = "SELECT a from Alert a where a.owner = :owner AND a.shared = true AND a.id not in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = true)" ), - // Paginated queries - @NamedQuery( - name = "Alert.findByOwnerPaged", - query = "SELECT a FROM Alert a WHERE a.owner = :owner AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false) order by a.id asc" - ), + // Count alert queries @NamedQuery( name = "Alert.countByOwner", query = "SELECT count(a) FROM Alert a WHERE a.owner = :owner AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" ), - @NamedQuery( - name = "Alert.getSharedAlertsPaged", - query = "SELECT a from Alert a where a.shared = true AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false) order by a.id asc" - ), @NamedQuery( name = "Alert.countSharedAlerts", query = "SELECT count(a) from Alert a where a.shared = true AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" ), - @NamedQuery( - name = "Alert.getPrivateAlertsForPrivilegedUserPaged", - query = "SELECT a from Alert a where a.shared = false AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = 
false) order by a.id asc" - ), @NamedQuery( name = "Alert.countPrivateAlertsForPrivilegedUser", query = "SELECT count(a) from Alert a where a.shared = false AND a.id in (SELECT jpa.id from JPAEntity jpa where jpa.deleted = false)" @@ -321,43 +309,6 @@ public static List findByOwner(EntityManager em, PrincipalUser owner) { } } - /** - * Finds all alerts for the given owner with given limit and offset. - * - * @param em - * The entity manager to user. Cannot be null. - * @param owner - * The owner to retrieve alerts for. Cannot be null. - * @param limit - * The limit of return to return. - * @param offset - * The starting offset of the result. - * - * @return The list of alerts for the owner. - */ - public static List findByOwnerPaged(EntityManager em, PrincipalUser owner, Integer limit, Integer offset) { - requireArgument(em != null, "Entity manager can not be null."); - requireArgument(owner != null, "Owner cannot be null."); - if (limit == null || limit <= 0) { - limit = DEFAULT_PAGE_LIMIT; - } - if (offset == null || offset < 0) { - offset = DEFAULT_PAGE_OFFSET; - } - - TypedQuery query = em.createNamedQuery("Alert.findByOwnerPaged", Alert.class); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - try { - query.setParameter("owner", owner); - query.setMaxResults(limit); - query.setFirstResult(offset); - return query.getResultList(); - } catch (NoResultException ex) { - return new ArrayList<>(0); - } - } - /** * Count the number of alerts for the given owner. * @@ -383,6 +334,15 @@ public static int countByOwner(EntityManager em, PrincipalUser owner) { } } + /** + * Find alerts meta filtered by the owner. + * + * @param em + * The entity manager to use. Cannot be null. + * @param owner + * The owner to retrieve alerts for. Cannot be null. + * @return A list of alerts owned by the owner. + */ public static List findByOwnerMeta(EntityManager em, PrincipalUser owner) { requireArgument(em != null, "Entity manager can not be null."); requireArgument(owner != null, "Owner can not be null."); @@ -662,43 +622,6 @@ public static List findSharedAlerts(EntityManager em, PrincipalUser owner } } - /** - * Find all shared alerts with given limit and offset. - * - * @param em - * The entity manager to user. Cannot be null. - * @param owner - * The owner of shared alerts to filter on - * @param limit - * The maximum number of rows to return. - * @param offset - * The starting offset of the result. - * - * @return The list of shared alerts with given limit and offset. - */ - public static List findSharedAlertsPaged(EntityManager em, Integer limit, Integer offset) { - requireArgument(em != null, "Entity manager can not be null."); - if (limit == null || limit <= 0) { - limit = DEFAULT_PAGE_LIMIT; - } - if (offset == null || offset < 0) { - offset = DEFAULT_PAGE_OFFSET; - } - - TypedQuery query; - query = em.createNamedQuery("Alert.getSharedAlertsPaged", Alert.class); - query.setMaxResults(limit); - query.setFirstResult(offset); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - - try { - return query.getResultList(); - } catch (NoResultException ex) { - return new ArrayList<>(0); - } - } - /** * Count the total number of all shared alerts. * @@ -784,49 +707,6 @@ public static List findSharedAlertsMetaPaged(EntityManager em, Integer li } } - /** - * Find all private alerts (non-shared alerts) for given privileged user - * with given limit and offset.
- * - * @param em - * The entity manager to user. Cannot be null. - * @param owner - * The owner to filter on - * @param limit - * The maximum number of rows to return. - * @param offset - * The starting offset of the result. - * - * @return The list of private alerts with given limit and offset. - */ - public static List findPrivateAlertsForPrivilegedUserPaged(EntityManager em, PrincipalUser owner, - Integer limit, Integer offset) { - requireArgument(em != null, "Entity manager can not be null."); - // Invalid user nor non-privileged user shall not view other's - // non-shared alerts, thus immediately return empty list - if (owner == null || !owner.isPrivileged()) { - return new ArrayList<>(0); - } - if (limit == null || limit <= 0) { - limit = DEFAULT_PAGE_LIMIT; - } - if (offset == null || offset < 0) { - offset = DEFAULT_PAGE_OFFSET; - } - - TypedQuery query = em.createNamedQuery("Alert.getPrivateAlertsForPrivilegedUserPaged", Alert.class); - query.setHint(QueryHints.REFRESH, HintValues.TRUE); - query.setHint("javax.persistence.cache.storeMode", "REFRESH"); - try { - query.setParameter("owner", owner); - query.setMaxResults(limit); - query.setFirstResult(offset); - return query.getResultList(); - } catch (NoResultException ex) { - return new ArrayList<>(0); - } - } - /** * Find all private alerts (non-shared alerts) meta for given privileged user with given limit and offset. * diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java index 960c50bc7..84df30ef3 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/AlertService.java @@ -142,10 +142,12 @@ public interface AlertService extends Service { * Returns a list of alerts for an owner with given limit and offset. * * @param owner The owner to return alerts for. Cannot be null. + * @param limit The number of items to fetch. + * @param offset The starting point of current page. * * @return The list of alerts. */ - List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataOnly, Integer limit, Integer offset); + List findAlertsByOwnerPaged(PrincipalUser owner, Integer limit, Integer offset); /** * Returns a list of alerts that have been marked for deletion. @@ -280,10 +282,13 @@ public interface AlertService extends Service { /** * Return a list of shared alerts. + * + * @param limit The number of items to fetch. + * @param offset The starting point of current page. * * @return The list of shared alerts. */ - List findSharedAlertsPaged(boolean metadataOnly, Integer limit, Integer offset); + List findSharedAlertsPaged(Integer limit, Integer offset); /** * Returns the list of supported notifiers. @@ -312,10 +317,12 @@ public interface AlertService extends Service { * Find a list of private alerts (non-shared alerts) for the given privileged user. * * @param owner The owner to filter on. + * @param limit The number of items to fetch. + * @param offset The starting point of current page. * * @return The list of private alerts if privileged user. */ - List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset); + List findPrivateAlertsForPrivilegedUserPaged(PrincipalUser owner, Integer limit, Integer offset); /** * Count alerts with the given AlertsCountContext. 
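For reference, a minimal sketch of the context-driven counting declared above, mirroring the usage that appears in AlertServiceTest later in this series (the alertService handle and owner are assumed for illustration):

    // Count the owner's alerts; the builder flags are mutually exclusive.
    AlertsCountContext context = new AlertsCountContext.AlertsCountContextBuilder()
            .countUserAlerts()
            .setPrincipalUser(owner)
            .build();
    int userAlertCount = alertService.countAlerts(context);

    // Counting shared alerts needs no principal user.
    int sharedAlertCount = alertService.countAlerts(
            new AlertsCountContext.AlertsCountContextBuilder().countSharedAlerts().build());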
diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java index bc50c498c..1fe0fc646 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/AlertsCountContext.java @@ -62,15 +62,18 @@ private AlertsCountContext(AlertsCountContextBuilder builder) { } public boolean isCountUserAlerts() { - return countUserAlerts; + boolean isValid = owner != null && !countSharedAlerts && !countPrivateAlerts; + return isValid && countUserAlerts; } public boolean isCountSharedAlerts() { - return countSharedAlerts; + boolean isValid = !countUserAlerts && !countPrivateAlerts; + return isValid && countSharedAlerts; } public boolean isCountPrivateAlerts() { - return countPrivateAlerts; + boolean isValid = owner != null && !countUserAlerts && !countSharedAlerts; + return isValid && countPrivateAlerts; } public PrincipalUser getPrincipalUser() { diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java index e23618ecf..62e82fef0 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/alert/DefaultAlertService.java @@ -277,11 +277,11 @@ public List findAlertsByOwner(PrincipalUser owner, boolean metadataOnly) } @Override - public List findAlertsByOwnerPaged(PrincipalUser owner, boolean metadataOnly, Integer limit, Integer offset) { + public List findAlertsByOwnerPaged(PrincipalUser owner, Integer limit, Integer offset) { requireNotDisposed(); requireArgument(owner != null, "Owner cannot be null."); - return metadataOnly ? Alert.findByOwnerMetaPaged(_emProvider.get(), owner, limit, offset) : Alert.findByOwnerPaged(_emProvider.get(), owner, limit, offset); + return Alert.findByOwnerMetaPaged(_emProvider.get(), owner, limit, offset); } @Override @@ -837,13 +837,13 @@ public List findSharedAlerts(boolean metadataOnly, PrincipalUser owner, I } @Override - public List findSharedAlertsPaged(boolean metadataOnly, Integer limit, Integer offset) { + public List findSharedAlertsPaged(Integer limit, Integer offset) { requireNotDisposed(); - return metadataOnly ? Alert.findSharedAlertsMetaPaged(_emProvider.get(), limit, offset) : Alert.findSharedAlertsPaged(_emProvider.get(), limit, offset); + return Alert.findSharedAlertsMetaPaged(_emProvider.get(), limit, offset); } @Override - public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, PrincipalUser owner, Integer limit, Integer offset) { + public List findPrivateAlertsForPrivilegedUserPaged(PrincipalUser owner, Integer limit, Integer offset) { requireNotDisposed(); // Invalid user nor non-privileged user shall not view other's non-shared alerts, thus immediately return empty list @@ -851,7 +851,7 @@ public List findPrivateAlertsForPrivilegedUserPaged(boolean metadataOnly, return new ArrayList<>(0); } - return metadataOnly ? 
Alert.findPrivateAlertsForPrivilegedUserMetaPaged(_emProvider.get(), owner, limit, offset) : Alert.findPrivateAlertsForPrivilegedUserPaged(_emProvider.get(), owner, limit, offset); + return Alert.findPrivateAlertsForPrivilegedUserMetaPaged(_emProvider.get(), owner, limit, offset); } @Override diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java index 0670cc18c..bc146fa31 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/AlertServiceTest.java @@ -238,22 +238,22 @@ public void testFindAlertsByOwnerPaged() { List actualAlerts = new ArrayList<>(); // Fetch first page - List page = alertService.findAlertsByOwnerPaged(user, true, limit, 0); + List page = alertService.findAlertsByOwnerPaged(user, limit, 0); assertEquals(page.size(), limit); actualAlerts.addAll(page); // Fetch second page - page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + page = alertService.findAlertsByOwnerPaged(user, limit, actualAlerts.size()); assertEquals(page.size(), limit); actualAlerts.addAll(page); // Fetch remaining alerts (less than a page) - page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + page = alertService.findAlertsByOwnerPaged(user, limit, actualAlerts.size()); assertEquals(page.size(), expectedAlerts.size() - actualAlerts.size()); actualAlerts.addAll(page); // Trying to fetch again should return an empty result - page = alertService.findAlertsByOwnerPaged(user, true, limit, actualAlerts.size()); + page = alertService.findAlertsByOwnerPaged(user, limit, actualAlerts.size()); assertEquals(0, page.size()); Set actualSet = new HashSet<>(); @@ -649,17 +649,17 @@ public void testFindSharedAlertsMetaPaged() { sharedAlerts.add("alert2"); // First page - List page = alertService.findSharedAlertsPaged(true, 1, 0); + List page = alertService.findSharedAlertsPaged(1, 0); assertEquals(1, page.size()); assertTrue(sharedAlerts.contains(page.get(0).getName())); // Second page - page = alertService.findSharedAlertsPaged(true, 1, 1); + page = alertService.findSharedAlertsPaged(1, 1); assertEquals(1, page.size()); assertTrue(sharedAlerts.contains(page.get(0).getName())); // Third page should be zero - page = alertService.findSharedAlertsPaged(true, 1, 2); + page = alertService.findSharedAlertsPaged(1, 2); assertEquals(0, page.size()); } @@ -792,7 +792,7 @@ public void testFindPrivateAlertsPagedForNonPrivilegedUser() { alertService.updateAlert(alert3); // Assert result is empty for non-privileged user - assertEquals(0, alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 100, 0).size()); + assertEquals(0, alertService.findPrivateAlertsForPrivilegedUserPaged(user1, 100, 0).size()); } @Test @@ -846,17 +846,17 @@ public void testFindPrivateAlertsPagedForPrivilegedUser() { Set alertNames = new HashSet<>(); // Fetch first page - List page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 0); + List page = alertService.findPrivateAlertsForPrivilegedUserPaged(user1, 1, 0); assertEquals(1, page.size()); alertNames.add(page.get(0).getName()); // Fetch second page - page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 1); + page = alertService.findPrivateAlertsForPrivilegedUserPaged(user1, 1, 1); assertEquals(1, page.size()); alertNames.add(page.get(0).getName()); // Fetch third page, should be empty -
page = alertService.findPrivateAlertsForPrivilegedUserPaged(true, user1, 1, 2); + page = alertService.findPrivateAlertsForPrivilegedUserPaged(user1, 1, 2); assertEquals(0, page.size()); // Assert all private alerts are fetched @@ -1003,6 +1003,53 @@ public void testUpdateNotification() { assertTrue(n.getActiveStatusMap().size() == 1); assertTrue(n.getCooldownExpirationMap().size() == 1); } + + @Test + public void testAlertsCountContext() { + String userName = createRandomName(); + PrincipalUser user = new PrincipalUser(admin, userName, userName + "@testcompany.com"); + // Test count user alerts context + // Normal case + AlertsCountContext userCtx1 = new AlertsCountContext.AlertsCountContextBuilder().countUserAlerts() + .setPrincipalUser(user).build(); + assertTrue(userCtx1.isCountUserAlerts()); + + // Missing user + AlertsCountContext userCtx2 = new AlertsCountContext.AlertsCountContextBuilder().countUserAlerts().build(); + assertFalse(userCtx2.isCountUserAlerts()); + + // Not mutually exclusive + AlertsCountContext userCtx3 = new AlertsCountContext.AlertsCountContextBuilder().countUserAlerts() + .countSharedAlerts().setPrincipalUser(user).build(); + assertFalse(userCtx3.isCountUserAlerts()); + + // Test count shared alerts context + // Normal case + AlertsCountContext sharedCtx1 = new AlertsCountContext.AlertsCountContextBuilder().countSharedAlerts().build(); + assertTrue(sharedCtx1.isCountSharedAlerts()); + + // Not mutually exclusive + AlertsCountContext sharedCtx2 = new AlertsCountContext.AlertsCountContextBuilder().countSharedAlerts() + .countPrivateAlerts().build(); + assertFalse(sharedCtx2.isCountSharedAlerts()); + + // Test count private alerts context + // Normal case + AlertsCountContext privateCtx1 = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts() + .setPrincipalUser(user).build(); + assertTrue(privateCtx1.isCountPrivateAlerts()); + + // Missing user + AlertsCountContext privateCtx2 = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts() + .build(); + assertFalse(privateCtx2.isCountPrivateAlerts()); + + // Not mutually exclusive + AlertsCountContext privateCtx3 = new AlertsCountContext.AlertsCountContextBuilder().countPrivateAlerts() + .countUserAlerts().setPrincipalUser(user).build(); + assertFalse(privateCtx3.isCountPrivateAlerts()); + } + } /* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved.
*/ \ No newline at end of file diff --git a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java index c44deab2f..f01c518d3 100644 --- a/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java +++ b/ArgusWebServices/src/main/java/com/salesforce/dva/argus/ws/resources/AlertResources.java @@ -170,7 +170,7 @@ public List getAlertsMetaByOwner(@Context HttpServletRequest req, PrincipalUser owner = validateAndGetOwner(req, ownerName); List result = new ArrayList<>(); - result = alertService.findAlertsByOwnerPaged(owner, true, pagesize, (pagenumber - 1) * pagesize); + result = alertService.findAlertsByOwnerPaged(owner, pagesize, (pagenumber - 1) * pagesize); return AlertDto.transformToDto(result); } @@ -196,7 +196,7 @@ public List getSharedAlertsMeta(@Context HttpServletRequest req, @QueryParam("pagenumber") Integer pagenumber) { List result = new ArrayList<>(); - result = alertService.findSharedAlertsPaged(true, pagesize, (pagenumber - 1) * pagesize); + result = alertService.findSharedAlertsPaged(pagesize, (pagenumber - 1) * pagesize); return AlertDto.transformToDto(result); } @@ -225,7 +225,7 @@ public List getAlertsMetaForPrivilegedUser(@Context HttpServletRequest } List result = new ArrayList<>(); - result = alertService.findPrivateAlertsForPrivilegedUserPaged(true, owner, pagesize, pagenumber); + result = alertService.findPrivateAlertsForPrivilegedUserPaged(owner, pagesize, pagenumber); return AlertDto.transformToDto(result); } From 2e3eff4282f88f54f8a56947d22d687a3cc6798b Mon Sep 17 00:00:00 2001 From: dilipdevaraj-sfdc Date: Fri, 27 Jul 2018 20:42:37 -0700 Subject: [PATCH 06/27] Use actual version instead of project.version since Maven is not reflecting it within the pom's version --- ArgusClient/pom.xml | 2 +- ArgusCore/pom.xml | 2 +- ArgusSDK/pom.xml | 2 +- ArgusWeb/pom.xml | 2 +- ArgusWebServices/pom.xml | 2 +- pom.xml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ArgusClient/pom.xml b/ArgusClient/pom.xml index d5c13f975..9b4fe841f 100644 --- a/ArgusClient/pom.xml +++ b/ArgusClient/pom.xml @@ -4,7 +4,7 @@ argus com.salesforce.argus - ${project.version} + 2.17.0 .. argus-client diff --git a/ArgusCore/pom.xml b/ArgusCore/pom.xml index 9c9d0dc6d..f802d4b36 100644 --- a/ArgusCore/pom.xml +++ b/ArgusCore/pom.xml @@ -5,7 +5,7 @@ argus com.salesforce.argus - ${project.version} + 2.17.0 .. argus-core diff --git a/ArgusSDK/pom.xml b/ArgusSDK/pom.xml index bcc2a052f..3138e2c5b 100644 --- a/ArgusSDK/pom.xml +++ b/ArgusSDK/pom.xml @@ -4,7 +4,7 @@ argus com.salesforce.argus - ${project.version} + 2.17.0 .. argus-sdk diff --git a/ArgusWeb/pom.xml b/ArgusWeb/pom.xml index b94e09cd8..b0f3dd009 100644 --- a/ArgusWeb/pom.xml +++ b/ArgusWeb/pom.xml @@ -4,7 +4,7 @@ argus com.salesforce.argus - ${project.version} + 2.17.0 .. argus-web diff --git a/ArgusWebServices/pom.xml b/ArgusWebServices/pom.xml index 9dfddd7b3..bc5e2f0a5 100644 --- a/ArgusWebServices/pom.xml +++ b/ArgusWebServices/pom.xml @@ -3,7 +3,7 @@ argus com.salesforce.argus - ${project.version} + 2.17.0 ..
argus-webservices diff --git a/pom.xml b/pom.xml index 1b91198b2..b58ff41f2 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.salesforce.argus argus - ${project.version} + 2.17.0 pom Argus From 1c46cbee97530499fd45d46634fa3d62b33a8668 Mon Sep 17 00:00:00 2001 From: Justin Harringa Date: Sun, 29 Jul 2018 13:49:41 -0700 Subject: [PATCH 07/27] +Argus SDK README badge (points to Maven Central) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d6aa1ded3..c339abe40 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ Argus [![Build Status](https://travis-ci.org/salesforce/Argus.svg?branch=master)](https://travis-ci.org/salesforce/Argus) [![Static Analysis](https://scan.coverity.com/projects/8155/badge.svg)](https://scan.coverity.com/projects/salesforceeng-argus) +[![argus-sdk - Maven Central](https://maven-badges.herokuapp.com/maven-central/com.salesforce.argus/argus-sdk/badge.svg?maxAge=3600)](https://maven-badges.herokuapp.com/maven-central/com.salesforce.argus/argus-sdk) ===== Argus is a time-series monitoring and alerting platform. It consists of discrete services to configure alerts, ingest and transform metrics & events, send notifications, create namespaces, and to both establish and enforce policies and quotas for usage. From b2ea704c492059a82daa1ab0bed86eded0bd2cb9 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Thu, 19 Jul 2018 14:44:35 -0700 Subject: [PATCH 08/27] Add new scope+metric index --- .../dva/argus/service/MonitorService.java | 5 + .../service/schema/AbstractSchemaService.java | 96 +++++++--- .../schema/AsyncHbaseSchemaService.java | 3 +- .../schema/ElasticSearchSchemaService.java | 167 ++++++++++++------ .../schema/MetricSchemaRecordList.java | 30 +--- .../service/schema/SchemaRecordList.java | 42 +++++ .../schema/ScopeOnlySchemaRecordList.java | 25 --- .../schema/AbstractSchemaServiceTest.java | 9 +- 8 files changed, 240 insertions(+), 137 deletions(-) create mode 100644 ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/SchemaRecordList.java diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/MonitorService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/MonitorService.java index 5f95a97a6..a143e27fe 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/MonitorService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/MonitorService.java @@ -206,6 +206,11 @@ public static enum Counter { COMMIT_CLIENT_METRIC_WRITES("argus.core", "commit.client.metric.writes"), SCHEMACOMMIT_CLIENT_METRIC_WRITES("argus.core", "schemacommit.client.metric.writes"), + SCOPEANDMETRICNAMES_WRITTEN("argus.core", "scopeandmetricnames.written"), + SCOPEANDMETRICNAMES_WRITE_LATENCY("argus.core", "scopeandmetricnames.write.latency"), + SCOPEANDMETRICNAMES_QUERY_COUNT("argus.core", "scopeandmetricnames.query.count"), + SCOPEANDMETRICNAMES_QUERY_LATENCY("argus.core", "scopeandmetricnames.query.latency"), + SCOPENAMES_WRITTEN("argus.core", "scopenames.written"), SCOPENAMES_WRITE_LATENCY("argus.core", "scopenames.write.latency"), SCOPENAMES_QUERY_COUNT("argus.core", "scopenames.query.count"), diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index c967276b2..84312ed15 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ 
b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -14,6 +14,7 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.commons.lang.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -39,18 +40,22 @@ public abstract class AbstractSchemaService extends DefaultService implements Sc private static final int DAY_IN_SECONDS = 24 * 60 * 60; private static final int HOUR_IN_SECONDS = 60 * 60; - /* Have two separate bloom filters one for metrics schema and another for scope names schema. - * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. + /* Have three separate bloom filters one for metrics schema, one only for scope names schema and one only for scope name and metric name schema. + * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. * Hence we can avoid the extra call to populate scopenames index on ES in subsequent Kafka reads. + * The same logic applies to scope name and metric name schema. */ protected static BloomFilter bloomFilter; protected static BloomFilter bloomFilterScopeOnly; + protected static BloomFilter bloomFilterScopeAndMetricOnly; private Random rand = new Random(); private int randomNumber = rand.nextInt(); private int bloomFilterExpectedNumberInsertions; private double bloomFilterErrorRate; private int bloomFilterScopeOnlyExpectedNumberInsertions; - private double bloomFilterScopeOnlyErrorRate; + private double bloomFilterScopeOnlyErrorRate; + private int bloomFilterScopeAndMetricOnlyExpectedNumberInsertions; + private double bloomFilterScopeAndMetricOnlyErrorRate; private final Logger _logger = LoggerFactory.getLogger(getClass()); private final Thread _bloomFilterMonitorThread; protected final boolean _syncPut; @@ -64,12 +69,22 @@ protected AbstractSchemaService(SystemConfiguration config) { Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); bloomFilterErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_ERROR_RATE.getName(), Property.BLOOMFILTER_ERROR_RATE.getDefaultValue())); - bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + + bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); bloomFilterScopeOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getName(), Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getDefaultValue())); + + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterScopeAndMetricOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getDefaultValue())); + bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); + bloomFilterScopeOnly = 
BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); _syncPut = Boolean.parseBoolean( config.getValue(Property.SYNC_PUT.getName(), Property.SYNC_PUT.getDefaultValue())); @@ -99,6 +114,8 @@ public void put(List metrics) { List metricsToPut = new ArrayList<>(metrics.size()); Set scopesToPut = new HashSet<>(metrics.size()); + List scopesAndMetricsNamesToPut = new ArrayList<>(metrics.size()); + for(Metric metric : metrics) { // check metric schema bloom filter if(metric.getTags().isEmpty()) { @@ -124,15 +141,25 @@ public void put(List metrics) { } } + String scopeName = metric.getScope(); + String metricName = metric.getMetric(); + // Check scope only bloom filter - String key = constructScopeOnlyKey(metric.getScope()); + String key = constructScopeOnlyKey(scopeName); boolean found = bloomFilterScopeOnly.mightContain(key); if(!found) { - scopesToPut.add(metric.getScope()); - } + scopesToPut.add(scopeName); + } + + // Check scope and metric only bloom filter + key = constructScopeAndMetricOnlyKey(scopeName, metricName); + found = bloomFilterScopeAndMetricOnly.mightContain(key); + if(!found) { + scopesAndMetricsNamesToPut.add(new Metric(scopeName, metricName)); + } } - implementationSpecificPut(metricsToPut, scopesToPut); + implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); } /* @@ -140,8 +167,10 @@ public void put(List metrics) { * * @param metrics The metrics metadata that will be written to a separate index. * @param scopeNames The scope names that will be written to a separate index. + * @param scopesAndMetricNames The scope and metric names that will be written to a separate index.
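+ * (Editorial clarification: each element is a bare {@code Metric} carrying only a scope and a metric name, as built in {@code put()} above.)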
*/ - protected abstract void implementationSpecificPut(List metrics, Set scopeNames); + protected abstract void implementationSpecificPut(List metrics, Set scopeNames, + List scopesAndMetricNames); @Override public void dispose() { @@ -195,18 +224,22 @@ protected String constructKey(Metric metric, Entry tagEntry) { } protected String constructKey(String scope, String metric, String tagk, String tagv, String namespace) { + StringBuilder sb = new StringBuilder(scope); - sb.append('\0').append(metric); - if(namespace != null) { + if(!StringUtils.isEmpty(metric)) { + sb.append('\0').append(metric); + } + + if(!StringUtils.isEmpty(namespace)) { sb.append('\0').append(namespace); } - if(tagk != null) { + if(!StringUtils.isEmpty(tagk)) { sb.append('\0').append(tagk); } - if(tagv != null) { + if(!StringUtils.isEmpty(tagv)) { sb.append('\0').append(tagv); } @@ -218,13 +251,13 @@ protected String constructKey(String scope, String metric, String tagk, String t } protected String constructScopeOnlyKey(String scope) { - StringBuilder sb = new StringBuilder(scope); - // Add randomness for each instance of bloom filter running on different - // schema clients to reduce probability of false positives that metric schemas are not written to ES - sb.append('\0').append(randomNumber); + return constructKey(scope, null, null, null, null); + } - return sb.toString(); + protected String constructScopeAndMetricOnlyKey(String scope, String metric) { + + return constructKey(scope, metric, null, null, null); } private void createScheduledExecutorService(int targetHourToStartAt){ @@ -261,11 +294,26 @@ public enum Property { SYNC_PUT("service.property.schema.sync.put", "false"), BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.expected.number.insertions", "40"), BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), - BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "40"), + + /* + * Estimated Filter Size using https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= + * 2.86MiB + */ + + BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), + + /* + * Estimated Filter Size using https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= + * 28.56MiB + */ + + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"), + /* * Have a different configured flush start hour for different machines to prevent thundering herd problem. - */ + */ BLOOM_FILTER_FLUSH_HOUR_TO_START_AT("service.property.schema.bloomfilter.flush.hour.to.start.at","2"); private final String _name; @@ -323,7 +371,9 @@ private void _checkBloomFilterUsage() { _logger.info("Metrics Bloom approx no. elements = {}", bloomFilter.approximateElementCount()); _logger.info("Metrics Bloom expected error rate = {}", bloomFilter.expectedFpp()); _logger.info("Scope only Bloom approx no. 
elements = {}", bloomFilterScopeOnly.approximateElementCount()); - _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); + _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); + _logger.info("Scope and metric only Bloom approx no. elements = {}", bloomFilterScopeAndMetricOnly.approximateElementCount()); + _logger.info("Scope and metric only Bloom expected error rate = {}", bloomFilterScopeAndMetricOnly.expectedFpp()); } private void _sleepForPollPeriod() { @@ -351,8 +401,10 @@ private void _flushBloomFilter() { _logger.info("Flushing out bloom filter entries"); bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); /* Don't need explicit synchronization to prevent slowness majority of the time*/ randomNumber = rand.nextInt(); } } -} \ No newline at end of file +} diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java index 0395eb7b7..c32b3f518 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java @@ -192,7 +192,8 @@ private String _plusOneNConstructRowKey(MetricSchemaRecord record, String table //~ Methods ************************************************************************************************************************************** @Override - protected void implementationSpecificPut(List metrics, Set scopeNames) { + protected void implementationSpecificPut(List metrics, Set scopeNames, + List scopesAndMetricNames) { requireNotDisposed(); SystemAssert.requireArgument(metrics != null, "Metric list cannot be null."); diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 14e5d02f3..748dcf9a1 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -16,7 +16,10 @@ import java.util.Map; import java.util.Properties; import java.util.Set; +import java.util.function.Supplier; +import com.google.common.hash.BloomFilter; +import com.salesforce.dva.argus.entity.*; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; @@ -45,11 +48,6 @@ import com.fasterxml.jackson.databind.node.ObjectNode; import com.google.inject.Inject; import com.google.inject.Singleton; -import com.salesforce.dva.argus.entity.KeywordQuery; -import com.salesforce.dva.argus.entity.Metric; -import com.salesforce.dva.argus.entity.MetricSchemaRecord; -import com.salesforce.dva.argus.entity.MetricSchemaRecordQuery; -import com.salesforce.dva.argus.entity.ScopeOnlySchemaRecord; import com.salesforce.dva.argus.service.MonitorService; import 
com.salesforce.dva.argus.service.MonitorService.Counter; import com.salesforce.dva.argus.service.SchemaService; @@ -71,6 +69,9 @@ public class ElasticSearchSchemaService extends AbstractSchemaService { private static String SCOPE_INDEX_NAME; private static String SCOPE_TYPE_NAME; + private static String SCOPE_AND_METRIC_INDEX_NAME; + private static String SCOPE_AND_METRIC_TYPE_NAME; + private static final String INDEX_NAME = "metadata_index"; private static final String TYPE_NAME = "metadata_type"; private static final String KEEP_SCROLL_CONTEXT_OPEN_FOR = "1m"; @@ -88,7 +89,9 @@ public class ElasticSearchSchemaService extends AbstractSchemaService { private final int _replicationFactor; private final int _numShards; private final int _replicationFactorForScopeIndex; - private final int _numShardsForScopeIndex; + private final int _numShardsForScopeIndex; + private final int _replicationFactorForScopeAndMetricIndex; + private final int _numShardsForScopeAndMetricIndex; private final int _bulkIndexingSize; private HashAlgorithm _idgenHashAlgo; @@ -104,6 +107,12 @@ public ElasticSearchSchemaService(SystemConfiguration config, MonitorService mon Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); SCOPE_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getName(), Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getDefaultValue()); + + SCOPE_AND_METRIC_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getDefaultValue()); + SCOPE_AND_METRIC_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getDefaultValue()); + String algorithm = config.getValue(Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getName(), Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getDefaultValue()); try { _idgenHashAlgo = HashAlgorithm.fromString(algorithm); @@ -126,6 +135,12 @@ public ElasticSearchSchemaService(SystemConfiguration config, MonitorService mon _numShardsForScopeIndex = Integer.parseInt( config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getDefaultValue())); + _replicationFactorForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + + _numShardsForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + _bulkIndexingSize = Integer.parseInt( config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue())); @@ -180,8 +195,13 @@ public Builder customizeRequestConfig(Builder requestConfigBuilder) { .setMaxRetryTimeoutMillis(MAX_RETRY_TIMEOUT) .build(); - _createIndexIfNotExists(); - _createScopeIndexIfNotExists(); + _createIndexIfNotExists(INDEX_NAME, _replicationFactor, _numShards, () -> _createMappingsNode()); + + _createIndexIfNotExists(SCOPE_INDEX_NAME, _replicationFactorForScopeIndex, _numShardsForScopeIndex, + () -> _createScopeMappingsNode()); + + _createIndexIfNotExists(SCOPE_AND_METRIC_INDEX_NAME, _replicationFactorForScopeAndMetricIndex, + _numShardsForScopeAndMetricIndex, () -> _createScopeAndMetricMappingsNode()); } @@ -207,9 +227,9 @@ public Properties 
getServiceProperties() { } @Override - protected void implementationSpecificPut(List metrics, Set scopeNames) { + protected void implementationSpecificPut(List metrics, Set scopeNames, + List scopesAndMetricNames) { SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); - SystemAssert.requireArgument(scopeNames != null, "Scope names set cannot be null."); _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); @@ -218,7 +238,7 @@ protected void implementationSpecificPut(List metrics, Set scope for(List records : fracturedList) { if(!records.isEmpty()) { - _upsert(records); + _upsert(records, INDEX_NAME, TYPE_NAME, bloomFilter); } } @@ -248,10 +268,29 @@ protected void implementationSpecificPut(List metrics, Set scope _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITTEN, count, null); _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + + _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); + + start = System.currentTimeMillis(); + List> fracturedScopesAndMetricsList = _fracture(scopesAndMetricNames); + + for(List records : fracturedScopesAndMetricsList) { + if(!records.isEmpty()) { + _upsert(records, SCOPE_AND_METRIC_INDEX_NAME, SCOPE_AND_METRIC_TYPE_NAME, bloomFilterScopeAndMetricOnly); + } + } + + count = 0; + for(List records : fracturedScopesAndMetricsList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCOPEANDMETRICNAMES_WRITTEN, count, null); + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); } /* Convert the given list of metrics to a list of metric schema records. At the same time, fracture the records list - * if its size is greater than INDEXING_BATCH_SIZE. + * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. */ protected List> _fracture(List metrics) { List> fracturedList = new ArrayList<>(); @@ -279,12 +318,15 @@ protected List> _fracture(List metrics) { } } - fracturedList.add(records); + if(!records.isEmpty()) { + fracturedList.add(records); + } + return fracturedList; } /* Convert the given list of scopes to a list of scope only schema records. At the same time, fracture the records list - * if its size is greater than INDEXING_BATCH_SIZE. + * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
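+ * Each fractured sub-list is then written to Elasticsearch with a single _bulk request.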
*/ protected List> _fractureScopes(Set scopeNames) { List> fracturedList = new ArrayList<>(); @@ -299,7 +341,10 @@ protected List> _fractureScopes(Set scopeNam } } - fracturedList.add(records); + if(!records.isEmpty()) { + fracturedList.add(records); + } + return fracturedList; } @@ -353,7 +398,8 @@ public List get(MetricSchemaRecordQuery query) { requestBody.put("scroll_id", scrollID); requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); - response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); + response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), + new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); list = toEntity(extractResponse(response), new TypeReference() {}); records.addAll(list.getRecords()); @@ -621,11 +667,11 @@ private List _analyzedTokens(String query) { } } - private void _upsert(List records) { + private void _upsert(List records, String indexName, String indexType, BloomFilter bloomFilter) { String requestUrl = new StringBuilder().append("/") - .append(INDEX_NAME) + .append(indexName) .append("/") - .append(TYPE_NAME) + .append(indexType) .append("/") .append("_bulk") .toString(); @@ -665,7 +711,7 @@ private void _upsert(List records) { } } //add to bloom filter - _addToBloomFilter(records); + _addToBloomFilter(records, bloomFilter); } catch(IOException e) { throw new SystemException("Failed to parse response of put metrics. The response was: " + strResponse, e); @@ -723,7 +769,7 @@ private void _upsertScopes(List records) { } } - protected void _addToBloomFilter(List records){ + protected void _addToBloomFilter(List records, BloomFilter bloomFilter){ _logger.info("Adding {} records into bloom filter.", records.size()); for(MetricSchemaRecord record : records) { String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); @@ -976,7 +1022,7 @@ private ObjectMapper _createObjectMapper() { SimpleModule module = new SimpleModule(); module.addSerializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Serializer()); module.addDeserializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new MetricSchemaRecordList.AggDeserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); mapper.registerModule(module); return mapper; @@ -989,7 +1035,7 @@ private ObjectMapper _createScopeOnlyObjectMapper() { SimpleModule module = new SimpleModule(); module.addSerializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Serializer()); module.addDeserializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new ScopeOnlySchemaRecordList.AggDeserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); mapper.registerModule(module); return mapper; @@ -1044,6 +1090,26 @@ private ObjectNode _createMappingsNode() { return mappingsNode; } + private ObjectNode _createScopeAndMetricMappingsNode() { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode propertiesNode = mapper.createObjectNode(); + propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + + propertiesNode.put("mts",
_createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + + ObjectNode typeNode = mapper.createObjectNode(); + typeNode.put("properties", propertiesNode); + + ObjectNode mappingsNode = mapper.createObjectNode(); + mappingsNode.put(SCOPE_AND_METRIC_TYPE_NAME, typeNode); + + return mappingsNode; + } + + private ObjectNode _createScopeMappingsNode() { ObjectMapper mapper = new ObjectMapper(); @@ -1084,51 +1150,29 @@ private ObjectNode _createFieldNodeNoAnalyzer(String type) { return fieldNode; } - private void _createIndexIfNotExists() { - try { - Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + INDEX_NAME); - boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; - - if(!indexExists) { - _logger.info("Index [" + INDEX_NAME + "] does not exist. Will create one."); - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("settings", _createSettingsNode(_replicationFactor, _numShards)); - rootNode.put("mappings", _createMappingsNode()); - - String settingsAndMappingsJson = rootNode.toString(); - String requestUrl = new StringBuilder().append("/").append(INDEX_NAME).toString(); - - response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); - extractResponse(response); - } - } catch (Exception e) { - _logger.error("Failed to check/create elasticsearch index. ElasticSearchSchemaService may not function.", e); - } - } - - private void _createScopeIndexIfNotExists() { + private void _createIndexIfNotExists(String indexName, int replicationFactor, int numShards, + Supplier createMappingsNode) { try { - Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + SCOPE_INDEX_NAME); + Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + indexName); boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; if(!indexExists) { - _logger.info("Index [" + SCOPE_INDEX_NAME + "] does not exist. Will create one."); + _logger.info("Index [" + indexName + "] does not exist. Will create one."); ObjectMapper mapper = new ObjectMapper(); ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("settings", _createSettingsNode(_replicationFactorForScopeIndex, _numShardsForScopeIndex)); - rootNode.put("mappings", _createScopeMappingsNode()); + rootNode.put("settings", _createSettingsNode(replicationFactor, numShards)); + rootNode.put("mappings", createMappingsNode.get()); String settingsAndMappingsJson = rootNode.toString(); - String requestUrl = new StringBuilder().append("/").append(SCOPE_INDEX_NAME).toString(); + String requestUrl = new StringBuilder().append("/").append(indexName).toString(); response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); extractResponse(response); } } catch (Exception e) { - _logger.error("Failed to check/create elasticsearch scope index. ElasticSearchSchemaService may not function.", e); + _logger.error("Failed to check/create {} index. ElasticSearchSchemaService may not function. {}", + indexName, e); } } @@ -1190,10 +1234,21 @@ public enum Property { ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"), /** The hashing algorithm to use for generating document id. 
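* (MD5 by default; otherwise a 64-bit xxHash via LongHashFunction.xx(); see MetricSchemaRecordList.HashAlgorithm)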
*/ ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"), + /** Name of scope only index */ ELASTICSEARCH_SCOPE_INDEX_NAME("service.property.schema.elasticsearch.scope.index.name", "scopenames"), /** Type within scope only index */ - ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"); + ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), + + /** Replication factor for scope and metric names */ + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.metric.index", "1"), + /** Shard count for scope and metric names */ + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.metric.index", "10"), + + /** Name of scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scope.metric.index.name", "scopemetricnames"), + /** Type within scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scope.metric.type.name", "scopemetric_type"); private final String _name; private final String _defaultValue; @@ -1353,4 +1408,4 @@ public void setReason(String reason) { } } -} \ No newline at end of file +} diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/MetricSchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/MetricSchemaRecordList.java index 060f31734..968f8abfd 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/MetricSchemaRecordList.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/MetricSchemaRecordList.java @@ -126,7 +126,7 @@ public MetricSchemaRecordList deserialize(JsonParser jp, DeserializationContext while(iter.hasNext()) { JsonNode hit = iter.next(); JsonNode source = hit.get("_source"); - + JsonNode namespaceNode = source.get(RecordType.NAMESPACE.getName()); JsonNode scopeNode = source.get(RecordType.SCOPE.getName()); JsonNode metricNode = source.get(RecordType.METRIC.getName()); @@ -145,32 +145,4 @@ public MetricSchemaRecordList deserialize(JsonParser jp, DeserializationContext } } - - - static class AggDeserializer extends JsonDeserializer> { - - @Override - public List deserialize(JsonParser jp, DeserializationContext context) - throws IOException, JsonProcessingException { - - List values = Collections.emptyList(); - - JsonNode rootNode = jp.getCodec().readTree(jp); - JsonNode buckets = rootNode.get("aggregations").get("distinct_values").get("buckets"); - - if(JsonNodeType.ARRAY.equals(buckets.getNodeType())) { - values = new ArrayList<>(buckets.size()); - Iterator iter = buckets.elements(); - while(iter.hasNext()) { - JsonNode bucket = iter.next(); - String value = bucket.get("key").asText(); - values.add(value); - } - } - - return values; - } - - } - } diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/SchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/SchemaRecordList.java new file mode 100644 index 000000000..0e763e776 --- /dev/null +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/SchemaRecordList.java @@ -0,0 +1,42 @@ +package com.salesforce.dva.argus.service.schema; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonProcessingException; +import 
com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.JsonNodeType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +public class SchemaRecordList { + + static class AggDeserializer extends JsonDeserializer> { + + @Override + public List deserialize(JsonParser jp, DeserializationContext context) + throws IOException { + + List values = Collections.emptyList(); + + JsonNode rootNode = jp.getCodec().readTree(jp); + JsonNode buckets = rootNode.get("aggregations").get("distinct_values").get("buckets"); + + if (JsonNodeType.ARRAY.equals(buckets.getNodeType())) { + values = new ArrayList<>(buckets.size()); + Iterator iter = buckets.elements(); + while (iter.hasNext()) { + JsonNode bucket = iter.next(); + String value = bucket.get("key").asText(); + values.add(value); + } + } + + return values; + } + } +} diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java index faa7e2bbf..a0f15d45e 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java @@ -126,29 +126,4 @@ public ScopeOnlySchemaRecordList deserialize(JsonParser jp, DeserializationConte return new ScopeOnlySchemaRecordList(records, scrollID); } } - - static class AggDeserializer extends JsonDeserializer> { - - @Override - public List deserialize(JsonParser jp, DeserializationContext context) - throws IOException, JsonProcessingException { - - List values = Collections.emptyList(); - - JsonNode rootNode = jp.getCodec().readTree(jp); - JsonNode buckets = rootNode.get("aggregations").get("distinct_values").get("buckets"); - - if(JsonNodeType.ARRAY.equals(buckets.getNodeType())) { - values = new ArrayList<>(buckets.size()); - Iterator iter = buckets.elements(); - while(iter.hasNext()) { - JsonNode bucket = iter.next(); - String value = bucket.get("key").asText(); - values.add(value); - } - } - - return values; - } - } } diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java index e1e1d6966..70eaa4c0f 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java @@ -12,6 +12,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.tuple.Pair; import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; @@ -42,7 +43,7 @@ public void testPutEverythingCached() { spyService.put(metrics); // add to bloom filter cache - spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilter(spyService._fracture(metrics).get(0), AbstractSchemaService.bloomFilter); assertTrue(count.get() == metrics.size()); spyService.put(metrics); // count should be same since we are re-reading cached value @@ -62,12 +63,12 @@ public void testPutPartialCached() { spyService.put(metrics); // 1st metric cached - 
spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilter(spyService._fracture(metrics).get(0), AbstractSchemaService.bloomFilter); assertTrue(count.get() == metrics.size()); // 1st metric already in cache (partial case scenario), and now 2nd metric will also be added to cache. // Total number of metrics in cache = metric1.size() and metric2.size() spyService.put(new ArrayList<>(total)); - spyService._addToBloomFilter(spyService._fracture(new ArrayList<>(total)).get(0)); + spyService._addToBloomFilter(spyService._fracture(new ArrayList<>(total)).get(0), AbstractSchemaService.bloomFilter); assertTrue(count.get() == total.size()); } @@ -97,7 +98,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { count.addAndGet(metrics.size()); return null; } - }).when(spyService).implementationSpecificPut(Mockito.anyListOf(Metric.class),Mockito.anySetOf(String.class)); + }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any()); return spyService; } From 774622753ee80fbb79c69b76c8448f3e9285bf29 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Thu, 19 Jul 2018 15:16:45 -0700 Subject: [PATCH 09/27] Use Set for scope/metric names --- .../dva/argus/service/schema/AbstractSchemaService.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index 84312ed15..b1a71a52f 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -114,7 +114,7 @@ public void put(List metrics) { List metricsToPut = new ArrayList<>(metrics.size()); Set scopesToPut = new HashSet<>(metrics.size()); - List scopesAndMetricsNamesToPut = new ArrayList<>(metrics.size()); + Set scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); for(Metric metric : metrics) { // check metric schema bloom filter @@ -159,7 +159,8 @@ public void put(List metrics) { } } - implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); + List scopesAndMetricsToPut = new ArrayList<>(scopesAndMetricsNamesToPut); + implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsToPut); } /* From e08121b19acde3d2c62503496fcdb514d18e8587 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Thu, 19 Jul 2018 18:45:33 -0700 Subject: [PATCH 10/27] Query new schema+metric index --- .../dva/argus/entity/MetricSchemaRecordQuery.java | 12 ++++++++++++ .../service/schema/ElasticSearchSchemaService.java | 14 +++++++++++--- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/MetricSchemaRecordQuery.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/MetricSchemaRecordQuery.java index fcd0c292a..f3e584a29 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/MetricSchemaRecordQuery.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/MetricSchemaRecordQuery.java @@ -186,6 +186,18 @@ public boolean isQueryOnlyOnScope() { return false; } + public boolean isQueryOnlyOnScopeAndMetric() { + + if (Objects.equals(this.namespace, REGEX_MATCH_ANY) && + Objects.equals(this.tagKey, REGEX_MATCH_ANY) && + Objects.equals(this.tagValue, REGEX_MATCH_ANY)) + { + return true; + } + + return false; + } + public void 
setScanFrom(MetricSchemaRecord scanFrom) { this.scanFrom = scanFrom; } diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 748dcf9a1..196c61491 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -454,6 +454,11 @@ public List getUnique(MetricSchemaRecordQuery query, RecordT indexName = SCOPE_INDEX_NAME; typeName = SCOPE_TYPE_NAME; } + else if (query.isQueryOnlyOnScopeAndMetric()) + { + indexName = SCOPE_AND_METRIC_INDEX_NAME; + typeName = SCOPE_AND_METRIC_TYPE_NAME; + } String requestUrl = new StringBuilder().append("/") .append(indexName) @@ -471,10 +476,13 @@ public List getUnique(MetricSchemaRecordQuery query, RecordT List records = SchemaService.constructMetricSchemaRecordsForType( toEntity(str, new TypeReference>() {}), type); - if (query.isQueryOnlyOnScope()) { + if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) { _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags); _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + } else if (query.isQueryOnlyOnScopeAndMetric()) { + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); } else { _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); @@ -1227,7 +1235,7 @@ public enum Property { /** Replication factor for scopenames */ ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.index", "1"), /** Shard count for scopenames */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "10"), + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "6"), /** The no. of records to batch for bulk indexing requests. 
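* (record lists handed to _fracture and _fractureScopes are split into chunks of at most this size before each _bulk call)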
* https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests */ @@ -1243,7 +1251,7 @@ public enum Property { /** Replication factor for scope and metric names */ ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.metric.index", "1"), /** Shard count for scope and metric names */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.metric.index", "10"), + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.metric.index", "6"), /** Name of scope and metric only index */ ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scope.metric.index.name", "scopemetricnames"), From e292303c936384649d924ed63e45727cdb3eda5a Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Fri, 20 Jul 2018 11:10:19 -0700 Subject: [PATCH 11/27] CR Fixes --- .../service/schema/ElasticSearchSchemaService.java | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 196c61491..7da781ba6 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -51,7 +51,6 @@ import com.salesforce.dva.argus.service.MonitorService; import com.salesforce.dva.argus.service.MonitorService.Counter; import com.salesforce.dva.argus.service.SchemaService; -import com.salesforce.dva.argus.service.schema.AsyncHbaseSchemaService.Property; import com.salesforce.dva.argus.service.schema.ElasticSearchSchemaService.PutResponse.Item; import com.salesforce.dva.argus.service.schema.MetricSchemaRecordList.HashAlgorithm; import com.salesforce.dva.argus.system.SystemAssert; @@ -675,11 +674,11 @@ private List _analyzedTokens(String query) { } } - private void _upsert(List records, String indexName, String indexType, BloomFilter bloomFilter) { + private void _upsert(List records, String indexName, String docType, BloomFilter bloomFilter) { String requestUrl = new StringBuilder().append("/") .append(indexName) .append("/") - .append(indexType) + .append(docType) .append("/") .append("_bulk") .toString(); @@ -1249,14 +1248,14 @@ public enum Property { ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), /** Replication factor for scope and metric names */ - ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.metric.index", "1"), + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scopeandmetric.index", "1"), /** Shard count for scope and metric names */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.metric.index", "6"), + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scopeandmetric.index", "6"), /** Name of scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scope.metric.index.name", "scopemetricnames"), + 
ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scopeandmetric.index.name", "scopemetricnames"), /** Type within scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scope.metric.type.name", "scopemetric_type"); + ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scopeandmetric.type.name", "scopemetric_type"); private final String _name; private final String _defaultValue; From 5fd8c045c56015ebc97da01f8ff00a865e8fe3f2 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Fri, 20 Jul 2018 11:15:55 -0700 Subject: [PATCH 12/27] CR Fixes2 --- .../argus/service/schema/ElasticSearchSchemaService.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 7da781ba6..f138dc046 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -19,7 +19,11 @@ import java.util.function.Supplier; import com.google.common.hash.BloomFilter; -import com.salesforce.dva.argus.entity.*; +import com.salesforce.dva.argus.entity.KeywordQuery; +import com.salesforce.dva.argus.entity.Metric; +import com.salesforce.dva.argus.entity.MetricSchemaRecord; +import com.salesforce.dva.argus.entity.MetricSchemaRecordQuery; +import com.salesforce.dva.argus.entity.ScopeOnlySchemaRecord; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; From 9477f13102e1ea88de76d1fed291e7d4d5e0db9a Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Fri, 20 Jul 2018 11:16:33 -0700 Subject: [PATCH 13/27] CR Fixes3 --- .../salesforce/dva/argus/service/CallbackServiceTest.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/CallbackServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/CallbackServiceTest.java index 8a48c2a82..5f9234ad9 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/CallbackServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/CallbackServiceTest.java @@ -4,7 +4,10 @@ import java.util.stream.IntStream; import com.salesforce.dva.argus.AbstractTest; -import com.salesforce.dva.argus.entity.*; +import com.salesforce.dva.argus.entity.Alert; +import com.salesforce.dva.argus.entity.Metric; +import com.salesforce.dva.argus.entity.Notification; +import com.salesforce.dva.argus.entity.Trigger; import com.salesforce.dva.argus.service.alert.DefaultAlertService.NotificationContext; import com.salesforce.dva.argus.service.alert.notifier.CallbackNotifier; import org.junit.Test; From 0bae67ea1fb05a91378bd8ff010ef0bb4f458079 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Fri, 20 Jul 2018 11:40:32 -0700 Subject: [PATCH 14/27] CR Fixes4 --- .../argus/service/schema/AbstractSchemaService.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index b1a71a52f..6d01a8a76 100644 --- 
a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -297,16 +297,20 @@ public enum Property { BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), /* - * Estimated Filter Size using https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= - * 2.86MiB + * Estimated Filter Size using bloomFilter 1 million entries + * https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= 2.86MiB + * Storing in a Set 100K entries with avg length of 15 chars would be 100K * 15 * 2 B = 30B * 100K = 3 MB + * If # of entries is 1 million, then it would be 30 MB resulting in savings in space. */ BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), /* - * Estimated Filter Size using https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= - * 28.56MiB + * Estimated Filter Size using bloomFilter 10 million entries + * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB + * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB + * If # of entries is 10 million, then it would be 600 MB resulting in savings in space. */ BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), From cdbc4491543e4fbdd5f812f671c31fd011cd52a8 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Tue, 24 Jul 2018 16:19:30 -0700 Subject: [PATCH 15/27] Address CR Comments --- .../ScopeAndMetricOnlySchemaRecord.java | 112 +++++++ .../service/schema/AbstractSchemaService.java | 10 +- .../schema/AsyncHbaseSchemaService.java | 3 +- .../schema/ElasticSearchSchemaService.java | 126 +++++++- .../ScopeAndMetricOnlySchemaRecordList.java | 129 ++++++++ .../schema/ScopeOnlySchemaRecordList.java | 6 +- .../schema/AbstractSchemaServiceTest.java | 155 ++++++--- .../ElasticSearchSchemaServiceTest.java | 296 ++++++++++++++++-- 8 files changed, 744 insertions(+), 93 deletions(-) create mode 100644 ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java create mode 100644 ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java new file mode 100644 index 000000000..45d882f15 --- /dev/null +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2016, Salesforce.com, Inc. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. 
Neither the name of Salesforce.com nor the names of its contributors may + * be used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +package com.salesforce.dva.argus.entity; + +import com.google.common.base.Objects; + +import java.text.MessageFormat; + +/** + * Represents a search result row for scope and metric name only discovery queries. + * + * @author Naveen Reddy Karri (nkarri@salesforce.com) + */ +public class ScopeAndMetricOnlySchemaRecord { + + private String scope; + private String metric; + + public ScopeAndMetricOnlySchemaRecord(String scope, String metric) { + + setScope(scope); + setMetric(metric); + } + + public String getScope() { + return scope; + } + + public void setScope(String scope) { + this.scope = scope; + } + + public String getMetric() { + return metric; + } + + public void setMetric(String metric) { + this.metric = metric; + } + + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + + result = prime * result + ((scope == null) ? 0 : scope.hashCode()); + result = prime * result + ((metric == null) ? 0 : metric.hashCode()); + return result; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + + ScopeAndMetricOnlySchemaRecord other = (ScopeAndMetricOnlySchemaRecord) obj; + + return Objects.equal(scope, other.scope) && Objects.equal(metric, other.metric); + } + + @Override + public String toString() { + return MessageFormat.format("ScopeAndMetricOnlySchemaRecord (Scope = {0}, Metric = {1})", scope, metric); + } + + public static String print(ScopeAndMetricOnlySchemaRecord msr) { + + StringBuilder sb = new StringBuilder(msr.getScope()); + sb.append(":"); + sb.append(msr.getMetric()); + + return sb.toString(); + } +} +/* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved.
*/ diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index 6d01a8a76..af851a8f1 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -15,6 +15,7 @@ import java.util.concurrent.TimeUnit; import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -114,7 +115,7 @@ public void put(List metrics) { List metricsToPut = new ArrayList<>(metrics.size()); Set scopesToPut = new HashSet<>(metrics.size()); - Set scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); + Set> scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); for(Metric metric : metrics) { // check metric schema bloom filter @@ -155,12 +156,11 @@ public void put(List metrics) { key = constructScopeAndMetricOnlyKey(scopeName, metricName); found = bloomFilterScopeAndMetricOnly.mightContain(key); if(!found) { - scopesAndMetricsNamesToPut.add(new Metric(scopeName, metricName)); + scopesAndMetricsNamesToPut.add(Pair.of(scopeName, metricName)); } } - List scopesAndMetricsToPut = new ArrayList<>(scopesAndMetricsNamesToPut); - implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsToPut); + implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); } /* @@ -171,7 +171,7 @@ public void put(List metrics) { * @param scopesAndMetricNames The scope and metric names that that will be written to a separate index. */ protected abstract void implementationSpecificPut(List metrics, Set scopeNames, - List scopesAndMetricNames); + Set> scopesAndMetricNames); @Override public void dispose() { diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java index c32b3f518..a843a1c5a 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AsyncHbaseSchemaService.java @@ -47,6 +47,7 @@ import com.stumbleupon.async.Deferred; import com.stumbleupon.async.TimeoutException; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hbase.util.Bytes; import org.hbase.async.CompareFilter.CompareOp; import org.hbase.async.FilterList; @@ -193,7 +194,7 @@ private String _plusOneNConstructRowKey(MetricSchemaRecord record, String table @Override protected void implementationSpecificPut(List metrics, Set scopeNames, - List scopesAndMetricNames) { + Set> scopesAndMetricNames) { requireNotDisposed(); SystemAssert.requireArgument(metrics != null, "Metric list cannot be null."); diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index f138dc046..6b08391ce 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -19,11 +19,8 @@ import java.util.function.Supplier; import com.google.common.hash.BloomFilter; -import com.salesforce.dva.argus.entity.KeywordQuery; -import 
com.salesforce.dva.argus.entity.Metric; -import com.salesforce.dva.argus.entity.MetricSchemaRecord; -import com.salesforce.dva.argus.entity.MetricSchemaRecordQuery; -import com.salesforce.dva.argus.entity.ScopeOnlySchemaRecord; +import com.salesforce.dva.argus.entity.*; +import org.apache.commons.lang3.tuple.Pair; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; @@ -85,6 +82,7 @@ public class ElasticSearchSchemaService extends AbstractSchemaService { private final ObjectMapper _mapper; private final ObjectMapper _scopeOnlyMapper; + private final ObjectMapper _scopeAndMetricOnlyMapper; private Logger _logger = LoggerFactory.getLogger(getClass()); private final MonitorService _monitorService; @@ -105,6 +103,7 @@ public ElasticSearchSchemaService(SystemConfiguration config, MonitorService mon _monitorService = monitorService; _mapper = _createObjectMapper(); _scopeOnlyMapper = _createScopeOnlyObjectMapper(); + _scopeAndMetricOnlyMapper = _createScopeAndMetricOnlyObjectMapper(); SCOPE_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getName(), Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); @@ -231,7 +230,7 @@ public Properties getServiceProperties() { @Override protected void implementationSpecificPut(List metrics, Set scopeNames, - List scopesAndMetricNames) { + Set> scopesAndMetricNames) { SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); @@ -241,7 +240,7 @@ protected void implementationSpecificPut(List metrics, Set scope for(List records : fracturedList) { if(!records.isEmpty()) { - _upsert(records, INDEX_NAME, TYPE_NAME, bloomFilter); + _upsert(records); } } @@ -275,16 +274,16 @@ protected void implementationSpecificPut(List metrics, Set scope _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); start = System.currentTimeMillis(); - List> fracturedScopesAndMetricsList = _fracture(scopesAndMetricNames); + List> fracturedScopesAndMetricsList = _fractureScopeAndMetrics(scopesAndMetricNames); - for(List records : fracturedScopesAndMetricsList) { + for(List records : fracturedScopesAndMetricsList) { if(!records.isEmpty()) { - _upsert(records, SCOPE_AND_METRIC_INDEX_NAME, SCOPE_AND_METRIC_TYPE_NAME, bloomFilterScopeAndMetricOnly); + _upsertScopeAndMetrics(records); } } count = 0; - for(List records : fracturedScopesAndMetricsList) { + for(List records : fracturedScopesAndMetricsList) { count += records.size(); } @@ -328,6 +327,29 @@ protected List> _fracture(List metrics) { return fracturedList; } + /* Convert the given list of scope and metric names to a list of scope and metric only schema records. + * At the same time, fracture the records list if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. + */ + protected List> _fractureScopeAndMetrics(Set> scopesAndMetricNames) { + List> fracturedList = new ArrayList<>(); + + List records = new ArrayList<>(_bulkIndexingSize); + for(Pair scopeAndMetric : scopesAndMetricNames) { + records.add(new ScopeAndMetricOnlySchemaRecord(scopeAndMetric.getLeft(), scopeAndMetric.getRight())); + + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + } + + if(!records.isEmpty()) { + fracturedList.add(records); + } + + return fracturedList; + } + /* Convert the given list of scopes to a list of scope only schema records. 
At the same time, fracture the records list * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. */ @@ -678,11 +700,11 @@ private List _analyzedTokens(String query) { } } - private void _upsert(List records, String indexName, String docType, BloomFilter bloomFilter) { + private void _upsert(List records) { String requestUrl = new StringBuilder().append("/") - .append(indexName) + .append(INDEX_NAME) .append("/") - .append(docType) + .append(TYPE_NAME) .append("/") .append("_bulk") .toString(); @@ -722,13 +744,64 @@ private void _upsert(List records, String indexName, String } } //add to bloom filter - _addToBloomFilter(records, bloomFilter); + _addToBloomFilter(records); } catch(IOException e) { throw new SystemException("Failed to parse response of put metrics. The response was: " + strResponse, e); } } + private void _upsertScopeAndMetrics(List records) { + String requestUrl = new StringBuilder().append("/") + .append(SCOPE_AND_METRIC_INDEX_NAME) + .append("/") + .append(SCOPE_AND_METRIC_TYPE_NAME) + .append("/") + .append("_bulk") + .toString(); + + String strResponse = ""; + + ScopeAndMetricOnlySchemaRecordList recordList = new ScopeAndMetricOnlySchemaRecordList(records, _idgenHashAlgo); + + try { + String requestBody = _scopeAndMetricOnlyMapper.writeValueAsString(recordList); + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); + strResponse = extractResponse(response); + } catch (IOException e) { + //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? + throw new SystemException(e); + } + + try { + PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); + //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. + if(putResponse.errors) { + List recordsToRemove = new ArrayList<>(); + for(Item item : putResponse.items) { + if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { + _logger.warn("Failed to index scope and metric record. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); + recordsToRemove.add(recordList.getRecord(item.create._id)); + } + + if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { + _logger.warn("Scope and metric index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error)); + recordsToRemove.add(recordList.getRecord(item.index._id)); + } + } + if(recordsToRemove.size() != 0) { + _logger.info("{} records were not written to ES", recordsToRemove.size()); + records.removeAll(recordsToRemove); + } + } + //add to bloom filter + _addToBloomFilterScopeAndMetricOnly(records); + + } catch(IOException e) { + throw new SystemException("Failed to parse response of put scope and metric names.
The response was: " + strResponse, e); + } + } + private void _upsertScopes(List records) { String requestUrl = new StringBuilder().append("/") .append(SCOPE_INDEX_NAME) @@ -780,7 +853,7 @@ private void _upsertScopes(List records) { } } - protected void _addToBloomFilter(List records, BloomFilter bloomFilter){ + protected void _addToBloomFilter(List records){ _logger.info("Adding {} records into bloom filter.", records.size()); for(MetricSchemaRecord record : records) { String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); @@ -788,6 +861,14 @@ protected void _addToBloomFilter(List records, BloomFilter records){ + _logger.info("Adding {} records into scope and metric only bloom filter.", records.size()); + for(ScopeAndMetricOnlySchemaRecord record : records) { + String key = constructScopeAndMetricOnlyKey(record.getScope(), record.getMetric()); + bloomFilterScopeAndMetricOnly.put(key); + } + } + protected void _addToBloomFilterScopeOnly(List records){ _logger.info("Adding {} records into scope only bloom filter.", records.size()); for(ScopeOnlySchemaRecord record : records) { @@ -1039,6 +1120,19 @@ private ObjectMapper _createObjectMapper() { return mapper; } + private ObjectMapper _createScopeAndMetricOnlyObjectMapper() { + ObjectMapper mapper = new ObjectMapper(); + + mapper.setSerializationInclusion(Include.NON_NULL); + SimpleModule module = new SimpleModule(); + module.addSerializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Serializer()); + module.addDeserializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Deserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); + mapper.registerModule(module); + + return mapper; + } + private ObjectMapper _createScopeOnlyObjectMapper() { ObjectMapper mapper = new ObjectMapper(); diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java new file mode 100644 index 000000000..d75d1e2f8 --- /dev/null +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java @@ -0,0 +1,129 @@ +package com.salesforce.dva.argus.service.schema; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import com.salesforce.dva.argus.entity.ScopeAndMetricOnlySchemaRecord; +import org.apache.commons.codec.digest.DigestUtils; + +import com.fasterxml.jackson.annotation.JsonInclude.Include; +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.fasterxml.jackson.databind.node.JsonNodeType; +import com.salesforce.dva.argus.service.SchemaService.RecordType; +import com.salesforce.dva.argus.service.schema.MetricSchemaRecordList.HashAlgorithm; + +import net.openhft.hashing.LongHashFunction; + +/** + * Represents a list of scope and metric names from discovery queries. 
+ * Internally it has a mapping from hash id of scope and metric names to the actual scope and metric names. + * + * @author Naveen Reddy Karri (nkarri@salesforce.com) + */ +public class ScopeAndMetricOnlySchemaRecordList { + + private Map _idToSchemaRecordMap = new HashMap<>(); + private String _scrollID; + + public ScopeAndMetricOnlySchemaRecordList(List records, String scrollID) { + int count = 0; + for(ScopeAndMetricOnlySchemaRecord record : records) { + _idToSchemaRecordMap.put(String.valueOf(count++), record); + } + setScrollID(scrollID); + } + + public ScopeAndMetricOnlySchemaRecordList(List records, HashAlgorithm algorithm) { + for(ScopeAndMetricOnlySchemaRecord record : records) { + String id = null; + String scopeAndMetricName = ScopeAndMetricOnlySchemaRecord.print(record); + if(HashAlgorithm.MD5.equals(algorithm)) { + id = DigestUtils.md5Hex(scopeAndMetricName); + } else { + id = String.valueOf(LongHashFunction.xx().hashChars(scopeAndMetricName)); + } + _idToSchemaRecordMap.put(id, record); + } + } + + public List getRecords() { + return new ArrayList<>(_idToSchemaRecordMap.values()); + } + + public String getScrollID() { + return _scrollID; + } + + public void setScrollID(String scrollID) { + this._scrollID = scrollID; + } + + ScopeAndMetricOnlySchemaRecord getRecord(String id) { + return _idToSchemaRecordMap.get(id); + } + + static class Serializer extends JsonSerializer { + + @Override + public void serialize(ScopeAndMetricOnlySchemaRecordList list, JsonGenerator jgen, SerializerProvider provider) + throws IOException { + + ObjectMapper mapper = new ObjectMapper(); + mapper.setSerializationInclusion(Include.NON_NULL); + + for(Map.Entry entry : list._idToSchemaRecordMap.entrySet()) { + jgen.writeRaw("{ \"index\" : {\"_id\" : \"" + entry.getKey() + "\"}}"); + jgen.writeRaw(System.lineSeparator()); + String fieldsData = mapper.writeValueAsString(entry.getValue()); + String timeStampField = "\"mts\":" + System.currentTimeMillis(); + jgen.writeRaw(fieldsData.substring(0, fieldsData.length()-1) + "," + timeStampField + "}"); + jgen.writeRaw(System.lineSeparator()); + } + } + } + + static class Deserializer extends JsonDeserializer { + + @Override + public ScopeAndMetricOnlySchemaRecordList deserialize(JsonParser jp, DeserializationContext context) + throws IOException { + + String scrollID = null; + List records = Collections.emptyList(); + + JsonNode rootNode = jp.getCodec().readTree(jp); + if(rootNode.has("_scroll_id")) { + scrollID = rootNode.get("_scroll_id").asText(); + } + JsonNode hits = rootNode.get("hits").get("hits"); + + if(JsonNodeType.ARRAY.equals(hits.getNodeType())) { + records = new ArrayList<>(hits.size()); + Iterator iter = hits.elements(); + while(iter.hasNext()) { + JsonNode hit = iter.next(); + JsonNode source = hit.get("_source"); + + JsonNode scopeNode = source.get(RecordType.SCOPE.getName()); + JsonNode metricNode = source.get(RecordType.METRIC.getName()); + + records.add(new ScopeAndMetricOnlySchemaRecord(scopeNode.asText(), metricNode.asText())); + } + } + + return new ScopeAndMetricOnlySchemaRecordList(records, scrollID); + } + } +} \ No newline at end of file diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java index a0f15d45e..d34c61273 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java +++ 
b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeOnlySchemaRecordList.java @@ -55,7 +55,7 @@ public ScopeOnlySchemaRecordList(List records, HashAlgori } else { id = String.valueOf(LongHashFunction.xx().hashChars(scopeOnly)); } - _idToSchemaRecordMap.put(id, new ScopeOnlySchemaRecord(scopeOnly)); + _idToSchemaRecordMap.put(id, record); } } @@ -79,7 +79,7 @@ static class Serializer extends JsonSerializer { @Override public void serialize(ScopeOnlySchemaRecordList list, JsonGenerator jgen, SerializerProvider provider) - throws IOException, JsonProcessingException { + throws IOException { ObjectMapper mapper = new ObjectMapper(); mapper.setSerializationInclusion(Include.NON_NULL); @@ -99,7 +99,7 @@ static class Deserializer extends JsonDeserializer { @Override public ScopeOnlySchemaRecordList deserialize(JsonParser jp, DeserializationContext context) - throws IOException, JsonProcessingException { + throws IOException { String scrollID = null; List records = Collections.emptyList(); diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java index 70eaa4c0f..920bc72a8 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java @@ -1,5 +1,6 @@ package com.salesforce.dva.argus.service.schema; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.lang.reflect.Field; @@ -33,75 +34,159 @@ * */ public class AbstractSchemaServiceTest extends AbstractTest { - + + private int scopesCount = 0; + private int scopeAndMetricsCount = 0; + private int metricsCount = 0; + @Test public void testPutEverythingCached() { List metrics = createRandomMetrics("test-scope", "test-metric", 10); + + metrics.addAll(createRandomMetrics(null, null, 10)); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - final AtomicInteger count = new AtomicInteger(); - ElasticSearchSchemaService spyService = _initializeSpyService(service, count); - + + ElasticSearchSchemaService spyService = _initializeSpyService(service); + spyService.put(metrics); + + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); + + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } + + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + // add to bloom filter cache - spyService._addToBloomFilter(spyService._fracture(metrics).get(0), AbstractSchemaService.bloomFilter); - assertTrue(count.get() == metrics.size()); + spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); + spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); + spyService.put(metrics); // count should be same since we are re-reading cached value - assertTrue(count.get() == metrics.size()); + + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); } @Test public void 
testPutPartialCached() { List metrics = createRandomMetrics("test-scope", "test-metric", 10); - List newMetrics = createRandomMetrics("test-scope", "test-metric1", 5); - Set total = new HashSet<>(metrics); - total.addAll(newMetrics); - + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - final AtomicInteger count = new AtomicInteger(); - ElasticSearchSchemaService spyService = _initializeSpyService(service, count); - + ElasticSearchSchemaService spyService = _initializeSpyService(service); + + spyService.put(metrics); + + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); + + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } + + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + + // add to bloom filter cache + spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); + spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); + + List newMetrics = createRandomMetrics(null, null, 10); + + // 1st metric already in cache (partial case scenario), and now we call put with both list of metrics + + initCounters(); spyService.put(metrics); - // 1st metric cached - spyService._addToBloomFilter(spyService._fracture(metrics).get(0), AbstractSchemaService.bloomFilter); - assertTrue(count.get() == metrics.size()); - // 1st metric already in cache (partial case scenario), and now 2nd metric will also be added to cache. 
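A note on the caching contract these tests exercise: the dedup being asserted is Guava's standard check-then-put bloom filter pattern, as used in AbstractSchemaService.put(). The sketch below is illustrative only; the class name, cache key format, and sizing constants are assumptions that mirror the service defaults shown earlier, not part of this patch.

    import java.nio.charset.Charset;
    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;

    public class BloomFilterDedupSketch {
        public static void main(String[] args) {
            // Construction mirrors _flushBloomFilter: expected insertions plus target false-positive rate.
            BloomFilter<CharSequence> cache = BloomFilter.create(
                    Funnels.stringFunnel(Charset.defaultCharset()), 1_000_000, 0.00001);

            String key = "test-scope:test-metric"; // hypothetical cache key
            if (!cache.mightContain(key)) {
                // First sighting: the record would be handed to implementationSpecificPut, then cached.
                cache.put(key);
            }

            // A repeated put of the same schema record is now skipped,
            // subject to the configured false-positive rate.
            System.out.println(cache.mightContain(key)); // true
        }
    }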
- // Total number of metrics in cache = metric1.size() and metric2.size() - spyService.put(new ArrayList<>(total)); - spyService._addToBloomFilter(spyService._fracture(new ArrayList<>(total)).get(0), AbstractSchemaService.bloomFilter); - assertTrue(count.get() == total.size()); + spyService.put(newMetrics); + + scopeNames.clear(); + scopeAndMetricNames.clear(); + + for(Metric m : newMetrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } + + assertEquals(metricsCount, newMetrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); } @Test public void testPutNothingCached() { List metrics = createRandomMetrics("test-scope", "test-metric", 10); - List newMetrics = createRandomMetrics("test-scope", "test-metric1", 5); - + + metrics.addAll(createRandomMetrics(null, null, 10)); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - final AtomicInteger count = new AtomicInteger(); - ElasticSearchSchemaService spyService = _initializeSpyService(service, count); - + ElasticSearchSchemaService spyService = _initializeSpyService(service); + spyService.put(metrics); - assertTrue(count.get() == metrics.size()); - spyService.put(newMetrics); - assertTrue(count.get() == metrics.size() + newMetrics.size()); + + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); + + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } + + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + + spyService.put(metrics); + + assertEquals(metricsCount, 2 * metrics.size()); + assertEquals(scopeAndMetricsCount, 2 * scopeAndMetricNames.size()); + assertEquals(scopesCount, 2 * scopeNames.size()); } - private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service, final AtomicInteger count) { + private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service) { ElasticSearchSchemaService spyService = Mockito.spy(service); - + initCounters(); + Mockito.doAnswer(new Answer() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { @SuppressWarnings("unchecked") List metrics = List.class.cast(invocation.getArguments()[0]); - count.addAndGet(metrics.size()); - return null; + + Set scopeNames = Set.class.cast(invocation.getArguments()[1]); + + Set> scopeAndMetricNames = Set.class.cast(invocation.getArguments()[2]); + + scopesCount += scopeNames.size(); + scopeAndMetricsCount += scopeAndMetricNames.size(); + metricsCount += metrics.size(); + + return null; } }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any()); return spyService; } - + + private void initCounters() { + scopesCount = 0; + scopeAndMetricsCount = 0; + metricsCount = 0; + } + @Test public void getNumHoursUntilNextFlushBloomFilter() { ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java index 1247b3f52..e12cffe90 100644 --- 
a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java @@ -1,9 +1,14 @@ package com.salesforce.dva.argus.service.schema; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.JsonElement; +import com.google.gson.JsonParser; import com.salesforce.dva.argus.AbstractTest; import com.salesforce.dva.argus.entity.MetricSchemaRecordQuery; import com.salesforce.dva.argus.service.SchemaService; import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; import org.elasticsearch.client.RestClient; import org.junit.Test; import org.mockito.*; @@ -20,37 +25,180 @@ public class ElasticSearchSchemaServiceTest extends AbstractTest { private RestClient restClient; - private String reply = "{\n" + - "\t\"took\": 1,\n" + - "\t\"timed_out\": false,\n" + - "\t\"_shards\": {\n" + - "\t\t\"total\": 6,\n" + - "\t\t\"successful\": 6,\n" + - "\t\t\"failed\": 0\n" + - "\t},\n" + - "\t\"hits\": {\n" + - "\t\t\"total\": 426,\n" + - "\t\t\"max_score\": 0.0,\n" + - "\t\t\"hits\": []\n" + - "\t},\n" + - "\t\"aggregations\": {\n" + - "\t\t\"distinct_values\": {\n" + - "\t\t\t\"doc_count_error_upper_bound\": 0,\n" + - "\t\t\t\"sum_other_doc_count\": 424,\n" + - "\t\t\t\"buckets\": [\n" + - "\t\t\t\t{\n" + - "\t\t\t\t\t\"key\": \"system.name1\\n\",\n" + - "\t\t\t\t\t\"doc_count\": 1\n" + - "\t\t\t\t},\n" + - "\t\t\t\t{\n" + - "\t\t\t\t\t\"key\": \"system.name2\\n\",\n" + - "\t\t\t\t\t\"doc_count\": 1\n" + - "\t\t\t\t}\n" + - "\t\t\t]\n" + - "\t\t}\n" + - "\t}\n" + - "}"; - + private String reply = String.join("\n", + "{", + " \"took\": 1,", + " \"timed_out\": false,", + " \"_shards\": {", + " \"total\": 6,", + " \"successful\": 6,", + " \"failed\": 0", + " },", + " \"hits\": {", + " \"total\": 426,", + " \"max_score\": 0.0,", + " \"hits\": []", + " },", + " \"aggregations\": {", + " \"distinct_values\": {", + " \"doc_count_error_upper_bound\": 0,", + " \"sum_other_doc_count\": 424,", + " \"buckets\": [", + " {", + " \"key\": \"system.name1\\n\",", + " \"doc_count\": 1", + " },", + " {", + " \"key\": \"system.name2\\n\",", + " \"doc_count\": 1", + " }", + " ]", + " }", + " }", + "}"); + + private String scopeQuery = String.join("\n", + "{", + " \"query\": {", + " \"bool\": {", + " \"filter\": [", + " {", + " \"regexp\": {", + " \"scope.raw\": \"system.*\"", + " }", + " }", + " ]", + " }", + " },", + " \"size\": 0,", + " \"aggs\": {", + " \"distinct_values\": {", + " \"terms\": {", + " \"field\": \"scope.raw\",", + " \"order\": {", + " \"_term\": \"asc\"", + " },", + " \"size\": 10000,", + " \"execution_hint\": \"map\"", + " }", + " }", + " }", + "}"); + + private String scopeAndMetricQuery = String.join("\n", + "{", + " \"query\": {", + " \"bool\": {", + " \"filter\": [", + " {", + " \"regexp\": {", + " \"metric.raw\": \"argus.*\"", + " }", + " },", + " {", + " \"regexp\": {", + " \"scope.raw\": \"system\"", + " }", + " }", + " ]", + " }", + " },", + " \"size\": 0,", + " \"aggs\": {", + " \"distinct_values\": {", + " \"terms\": {", + " \"field\": \"metric.raw\",", + " \"order\": {", + " \"_term\": \"asc\"", + " },", + " \"size\": 10000,", + " \"execution_hint\": \"map\"", + " }", + " }", + " }", + "}"); + + private String metricQuery1 = String.join("\n", + "{", + " \"query\": {", + " \"bool\": {", + " \"filter\": [", + " {", + " \"regexp\": {", + " \"metric.raw\": \"argus\"", + " }", + " },", + " {", + " 
\"regexp\": {", + " \"scope.raw\": \"system\"", + " }", + " },", + " {", + " \"regexp\": {", + " \"tagk.raw\": \"device\"", + " }", + " },", + " {", + " \"regexp\": {", + " \"tagv.raw\": \"abc.*\"", + " }", + " }", + " ]", + " }", + " },", + " \"size\": 0,", + " \"aggs\": {", + " \"distinct_values\": {", + " \"terms\": {", + " \"field\": \"tagv.raw\",", + " \"order\": {", + " \"_term\": \"asc\"", + " },", + " \"size\": 10000,", + " \"execution_hint\": \"map\"", + " }", + " }", + " }", + "}"); + + private String metricQuery2 = String.join("\n", + "{", + " \"query\": {", + " \"bool\": {", + " \"filter\": [", + " {", + " \"regexp\": {", + " \"metric.raw\": \"argus\"", + " }", + " },", + " {", + " \"regexp\": {", + " \"scope.raw\": \"system\"", + " }", + " },", + " {", + " \"regexp\": {", + " \"namespace.raw\": \"common.*\"", + " }", + " }", + " ]", + " }", + " },", + " \"size\": 0,", + " \"aggs\": {", + " \"distinct_values\": {", + " \"terms\": {", + " \"field\": \"namespace.raw\",", + " \"order\": {", + " \"_term\": \"asc\"", + " },", + " \"size\": 10000,", + " \"execution_hint\": \"map\"", + " }", + " }", + " }", + "}"); + @Test public void testGetUniqueUsingScopeSchemaIndex() throws IOException { @@ -76,14 +224,17 @@ public void testGetUniqueUsingScopeSchemaIndex() throws IOException { verify(restClient, times(1)).performRequest(any(), requestUrlCaptor.capture(), any(), queryJsonCaptor.capture()); String requestUrl = requestUrlCaptor.getValue(); + String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue())); + assertEquals(scopeQuery, queryJson); assertEquals("/scopenames/scope_type/_search", requestUrl); assertTrue(queryForScope.isQueryOnlyOnScope()); + assertTrue(queryForScope.isQueryOnlyOnScopeAndMetric()); } @Test - public void testGetUniqueUsingMetricSchemaIndex() throws IOException { + public void testGetUniqueUsingScopeAndMetricSchemaIndex() throws IOException { MetricSchemaRecordQuery queryForMetric = new MetricSchemaRecordQuery.MetricSchemaRecordQueryBuilder().scope("system") .metric("argus*") @@ -93,7 +244,7 @@ public void testGetUniqueUsingMetricSchemaIndex() throws IOException { .limit(2) .build(); - SchemaService.RecordType scopeType = SchemaService.RecordType.SCOPE; + SchemaService.RecordType scopeType = SchemaService.RecordType.METRIC; ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); @@ -107,10 +258,89 @@ public void testGetUniqueUsingMetricSchemaIndex() throws IOException { verify(restClient, times(1)).performRequest(any(), requestUrlCaptor.capture(), any(), queryJsonCaptor.capture()); String requestUrl = requestUrlCaptor.getValue(); + String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue())); + + assertEquals(scopeAndMetricQuery, queryJson); + assertEquals("/scopemetricnames/scopemetric_type/_search", requestUrl); + + assertFalse(queryForMetric.isQueryOnlyOnScope()); + assertTrue(queryForMetric.isQueryOnlyOnScopeAndMetric()); + } + + @Test + public void testGetUniqueUsingMetricSchemaIndex1() throws IOException { + MetricSchemaRecordQuery queryForMetric = new MetricSchemaRecordQuery.MetricSchemaRecordQueryBuilder().scope("system") + .metric("argus") + .tagKey("device") + .tagValue("abc*") + .namespace("*") + .limit(2) + .build(); + + SchemaService.RecordType scopeType = SchemaService.RecordType.TAGV; + + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), 
system.getServiceFactory().getMonitorService()); + + ElasticSearchSchemaService spyService = _initializeSpyService(service, reply); + + spyService.getUnique(queryForMetric, scopeType); + + ArgumentCaptor requestUrlCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor queryJsonCaptor = ArgumentCaptor.forClass(StringEntity.class); + + verify(restClient, times(1)).performRequest(any(), requestUrlCaptor.capture(), any(), queryJsonCaptor.capture()); + + String requestUrl = requestUrlCaptor.getValue(); + String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue())); + + assertEquals(metricQuery1, queryJson); assertEquals("/metadata_index/metadata_type/_search", requestUrl); assertFalse(queryForMetric.isQueryOnlyOnScope()); + assertFalse(queryForMetric.isQueryOnlyOnScopeAndMetric()); + } + + @Test + public void testGetUniqueUsingMetricSchemaIndex2() throws IOException { + + MetricSchemaRecordQuery queryForMetric = new MetricSchemaRecordQuery.MetricSchemaRecordQueryBuilder().scope("system") + .metric("argus") + .tagKey("*") + .tagValue("*") + .namespace("common*") + .limit(2) + .build(); + + SchemaService.RecordType scopeType = SchemaService.RecordType.NAMESPACE; + + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + + ElasticSearchSchemaService spyService = _initializeSpyService(service, reply); + + spyService.getUnique(queryForMetric, scopeType); + + ArgumentCaptor requestUrlCaptor = ArgumentCaptor.forClass(String.class); + ArgumentCaptor queryJsonCaptor = ArgumentCaptor.forClass(StringEntity.class); + + verify(restClient, times(1)).performRequest(any(), requestUrlCaptor.capture(), any(), queryJsonCaptor.capture()); + + String requestUrl = requestUrlCaptor.getValue(); + String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue())); + + assertEquals(metricQuery2, queryJson); + assertEquals("/metadata_index/metadata_type/_search", requestUrl); + + assertFalse(queryForMetric.isQueryOnlyOnScope()); + assertFalse(queryForMetric.isQueryOnlyOnScopeAndMetric()); + } + + private String convertToPrettyJson(String jsonString) { + JsonParser parser = new JsonParser(); + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + + JsonElement el = parser.parse(jsonString); + return gson.toJson(el); } private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service, String reply) { From 942124854121be34810a8319a8f423138cde6100 Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Tue, 24 Jul 2018 16:25:59 -0700 Subject: [PATCH 16/27] Javadoc fixes --- .../dva/argus/service/schema/ElasticSearchSchemaService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 6b08391ce..3f1ab2272 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -1059,7 +1059,7 @@ protected void setRestClient(RestClient restClient) { this._esRestClient = restClient; } - /** Helper to process the response. + /** Helper to process the response. * Throws a SystemException when the http status code is outside of the range 200 - 300.
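
The body of this helper is not shown in the hunk; as a sketch only, the check it describes might look like the following (the shape of the method is assumed, and Argus's SystemException is replaced with a plain RuntimeException here):

    import java.io.IOException;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Response;

    final class ResponseSketch {
        // Assumed shape: return the body for 2xx responses, throw otherwise.
        static String extractResponse(Response response) {
            int status = response.getStatusLine().getStatusCode();
            try {
                String body = response.getEntity() == null
                        ? "" : EntityUtils.toString(response.getEntity());
                if (status < 200 || status >= 300) {
                    // The real helper throws Argus's SystemException instead.
                    throw new RuntimeException("ES returned " + status + ": " + body);
                }
                return body;
            } catch (IOException ex) {
                throw new RuntimeException("Could not read ES response body.", ex);
            }
        }
    }
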
* @param response ES response * @return Stringified response From aaa82a4766f9e260bc5690f78eaa7b81a9a091ad Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Tue, 24 Jul 2018 17:01:21 -0700 Subject: [PATCH 17/27] CR Fixes --- .../service/schema/AbstractSchemaService.java | 750 ++--- .../schema/ElasticSearchSchemaService.java | 2889 +++++++++-------- .../schema/AbstractSchemaServiceTest.java | 266 +- .../ElasticSearchSchemaServiceTest.java | 2 +- 4 files changed, 1956 insertions(+), 1951 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index af851a8f1..8f78303af 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -37,379 +37,379 @@ * @author Dilip Devaraj (ddevaraj@salesforce.com) */ public abstract class AbstractSchemaService extends DefaultService implements SchemaService { - private static final long POLL_INTERVAL_MS = 10 * 60 * 1000L; - private static final int DAY_IN_SECONDS = 24 * 60 * 60; - private static final int HOUR_IN_SECONDS = 60 * 60; - - /* Have three separate bloom filters one for metrics schema, one only for scope names schema and one only for scope name and metric name schema. - * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. - * Hence we can avoid the extra call to populate scopenames index on ES in subsequent Kafka reads. - * The same logic applies to scope name and metric name schema. - */ - protected static BloomFilter bloomFilter; - protected static BloomFilter bloomFilterScopeOnly; - protected static BloomFilter bloomFilterScopeAndMetricOnly; - private Random rand = new Random(); - private int randomNumber = rand.nextInt(); - private int bloomFilterExpectedNumberInsertions; - private double bloomFilterErrorRate; - private int bloomFilterScopeOnlyExpectedNumberInsertions; - private double bloomFilterScopeOnlyErrorRate; - private int bloomFilterScopeAndMetricOnlyExpectedNumberInsertions; - private double bloomFilterScopeAndMetricOnlyErrorRate; - private final Logger _logger = LoggerFactory.getLogger(getClass()); - private final Thread _bloomFilterMonitorThread; - protected final boolean _syncPut; - private int bloomFilterFlushHourToStartAt; - private ScheduledExecutorService scheduledExecutorService; - - protected AbstractSchemaService(SystemConfiguration config) { - super(config); - - bloomFilterExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_ERROR_RATE.getName(), - Property.BLOOMFILTER_ERROR_RATE.getDefaultValue())); - - bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterScopeOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getName(), - Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getDefaultValue())); - - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions = 
Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterScopeAndMetricOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getName(), - Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getDefaultValue())); - - bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); - - bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); - bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); - - _syncPut = Boolean.parseBoolean( - config.getValue(Property.SYNC_PUT.getName(), Property.SYNC_PUT.getDefaultValue())); - - _bloomFilterMonitorThread = new Thread(new BloomFilterMonitorThread(), "bloom-filter-monitor"); - _bloomFilterMonitorThread.start(); - - bloomFilterFlushHourToStartAt = Integer.parseInt(config.getValue(Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getName(), - Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getDefaultValue())); - createScheduledExecutorService(bloomFilterFlushHourToStartAt); - } - - @Override - public void put(Metric metric) { - requireNotDisposed(); - SystemAssert.requireArgument(metric != null, "Metric cannot be null."); - put(Arrays.asList(metric)); - } - - @Override - public void put(List metrics) { - requireNotDisposed(); - SystemAssert.requireArgument(metrics != null, "Metric list cannot be null."); - - // Create a list of metricsToPut that do not exist on the BLOOMFILTER and then call implementation - // specific put with only those subset of metricsToPut. - List metricsToPut = new ArrayList<>(metrics.size()); - Set scopesToPut = new HashSet<>(metrics.size()); - - Set> scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); - - for(Metric metric : metrics) { - // check metric schema bloom filter - if(metric.getTags().isEmpty()) { - // if metric does not have tags - String key = constructKey(metric, null); - boolean found = bloomFilter.mightContain(key); - if(!found) { - metricsToPut.add(metric); - } - } else { - // if metric has tags - boolean newTags = false; - for(Entry tagEntry : metric.getTags().entrySet()) { - String key = constructKey(metric, tagEntry); - boolean found = bloomFilter.mightContain(key); - if(!found) { - newTags = true; - } - } - - if(newTags) { - metricsToPut.add(metric); - } - } - - String scopeName = metric.getScope(); - String metricName = metric.getMetric(); - - // Check scope only bloom filter - String key = constructScopeOnlyKey(scopeName); - boolean found = bloomFilterScopeOnly.mightContain(key); - if(!found) { - scopesToPut.add(scopeName); - } - - // Check scope and metric only bloom filter - key = constructScopeAndMetricOnlyKey(scopeName, metricName); - found = bloomFilterScopeAndMetricOnly.mightContain(key); - if(!found) { - scopesAndMetricsNamesToPut.add(Pair.of(scopeName, metricName)); - } - } - - implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); - } - - /* - * Calls the implementation specific write for indexing the records - * - * @param metrics The metrics metadata that will be written to a separate index. 
- * @param scopeNames The scope names that that will be written to a separate index. - * @param scopesAndMetricNames The scope and metric names that that will be written to a separate index. - */ - protected abstract void implementationSpecificPut(List metrics, Set scopeNames, - Set> scopesAndMetricNames); - - @Override - public void dispose() { - requireNotDisposed(); - if (_bloomFilterMonitorThread != null && _bloomFilterMonitorThread.isAlive()) { - _logger.info("Stopping bloom filter monitor thread."); - _bloomFilterMonitorThread.interrupt(); - _logger.info("Bloom filter monitor thread interrupted."); - try { - _logger.info("Waiting for bloom filter monitor thread to terminate."); - _bloomFilterMonitorThread.join(); - } catch (InterruptedException ex) { - _logger.warn("Bloom filter monitor thread was interrupted while shutting down."); - } - _logger.info("System monitoring stopped."); - } else { - _logger.info("Requested shutdown of bloom filter monitor thread aborted, as it is not yet running."); - } - shutdownScheduledExecutorService(); - } - - @Override - public abstract Properties getServiceProperties(); - - @Override - public abstract List get(MetricSchemaRecordQuery query); - - @Override - public abstract List getUnique(MetricSchemaRecordQuery query, RecordType type); - - @Override - public abstract List keywordSearch(KeywordQuery query); - - protected String constructKey(Metric metric, Entry tagEntry) { - StringBuilder sb = new StringBuilder(metric.getScope()); - sb.append('\0').append(metric.getMetric()); - - if(metric.getNamespace() != null) { - sb.append('\0').append(metric.getNamespace()); - } - - if(tagEntry != null) { - sb.append('\0').append(tagEntry.getKey()).append('\0').append(tagEntry.getValue()); - } - - // Add randomness for each instance of bloom filter running on different - // schema clients to reduce probability of false positives that metric schemas are not written to ES - sb.append('\0').append(randomNumber); - - return sb.toString(); - } - - protected String constructKey(String scope, String metric, String tagk, String tagv, String namespace) { - - StringBuilder sb = new StringBuilder(scope); - - if(!StringUtils.isEmpty(metric)) { - sb.append('\0').append(metric); - } - - if(!StringUtils.isEmpty(namespace)) { - sb.append('\0').append(namespace); - } - - if(!StringUtils.isEmpty(tagk)) { - sb.append('\0').append(tagk); - } - - if(!StringUtils.isEmpty(tagv)) { - sb.append('\0').append(tagv); - } - - // Add randomness for each instance of bloom filter running on different - // schema clients to reduce probability of false positives that metric schemas are not written to ES - sb.append('\0').append(randomNumber); - - return sb.toString(); - } - - protected String constructScopeOnlyKey(String scope) { - - return constructKey(scope, null, null, null, null); - } - - protected String constructScopeAndMetricOnlyKey(String scope, String metric) { - - return constructKey(scope, metric, null, null, null); - } - - private void createScheduledExecutorService(int targetHourToStartAt){ - scheduledExecutorService = Executors.newScheduledThreadPool(1); - int initialDelayInSeconds = getNumHoursUntilTargetHour(targetHourToStartAt) * HOUR_IN_SECONDS; - BloomFilterFlushThread bloomFilterFlushThread = new BloomFilterFlushThread(); - scheduledExecutorService.scheduleAtFixedRate(bloomFilterFlushThread, initialDelayInSeconds, DAY_IN_SECONDS, TimeUnit.SECONDS); - } - - private void shutdownScheduledExecutorService(){ - _logger.info("Shutting down scheduled bloom filter flush executor 
service"); - scheduledExecutorService.shutdown(); - try { - scheduledExecutorService.awaitTermination(10, TimeUnit.SECONDS); - } catch (InterruptedException ex) { - _logger.warn("Shutdown of executor service was interrupted."); - Thread.currentThread().interrupt(); - } - } - - protected int getNumHoursUntilTargetHour(int targetHour){ - _logger.info("Initialized bloom filter flushing out, at {} hour of day", targetHour); - Calendar calendar = Calendar.getInstance(); - int hour = calendar.get(Calendar.HOUR_OF_DAY); - return hour < targetHour ? (targetHour - hour) : (targetHour + 24 - hour); - } - - /** - * The set of implementation specific configuration properties. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - public enum Property { - SYNC_PUT("service.property.schema.sync.put", "false"), - BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.expected.number.insertions", "40"), - BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), - - /* - * Estimated Filter Size using bloomFilter 1 million entries - * https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= 2.86MiB - * Storing in a Set 100K entries with avg length of 15 chars would be 100K * 15 * 2 B = 30B * 100K = 3 MB - * If # of entries is 1 million, then it would be 30 MB resulting in savings in space. - */ - - BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), - BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), - - /* - * Estimated Filter Size using bloomFilter 10 million entries - * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB - * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB - * If # of entries is 10 million, then it would be 600 MB resulting in savings in space. - */ - - BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), - BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"), - - /* - * Have a different configured flush start hour for different machines to prevent thundering herd problem. - */ - BLOOM_FILTER_FLUSH_HOUR_TO_START_AT("service.property.schema.bloomfilter.flush.hour.to.start.at","2"); - - private final String _name; - private final String _defaultValue; - - private Property(String name, String defaultValue) { - _name = name; - _defaultValue = defaultValue; - } - - /** - * Returns the property name. - * - * @return The property name. - */ - public String getName() { - return _name; - } - - /** - * Returns the default value for the property. - * - * @return The default value. - */ - public String getDefaultValue() { - return _defaultValue; - } - } - - - //~ Inner Classes ******************************************************************************************************************************** - - /** - * Bloom Filter monitoring thread. 
- * - * @author Dilip Devaraj (ddevaraj@salesforce.com) - */ - private class BloomFilterMonitorThread implements Runnable { - @Override - public void run() { - _logger.info("Initialized random number for bloom filter key = {}", randomNumber); - while (!Thread.currentThread().isInterrupted()) { - _sleepForPollPeriod(); - if (!Thread.currentThread().isInterrupted()) { - try { - _checkBloomFilterUsage(); - } catch (Exception ex) { - _logger.warn("Exception occurred while checking bloom filter usage.", ex); - } - } - } - } - - private void _checkBloomFilterUsage() { - _logger.info("Metrics Bloom approx no. elements = {}", bloomFilter.approximateElementCount()); - _logger.info("Metrics Bloom expected error rate = {}", bloomFilter.expectedFpp()); - _logger.info("Scope only Bloom approx no. elements = {}", bloomFilterScopeOnly.approximateElementCount()); - _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); - _logger.info("Scope and metric only Bloom approx no. elements = {}", bloomFilterScopeAndMetricOnly.approximateElementCount()); - _logger.info("Scope and metric only Bloom expected error rate = {}", bloomFilterScopeAndMetricOnly.expectedFpp()); - } - - private void _sleepForPollPeriod() { - try { - _logger.info("Sleeping for {}s before checking bloom filter statistics.", POLL_INTERVAL_MS / 1000); - Thread.sleep(POLL_INTERVAL_MS); - } catch (InterruptedException ex) { - _logger.warn("AbstractSchemaService memory monitor thread was interrupted."); - Thread.currentThread().interrupt(); - } - } - } - - private class BloomFilterFlushThread implements Runnable { - @Override - public void run() { - try{ - _flushBloomFilter(); - } catch (Exception ex) { - _logger.warn("Exception occurred while flushing bloom filter.", ex); - } - } - - private void _flushBloomFilter() { - _logger.info("Flushing out bloom filter entries"); - bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); - bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); - bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); - /* Don't need explicit synchronization to prevent slowness majority of the time*/ - randomNumber = rand.nextInt(); - } - } + private static final long POLL_INTERVAL_MS = 10 * 60 * 1000L; + private static final int DAY_IN_SECONDS = 24 * 60 * 60; + private static final int HOUR_IN_SECONDS = 60 * 60; + + /* Have three separate bloom filters one for metrics schema, one only for scope names schema and one only for scope name and metric name schema. + * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. + * Hence we can avoid the extra call to populate scopenames index on ES in subsequent Kafka reads. + * The same logic applies to scope name and metric name schema. 
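
To make the tiering described in this comment concrete: the same incoming metric yields keys of three granularities, each checked against its own filter. A sketch with illustrative names (the NUL separator and per-instance salt mirror constructKey below; the namespace component is omitted for brevity):

    class KeyTierSketch {
        private static final int SALT = new java.util.Random().nextInt();

        static String scopeOnlyKey(String scope) {
            return scope + '\0' + SALT;
        }

        static String scopeAndMetricKey(String scope, String metric) {
            return scope + '\0' + metric + '\0' + SALT;
        }

        static String fullKey(String scope, String metric, String tagk, String tagv) {
            return scope + '\0' + metric + '\0' + tagk + '\0' + tagv + '\0' + SALT;
        }

        public static void main(String[] args) {
            // Ten thousand metrics under one scope produce ten thousand
            // distinct full keys but a single scope-only key, so the
            // scopenames index is written at most once per flush cycle.
            System.out.println(scopeOnlyKey("system"));
            System.out.println(fullKey("system", "argus", "device", "abc"));
        }
    }
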
+ */ + protected static BloomFilter bloomFilter; + protected static BloomFilter bloomFilterScopeOnly; + protected static BloomFilter bloomFilterScopeAndMetricOnly; + private Random rand = new Random(); + private int randomNumber = rand.nextInt(); + private int bloomFilterExpectedNumberInsertions; + private double bloomFilterErrorRate; + private int bloomFilterScopeOnlyExpectedNumberInsertions; + private double bloomFilterScopeOnlyErrorRate; + private int bloomFilterScopeAndMetricOnlyExpectedNumberInsertions; + private double bloomFilterScopeAndMetricOnlyErrorRate; + private final Logger _logger = LoggerFactory.getLogger(getClass()); + private final Thread _bloomFilterMonitorThread; + protected final boolean _syncPut; + private int bloomFilterFlushHourToStartAt; + private ScheduledExecutorService scheduledExecutorService; + + protected AbstractSchemaService(SystemConfiguration config) { + super(config); + + bloomFilterExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_ERROR_RATE.getName(), + Property.BLOOMFILTER_ERROR_RATE.getDefaultValue())); + + bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterScopeOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getName(), + Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getDefaultValue())); + + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterScopeAndMetricOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getDefaultValue())); + + bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); + + bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); + + _syncPut = Boolean.parseBoolean( + config.getValue(Property.SYNC_PUT.getName(), Property.SYNC_PUT.getDefaultValue())); + + _bloomFilterMonitorThread = new Thread(new BloomFilterMonitorThread(), "bloom-filter-monitor"); + _bloomFilterMonitorThread.start(); + + bloomFilterFlushHourToStartAt = Integer.parseInt(config.getValue(Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getName(), + Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getDefaultValue())); + createScheduledExecutorService(bloomFilterFlushHourToStartAt); + } + + @Override + public void put(Metric metric) { + requireNotDisposed(); + SystemAssert.requireArgument(metric != null, "Metric cannot be null."); + put(Arrays.asList(metric)); + } + + @Override + public void put(List metrics) { + requireNotDisposed(); + SystemAssert.requireArgument(metrics != null, "Metric list cannot 
be null."); + + // Create a list of metricsToPut that do not exist on the BLOOMFILTER and then call implementation + // specific put with only those subset of metricsToPut. + List metricsToPut = new ArrayList<>(metrics.size()); + Set scopesToPut = new HashSet<>(metrics.size()); + + Set> scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); + + for(Metric metric : metrics) { + // check metric schema bloom filter + if(metric.getTags().isEmpty()) { + // if metric does not have tags + String key = constructKey(metric, null); + boolean found = bloomFilter.mightContain(key); + if(!found) { + metricsToPut.add(metric); + } + } else { + // if metric has tags + boolean newTags = false; + for(Entry tagEntry : metric.getTags().entrySet()) { + String key = constructKey(metric, tagEntry); + boolean found = bloomFilter.mightContain(key); + if(!found) { + newTags = true; + } + } + + if(newTags) { + metricsToPut.add(metric); + } + } + + String scopeName = metric.getScope(); + String metricName = metric.getMetric(); + + // Check scope only bloom filter + String key = constructScopeOnlyKey(scopeName); + boolean found = bloomFilterScopeOnly.mightContain(key); + if(!found) { + scopesToPut.add(scopeName); + } + + // Check scope and metric only bloom filter + key = constructScopeAndMetricOnlyKey(scopeName, metricName); + found = bloomFilterScopeAndMetricOnly.mightContain(key); + if(!found) { + scopesAndMetricsNamesToPut.add(Pair.of(scopeName, metricName)); + } + } + + implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); + } + + /* + * Calls the implementation specific write for indexing the records + * + * @param metrics The metrics metadata that will be written to a separate index. + * @param scopeNames The scope names that will be written to a separate index. + * @param scopesAndMetricNames The scope and metric names that will be written to a separate index. 
+ */ + protected abstract void implementationSpecificPut(List metrics, Set scopeNames, + Set> scopesAndMetricNames); + + @Override + public void dispose() { + requireNotDisposed(); + if (_bloomFilterMonitorThread != null && _bloomFilterMonitorThread.isAlive()) { + _logger.info("Stopping bloom filter monitor thread."); + _bloomFilterMonitorThread.interrupt(); + _logger.info("Bloom filter monitor thread interrupted."); + try { + _logger.info("Waiting for bloom filter monitor thread to terminate."); + _bloomFilterMonitorThread.join(); + } catch (InterruptedException ex) { + _logger.warn("Bloom filter monitor thread was interrupted while shutting down."); + } + _logger.info("System monitoring stopped."); + } else { + _logger.info("Requested shutdown of bloom filter monitor thread aborted, as it is not yet running."); + } + shutdownScheduledExecutorService(); + } + + @Override + public abstract Properties getServiceProperties(); + + @Override + public abstract List get(MetricSchemaRecordQuery query); + + @Override + public abstract List getUnique(MetricSchemaRecordQuery query, RecordType type); + + @Override + public abstract List keywordSearch(KeywordQuery query); + + protected String constructKey(Metric metric, Entry tagEntry) { + StringBuilder sb = new StringBuilder(metric.getScope()); + sb.append('\0').append(metric.getMetric()); + + if(metric.getNamespace() != null) { + sb.append('\0').append(metric.getNamespace()); + } + + if(tagEntry != null) { + sb.append('\0').append(tagEntry.getKey()).append('\0').append(tagEntry.getValue()); + } + + // Add randomness for each instance of bloom filter running on different + // schema clients to reduce probability of false positives that metric schemas are not written to ES + sb.append('\0').append(randomNumber); + + return sb.toString(); + } + + protected String constructKey(String scope, String metric, String tagk, String tagv, String namespace) { + + StringBuilder sb = new StringBuilder(scope); + + if(!StringUtils.isEmpty(metric)) { + sb.append('\0').append(metric); + } + + if(!StringUtils.isEmpty(namespace)) { + sb.append('\0').append(namespace); + } + + if(!StringUtils.isEmpty(tagk)) { + sb.append('\0').append(tagk); + } + + if(!StringUtils.isEmpty(tagv)) { + sb.append('\0').append(tagv); + } + + // Add randomness for each instance of bloom filter running on different + // schema clients to reduce probability of false positives that metric schemas are not written to ES + sb.append('\0').append(randomNumber); + + return sb.toString(); + } + + protected String constructScopeOnlyKey(String scope) { + + return constructKey(scope, null, null, null, null); + } + + protected String constructScopeAndMetricOnlyKey(String scope, String metric) { + + return constructKey(scope, metric, null, null, null); + } + + private void createScheduledExecutorService(int targetHourToStartAt){ + scheduledExecutorService = Executors.newScheduledThreadPool(1); + int initialDelayInSeconds = getNumHoursUntilTargetHour(targetHourToStartAt) * HOUR_IN_SECONDS; + BloomFilterFlushThread bloomFilterFlushThread = new BloomFilterFlushThread(); + scheduledExecutorService.scheduleAtFixedRate(bloomFilterFlushThread, initialDelayInSeconds, DAY_IN_SECONDS, TimeUnit.SECONDS); + } + + private void shutdownScheduledExecutorService(){ + _logger.info("Shutting down scheduled bloom filter flush executor service"); + scheduledExecutorService.shutdown(); + try { + scheduledExecutorService.awaitTermination(10, TimeUnit.SECONDS); + } catch (InterruptedException ex) { + _logger.warn("Shutdown of 
executor service was interrupted."); + Thread.currentThread().interrupt(); + } + } + + protected int getNumHoursUntilTargetHour(int targetHour){ + _logger.info("Initialized bloom filter flushing out, at {} hour of day", targetHour); + Calendar calendar = Calendar.getInstance(); + int hour = calendar.get(Calendar.HOUR_OF_DAY); + return hour < targetHour ? (targetHour - hour) : (targetHour + 24 - hour); + } + + /** + * The set of implementation specific configuration properties. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + public enum Property { + SYNC_PUT("service.property.schema.sync.put", "false"), + BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.expected.number.insertions", "40"), + BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), + + /* + * Estimated Filter Size using bloomFilter 1 million entries + * https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= 2.86MiB + * Storing in a Set 100K entries with avg length of 15 chars would be 100K * 15 * 2 B = 30B * 100K = 3 MB + * If # of entries is 1 million, then it would be 30 MB resulting in savings in space. + */ + + BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), + BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), + + /* + * Estimated Filter Size using bloomFilter 10 million entries + * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB + * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB + * If # of entries is 10 million, then it would be 600 MB resulting in savings in space. + */ + + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"), + + /* + * Have a different configured flush start hour for different machines to prevent thundering herd problem. + */ + BLOOM_FILTER_FLUSH_HOUR_TO_START_AT("service.property.schema.bloomfilter.flush.hour.to.start.at","2"); + + private final String _name; + private final String _defaultValue; + + private Property(String name, String defaultValue) { + _name = name; + _defaultValue = defaultValue; + } + + /** + * Returns the property name. + * + * @return The property name. + */ + public String getName() { + return _name; + } + + /** + * Returns the default value for the property. + * + * @return The default value. + */ + public String getDefaultValue() { + return _defaultValue; + } + } + + + //~ Inner Classes ******************************************************************************************************************************** + + /** + * Bloom Filter monitoring thread. + * + * @author Dilip Devaraj (ddevaraj@salesforce.com) + */ + private class BloomFilterMonitorThread implements Runnable { + @Override + public void run() { + _logger.info("Initialized random number for bloom filter key = {}", randomNumber); + while (!Thread.currentThread().isInterrupted()) { + _sleepForPollPeriod(); + if (!Thread.currentThread().isInterrupted()) { + try { + _checkBloomFilterUsage(); + } catch (Exception ex) { + _logger.warn("Exception occurred while checking bloom filter usage.", ex); + } + } + } + } + + private void _checkBloomFilterUsage() { + _logger.info("Metrics Bloom approx no. 
elements = {}", bloomFilter.approximateElementCount()); + _logger.info("Metrics Bloom expected error rate = {}", bloomFilter.expectedFpp()); + _logger.info("Scope only Bloom approx no. elements = {}", bloomFilterScopeOnly.approximateElementCount()); + _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); + _logger.info("Scope and metric only Bloom approx no. elements = {}", bloomFilterScopeAndMetricOnly.approximateElementCount()); + _logger.info("Scope and metric only Bloom expected error rate = {}", bloomFilterScopeAndMetricOnly.expectedFpp()); + } + + private void _sleepForPollPeriod() { + try { + _logger.info("Sleeping for {}s before checking bloom filter statistics.", POLL_INTERVAL_MS / 1000); + Thread.sleep(POLL_INTERVAL_MS); + } catch (InterruptedException ex) { + _logger.warn("AbstractSchemaService memory monitor thread was interrupted."); + Thread.currentThread().interrupt(); + } + } + } + + private class BloomFilterFlushThread implements Runnable { + @Override + public void run() { + try{ + _flushBloomFilter(); + } catch (Exception ex) { + _logger.warn("Exception occurred while flushing bloom filter.", ex); + } + } + + private void _flushBloomFilter() { + _logger.info("Flushing out bloom filter entries"); + bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); + bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); + /* Don't need explicit synchronization to prevent slowness majority of the time*/ + randomNumber = rand.nextInt(); + } + } } diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index 3f1ab2272..d7841f3d8 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -19,7 +19,12 @@ import java.util.function.Supplier; import com.google.common.hash.BloomFilter; -import com.salesforce.dva.argus.entity.*; +import com.salesforce.dva.argus.entity.KeywordQuery; +import com.salesforce.dva.argus.entity.Metric; +import com.salesforce.dva.argus.entity.MetricSchemaRecord; +import com.salesforce.dva.argus.entity.MetricSchemaRecordQuery; +import com.salesforce.dva.argus.entity.ScopeAndMetricOnlySchemaRecord; +import com.salesforce.dva.argus.entity.ScopeOnlySchemaRecord; import org.apache.commons.lang3.tuple.Pair; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -66,1451 +71,1451 @@ @Singleton public class ElasticSearchSchemaService extends AbstractSchemaService { - private static String SCOPE_INDEX_NAME; - private static String SCOPE_TYPE_NAME; - - private static String SCOPE_AND_METRIC_INDEX_NAME; - private static String SCOPE_AND_METRIC_TYPE_NAME; - - private static final String INDEX_NAME = "metadata_index"; - private static final String TYPE_NAME = "metadata_type"; - private static final String KEEP_SCROLL_CONTEXT_OPEN_FOR = "1m"; - private static final int INDEX_MAX_RESULT_WINDOW = 10000; - private static final int 
MAX_RETRY_TIMEOUT = 300 * 1000; - private static final String FIELD_TYPE_TEXT = "text"; - private static final String FIELD_TYPE_DATE ="date"; - - private final ObjectMapper _mapper; - private final ObjectMapper _scopeOnlyMapper; - private final ObjectMapper _scopeAndMetricOnlyMapper; - - private Logger _logger = LoggerFactory.getLogger(getClass()); - private final MonitorService _monitorService; - private RestClient _esRestClient; - private final int _replicationFactor; - private final int _numShards; - private final int _replicationFactorForScopeIndex; - private final int _numShardsForScopeIndex; - private final int _replicationFactorForScopeAndMetricIndex; - private final int _numShardsForScopeAndMetricIndex; - private final int _bulkIndexingSize; - private HashAlgorithm _idgenHashAlgo; - - @Inject - public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) { - super(config); - - _monitorService = monitorService; - _mapper = _createObjectMapper(); - _scopeOnlyMapper = _createScopeOnlyObjectMapper(); - _scopeAndMetricOnlyMapper = _createScopeAndMetricOnlyObjectMapper(); - - SCOPE_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); - SCOPE_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getDefaultValue()); - - SCOPE_AND_METRIC_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getDefaultValue()); - SCOPE_AND_METRIC_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getDefaultValue()); - - String algorithm = config.getValue(Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getName(), Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getDefaultValue()); - try { - _idgenHashAlgo = HashAlgorithm.fromString(algorithm); - } catch(IllegalArgumentException e) { - _logger.warn("{} is not supported by this service. 
Valid values are: {}.", algorithm, Arrays.asList(HashAlgorithm.values())); - _idgenHashAlgo = HashAlgorithm.MD5; - } - - _logger.info("Using {} for Elasticsearch document id generation.", _idgenHashAlgo); - - _replicationFactor = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS.getName(), Property.ELASTICSEARCH_NUM_REPLICAS.getDefaultValue())); - - _numShards = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT.getName(), Property.ELASTICSEARCH_SHARDS_COUNT.getDefaultValue())); - - _replicationFactorForScopeIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getDefaultValue())); - - _numShardsForScopeIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getDefaultValue())); - - _replicationFactorForScopeAndMetricIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); - - _numShardsForScopeAndMetricIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); - - _bulkIndexingSize = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue())); - - String[] nodes = config.getValue(Property.ELASTICSEARCH_ENDPOINT.getName(), Property.ELASTICSEARCH_ENDPOINT.getDefaultValue()).split(","); - HttpHost[] httpHosts = new HttpHost[nodes.length]; - - for(int i=0; i _createMappingsNode()); - - _createIndexIfNotExists(SCOPE_INDEX_NAME, _replicationFactorForScopeIndex, _numShardsForScopeIndex, - () -> _createScopeMappingsNode()); - - _createIndexIfNotExists(SCOPE_AND_METRIC_INDEX_NAME, _replicationFactorForScopeAndMetricIndex, - _numShardsForScopeAndMetricIndex, () -> _createScopeAndMetricMappingsNode()); - } - - - @Override - public void dispose() { - super.dispose(); - try { - _esRestClient.close(); - _logger.info("Shutdown of ElasticSearch RESTClient complete"); - } catch (IOException e) { - _logger.warn("ElasticSearch RestClient failed to shutdown properly.", e); - } - } - - @Override - public Properties getServiceProperties() { - Properties serviceProps = new Properties(); - - for (Property property : Property.values()) { - serviceProps.put(property.getName(), property.getDefaultValue()); - } - return serviceProps; - } - - @Override - protected void implementationSpecificPut(List metrics, Set scopeNames, - Set> scopesAndMetricNames) { - SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); - - _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); - - long start = System.currentTimeMillis(); - List> fracturedList = _fracture(metrics); - - for(List records : fracturedList) { - if(!records.isEmpty()) { - _upsert(records); - } - } - - int count = 0; - for(List records : fracturedList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITTEN, count, null); - _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - - _logger.info("{} new scopes need to be indexed on ES.", scopeNames.size()); - - start = 
System.currentTimeMillis(); - List> fracturedScopesList = _fractureScopes(scopeNames); - - for(List records : fracturedScopesList) { - if(!records.isEmpty()) { - _upsertScopes(records); - } - } - - count = 0; - for(List records : fracturedScopesList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITTEN, count, null); - _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - - _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); - - start = System.currentTimeMillis(); - List> fracturedScopesAndMetricsList = _fractureScopeAndMetrics(scopesAndMetricNames); - - for(List records : fracturedScopesAndMetricsList) { - if(!records.isEmpty()) { - _upsertScopeAndMetrics(records); - } - } - - count = 0; - for(List records : fracturedScopesAndMetricsList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCOPEANDMETRICNAMES_WRITTEN, count, null); - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - } - - /* Convert the given list of metrics to a list of metric schema records. At the same time, fracture the records list - * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. - */ - protected List> _fracture(List metrics) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(Metric metric : metrics) { - if(metric.getTags().isEmpty()) { - MetricSchemaRecord msr = new MetricSchemaRecord(metric.getScope(), metric.getMetric()); - msr.setNamespace(metric.getNamespace()); - records.add(msr); - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - continue; - } - - for(Map.Entry entry : metric.getTags().entrySet()) { - records.add(new MetricSchemaRecord(metric.getNamespace(), metric.getScope(), metric.getMetric(), - entry.getKey(), entry.getValue())); - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - /* Convert the given list of scope and metric names to a list of scope and metric only schema records. - * At the same time, fracture the records list if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. - */ - protected List> _fractureScopeAndMetrics(Set> scopesAndMetricNames) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(Pair scopeAndMetric : scopesAndMetricNames) { - records.add(new ScopeAndMetricOnlySchemaRecord(scopeAndMetric.getLeft(), scopeAndMetric.getRight())); - - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - /* Convert the given list of scopes to a list of scope only schema records. At the same time, fracture the records list - * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
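
The fracturing performed by these helpers is plain fixed-size batching; a generic, self-contained sketch of the same loop (the service hard-codes its record types, so the generic form here is only illustrative):

    import java.util.ArrayList;
    import java.util.List;

    class FractureSketch {
        // Same loop the _fracture* helpers use: fill a batch, emit it when it
        // reaches batchSize, and emit any non-empty remainder at the end.
        static <T> List<List<T>> fracture(List<T> items, int batchSize) {
            List<List<T>> batches = new ArrayList<>();
            List<T> batch = new ArrayList<>(batchSize);
            for (T item : items) {
                batch.add(item);
                if (batch.size() == batchSize) {
                    batches.add(batch);
                    batch = new ArrayList<>(batchSize);
                }
            }
            if (!batch.isEmpty()) {
                batches.add(batch);
            }
            return batches;
        }
    }
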
- */ - protected List> _fractureScopes(Set scopeNames) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(String scope : scopeNames) { - records.add(new ScopeOnlySchemaRecord(scope)); - - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - @Override - public List get(MetricSchemaRecordQuery query) { - requireNotDisposed(); - SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); - long size = (long) query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "REGEXP_WITHOUT_AGGREGATION"); - long start = System.currentTimeMillis(); - boolean scroll = false; - StringBuilder sb = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_search"); - - int from = 0, scrollSize; - if(query.getLimit() * query.getPage() > 10000) { - sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR); - scroll = true; - int total = query.getLimit() * query.getPage(); - scrollSize = (int) (total / (total / 10000 + 1)); - } else { - from = query.getLimit() * (query.getPage() - 1); - scrollSize = query.getLimit(); - } - - String requestUrl = sb.toString(); - String queryJson = _constructTermQuery(query, from, scrollSize); - - try { - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - - MetricSchemaRecordList list = toEntity(extractResponse(response), new TypeReference() {}); - - if(scroll) { - requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString(); - List records = new LinkedList<>(list.getRecords()); - - while(true) { - String scrollID = list.getScrollID(); - - Map requestBody = new HashMap<>(); - requestBody.put("scroll_id", scrollID); - requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); - - response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), - new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); - - list = toEntity(extractResponse(response), new TypeReference() {}); - records.addAll(list.getRecords()); - - if(records.size() >= query.getLimit() * query.getPage() || list.getRecords().size() < scrollSize) { - break; - } - } - - int fromIndex = query.getLimit() * (query.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return list.getRecords(); - } - - } catch (UnsupportedEncodingException | JsonProcessingException e) { - throw new 
SystemException("Search failed.", e); - } catch (IOException e) { - throw new SystemException("IOException when trying to perform ES request.", e); - } - } - - @Override - public List getUnique(MetricSchemaRecordQuery query, RecordType type) { - requireNotDisposed(); - SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); - long size = (long) query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "REGEXP_WITH_AGGREGATION"); - long start = System.currentTimeMillis(); - - String indexName = INDEX_NAME; - String typeName = TYPE_NAME; - - if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) - { - indexName = SCOPE_INDEX_NAME; - typeName = SCOPE_TYPE_NAME; - } - else if (query.isQueryOnlyOnScopeAndMetric()) - { - indexName = SCOPE_AND_METRIC_INDEX_NAME; - typeName = SCOPE_AND_METRIC_TYPE_NAME; - } - - String requestUrl = new StringBuilder().append("/") - .append(indexName) - .append("/") - .append(typeName) - .append("/") - .append("_search") - .toString(); - - String queryJson = _constructTermAggregationQuery(query, type); - try { - - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String str = extractResponse(response); - List records = SchemaService.constructMetricSchemaRecordsForType( - toEntity(str, new TypeReference>() {}), type); - - if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) { - _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - - } else if (query.isQueryOnlyOnScopeAndMetric()) { - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - } - - - int fromIndex = query.getLimit() * (query.getPage() - 1); - if(records.size() <= fromIndex) { - return Collections.emptyList(); - } - - if(records.size() < query.getLimit() * query.getPage()) { - return records.subList(fromIndex, records.size()); - } else { - return records.subList(fromIndex, query.getLimit() * query.getPage()); - } - } catch (IOException e) { - throw new SystemException(e); - } - } - - @Override - public List keywordSearch(KeywordQuery kq) { - requireNotDisposed(); - SystemAssert.requireArgument(kq != null, "Query cannot be null."); - SystemAssert.requireArgument(kq.getQuery() != null || kq.getType() != null, "Either the query string or the type must not be null."); - - long size = (long) kq.getLimit() * kq.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "FTS_WITH_AGGREGATION"); - long start = System.currentTimeMillis(); - StringBuilder sb = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_search"); - try { - - if(kq.getQuery() != null) { - - int from = 0, scrollSize = 0; - boolean scroll = false;; 
- if(kq.getLimit() * kq.getPage() > 10000) { - sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR); - scroll = true; - int total = kq.getLimit() * kq.getPage(); - scrollSize = (int) (total / (total / 10000 + 1)); - } else { - from = kq.getLimit() * (kq.getPage() - 1); - scrollSize = kq.getLimit(); - } - - List tokens = _analyzedTokens(kq.getQuery()); - String queryJson = _constructQueryStringQuery(tokens, from, scrollSize); - String requestUrl = sb.toString(); - - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String strResponse = extractResponse(response); - MetricSchemaRecordList list = toEntity(strResponse, new TypeReference() {}); - - if(scroll) { - requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString(); - List records = new LinkedList<>(list.getRecords()); - - while(true) { - Map requestBody = new HashMap<>(); - requestBody.put("scroll_id", list.getScrollID()); - requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); - - response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), - new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); - - list = toEntity(extractResponse(response), new TypeReference() {}); - - records.addAll(list.getRecords()); - - if(records.size() >= kq.getLimit() * kq.getPage() || list.getRecords().size() < scrollSize) { - break; - } - } - - int fromIndex = kq.getLimit() * (kq.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return list.getRecords(); - } - - - } else { - Map> tokensMap = new HashMap<>(); - - List tokens = _analyzedTokens(kq.getScope()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.SCOPE, tokens); - } - - tokens = _analyzedTokens(kq.getMetric()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.METRIC, tokens); - } - - tokens = _analyzedTokens(kq.getTagKey()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.TAGK, tokens); - } - - tokens = _analyzedTokens(kq.getTagValue()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.TAGV, tokens); - } - - tokens = _analyzedTokens(kq.getNamespace()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.NAMESPACE, tokens); - } - - String queryJson = _constructQueryStringQuery(kq, tokensMap); - String requestUrl = sb.toString(); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String strResponse = extractResponse(response); - - List records = SchemaService.constructMetricSchemaRecordsForType( - toEntity(strResponse, new TypeReference>() {}), kq.getType()); - - int fromIndex = kq.getLimit() * (kq.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, 
tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - if(records.size() < kq.getLimit() * kq.getPage()) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, kq.getLimit() * kq.getPage()); - } - - } - - } catch (IOException e) { - throw new SystemException(e); - } - } - - private List _analyzedTokens(String query) { - - if(!SchemaService.containsFilter(query)) { - return Collections.emptyList(); - } - - List tokens = new ArrayList<>(); - - String requestUrl = new StringBuilder("/").append(INDEX_NAME).append("/_analyze").toString(); - - String requestBody = "{\"analyzer\" : \"metadata_analyzer\", \"text\": \"" + query + "\" }"; - - try { - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - String strResponse = extractResponse(response); - JsonNode tokensNode = _mapper.readTree(strResponse).get("tokens"); - if(tokensNode.isArray()) { - for(JsonNode tokenNode : tokensNode) { - tokens.add(tokenNode.get("token").asText()); - } - } - - return tokens; - } catch (IOException e) { - throw new SystemException(e); - } - } - - private void _upsert(List records) { - String requestUrl = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - MetricSchemaRecordList msrList = new MetricSchemaRecordList(records, _idgenHashAlgo); - try { - String requestBody = _mapper.writeValueAsString(msrList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? - throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index metric. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(msrList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Index does not exist. 
Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(msrList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilter(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put metrics. The response was: " + strResponse, e); - } - } - - private void _upsertScopeAndMetrics(List records) { - String requestUrl = new StringBuilder().append("/") - .append(SCOPE_AND_METRIC_INDEX_NAME) - .append("/") - .append(SCOPE_AND_METRIC_TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - ScopeAndMetricOnlySchemaRecordList recordList = new ScopeAndMetricOnlySchemaRecordList(records, _idgenHashAlgo); - - try { - String requestBody = _scopeAndMetricOnlyMapper.writeValueAsString(recordList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? - throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(recordList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(recordList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilterScopeAndMetricOnly(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put scope names. The response was: " + strResponse, e); - } - } - - private void _upsertScopes(List records) { - String requestUrl = new StringBuilder().append("/") - .append(SCOPE_INDEX_NAME) - .append("/") - .append(SCOPE_TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - ScopeOnlySchemaRecordList scopeOnlySchemaRecordList = new ScopeOnlySchemaRecordList(records, _idgenHashAlgo); - - try { - String requestBody = _scopeOnlyMapper.writeValueAsString(scopeOnlySchemaRecordList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? 
- throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilterScopeOnly(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put scope names. The response was: " + strResponse, e); - } - } - - protected void _addToBloomFilter(List records){ - _logger.info("Adding {} records into bloom filter.", records.size()); - for(MetricSchemaRecord record : records) { - String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); - bloomFilter.put(key); - } - } - - protected void _addToBloomFilterScopeAndMetricOnly(List records){ - _logger.info("Adding {} records into scope and metric only bloom filter.", records.size()); - for(ScopeAndMetricOnlySchemaRecord record : records) { - String key = constructScopeAndMetricOnlyKey(record.getScope(), record.getMetric()); - bloomFilterScopeAndMetricOnly.put(key); - } - } - - protected void _addToBloomFilterScopeOnly(List records){ - _logger.info("Adding {} records into scope only bloom filter.", records.size()); - for(ScopeOnlySchemaRecord record : records) { - String key = constructScopeOnlyKey(record.getScope()); - bloomFilterScopeOnly.put(key); - } - } - - private String _constructTermAggregationQuery(MetricSchemaRecordQuery query, RecordType type) { - ObjectMapper mapper = new ObjectMapper(); - ObjectNode queryNode = _constructQueryNode(query, mapper); - - long size = query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); - - ObjectNode aggsNode = _constructAggsNode(type, Math.max(size, 10000), mapper); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("size", 0); - rootNode.put("aggs", aggsNode); - - return rootNode.toString(); - } - - private String _constructTermQuery(MetricSchemaRecordQuery query, int from, int size) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode queryNode = _constructQueryNode(query, mapper); - - ObjectNode rootNode = _mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("from", from); - rootNode.put("size", size); - - return rootNode.toString(); - } - - private ObjectNode _constructSimpleQueryStringNode(List tokens, RecordType... 
types) { - - if(tokens.isEmpty()) { - return null; - } - - ObjectMapper mapper = new ObjectMapper(); - - StringBuilder queryString = new StringBuilder(); - for(String token : tokens) { - queryString.append('+').append(token).append(' '); - } - queryString.replace(queryString.length() - 1, queryString.length(), "*"); - - ObjectNode node = mapper.createObjectNode(); - ArrayNode fieldsNode = mapper.createArrayNode(); - for(RecordType type : types) { - fieldsNode.add(type.getName()); - } - node.put("fields", fieldsNode); - node.put("query", queryString.toString()); - - ObjectNode simpleQueryStringNode = mapper.createObjectNode(); - simpleQueryStringNode.put("simple_query_string", node); - - return simpleQueryStringNode; - } - - private String _constructQueryStringQuery(List tokens, int from, int size) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(tokens, RecordType.values()); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", simpleQueryStringNode); - rootNode.put("from", from); - rootNode.put("size", size); - - return rootNode.toString(); - } - - private String _constructQueryStringQuery(KeywordQuery kq, Map> tokensMap) { - ObjectMapper mapper = new ObjectMapper(); - - ArrayNode filterNodes = mapper.createArrayNode(); - for(Map.Entry> entry : tokensMap.entrySet()) { - ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(entry.getValue(), entry.getKey()); - filterNodes.add(simpleQueryStringNode); - } - - ObjectNode boolNode = mapper.createObjectNode(); - boolNode.put("filter", filterNodes); - - ObjectNode queryNode = mapper.createObjectNode(); - queryNode.put("bool", boolNode); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("size", 0); - - long size = kq.getLimit() * kq.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); - rootNode.put("aggs", _constructAggsNode(kq.getType(), Math.max(size, 10000), mapper)); - - return rootNode.toString(); - - } - - private ObjectNode _constructQueryNode(MetricSchemaRecordQuery query, ObjectMapper mapper) { - ArrayNode filterNodes = mapper.createArrayNode(); - if(SchemaService.containsFilter(query.getMetric())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.METRIC.getName() + ".raw", SchemaService.convertToRegex(query.getMetric())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getScope())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.SCOPE.getName() + ".raw", SchemaService.convertToRegex(query.getScope())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getTagKey())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.TAGK.getName() + ".raw", SchemaService.convertToRegex(query.getTagKey())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getTagValue())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.TAGV.getName() + ".raw", SchemaService.convertToRegex(query.getTagValue())); - node.put("regexp", 
regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getNamespace())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.NAMESPACE.getName() + ".raw", SchemaService.convertToRegex(query.getNamespace())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - ObjectNode boolNode = mapper.createObjectNode(); - boolNode.put("filter", filterNodes); - - ObjectNode queryNode = mapper.createObjectNode(); - queryNode.put("bool", boolNode); - return queryNode; - } - - private ObjectNode _constructAggsNode(RecordType type, long limit, ObjectMapper mapper) { - - ObjectNode termsNode = mapper.createObjectNode(); - termsNode.put("field", type.getName() + ".raw"); - termsNode.put("order", mapper.createObjectNode().put("_term", "asc")); - termsNode.put("size", limit); - termsNode.put("execution_hint", "map"); - - ObjectNode distinctValuesNode = mapper.createObjectNode(); - distinctValuesNode.put("terms", termsNode); - - ObjectNode aggsNode = mapper.createObjectNode(); - aggsNode.put("distinct_values", distinctValuesNode); - return aggsNode; - } - - - /* Helper method to convert JSON String representation to the corresponding Java entity. */ - private T toEntity(String content, TypeReference type) { - try { - return _mapper.readValue(content, type); - } catch (IOException ex) { - throw new SystemException(ex); - } - } - - /* Method to change the rest client. Used for testing. */ - protected void setRestClient(RestClient restClient) - { - this._esRestClient = restClient; - } - - /** Helper to process the response. - * Throws a SystemException when the http status code is outsdie of the range 200 - 300. - * @param response ES response - * @return Stringified response - */ - protected String extractResponse(Response response) { - requireArgument(response != null, "HttpResponse object cannot be null."); - - int status = response.getStatusLine().getStatusCode(); - String strResponse = extractStringResponse(response); - - if ((status < HttpStatus.SC_OK) || (status >= HttpStatus.SC_MULTIPLE_CHOICES)) { - throw new SystemException("Status code: " + status + " . Error occurred. 
" + strResponse); - } else { - return strResponse; - } - } - - private String extractStringResponse(Response content) { - requireArgument(content != null, "Response content is null."); - - String result; - HttpEntity entity = null; - - try { - entity = content.getEntity(); - if (entity == null) { - result = ""; - } else { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - - entity.writeTo(baos); - result = baos.toString("UTF-8"); - } - return result; - } catch (IOException ex) { - throw new SystemException(ex); - } finally { - if (entity != null) { - try { - EntityUtils.consume(entity); - } catch (IOException ex) { - _logger.warn("Failed to close entity stream.", ex); - } - } - } - } - - private ObjectMapper _createObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Serializer()); - module.addDeserializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectMapper _createScopeAndMetricOnlyObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Serializer()); - module.addDeserializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectMapper _createScopeOnlyObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Serializer()); - module.addDeserializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectNode _createSettingsNode(int replicationFactor, int numShards) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode metadataAnalyzer = mapper.createObjectNode(); - metadataAnalyzer.put("tokenizer", "metadata_tokenizer"); - metadataAnalyzer.put("filter", mapper.createArrayNode().add("lowercase")); - - ObjectNode analyzerNode = mapper.createObjectNode(); - analyzerNode.put("metadata_analyzer", metadataAnalyzer); - - ObjectNode tokenizerNode = mapper.createObjectNode(); - tokenizerNode.put("metadata_tokenizer", mapper.createObjectNode().put("type", "pattern").put("pattern", "([^\\p{L}\\d]+)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])")); - - ObjectNode analysisNode = mapper.createObjectNode(); - analysisNode.put("analyzer", analyzerNode); - analysisNode.put("tokenizer", tokenizerNode); - - ObjectNode indexNode = mapper.createObjectNode(); - indexNode.put("max_result_window", INDEX_MAX_RESULT_WINDOW); - indexNode.put("number_of_replicas", replicationFactor); - indexNode.put("number_of_shards", numShards); - - ObjectNode settingsNode = mapper.createObjectNode(); - settingsNode.put("analysis", analysisNode); - 
settingsNode.put("index", indexNode); - - return settingsNode; - } - - private ObjectNode _createMappingsNode() { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode propertiesNode = mapper.createObjectNode(); - propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.TAGK.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.TAGV.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.NAMESPACE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - - propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - - ObjectNode typeNode = mapper.createObjectNode(); - typeNode.put("properties", propertiesNode); - - ObjectNode mappingsNode = mapper.createObjectNode(); - mappingsNode.put(TYPE_NAME, typeNode); - return mappingsNode; - } - - private ObjectNode _createScopeAndMetricMappingsNode() { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode propertiesNode = mapper.createObjectNode(); - propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - - propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + private static String SCOPE_INDEX_NAME; + private static String SCOPE_TYPE_NAME; + + private static String SCOPE_AND_METRIC_INDEX_NAME; + private static String SCOPE_AND_METRIC_TYPE_NAME; + + private static final String INDEX_NAME = "metadata_index"; + private static final String TYPE_NAME = "metadata_type"; + private static final String KEEP_SCROLL_CONTEXT_OPEN_FOR = "1m"; + private static final int INDEX_MAX_RESULT_WINDOW = 10000; + private static final int MAX_RETRY_TIMEOUT = 300 * 1000; + private static final String FIELD_TYPE_TEXT = "text"; + private static final String FIELD_TYPE_DATE ="date"; + + private final ObjectMapper _mapper; + private final ObjectMapper _scopeOnlyMapper; + private final ObjectMapper _scopeAndMetricOnlyMapper; + + private Logger _logger = LoggerFactory.getLogger(getClass()); + private final MonitorService _monitorService; + private RestClient _esRestClient; + private final int _replicationFactor; + private final int _numShards; + private final int _replicationFactorForScopeIndex; + private final int _numShardsForScopeIndex; + private final int _replicationFactorForScopeAndMetricIndex; + private final int _numShardsForScopeAndMetricIndex; + private final int _bulkIndexingSize; + private HashAlgorithm _idgenHashAlgo; + + @Inject + public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) { + super(config); + + _monitorService = monitorService; + _mapper = _createObjectMapper(); + _scopeOnlyMapper = _createScopeOnlyObjectMapper(); + _scopeAndMetricOnlyMapper = _createScopeAndMetricOnlyObjectMapper(); + + SCOPE_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); + SCOPE_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getDefaultValue()); + + SCOPE_AND_METRIC_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getDefaultValue()); + SCOPE_AND_METRIC_TYPE_NAME = 
config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getDefaultValue()); + + String algorithm = config.getValue(Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getName(), Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getDefaultValue()); + try { + _idgenHashAlgo = HashAlgorithm.fromString(algorithm); + } catch(IllegalArgumentException e) { + _logger.warn("{} is not supported by this service. Valid values are: {}.", algorithm, Arrays.asList(HashAlgorithm.values())); + _idgenHashAlgo = HashAlgorithm.MD5; + } + + _logger.info("Using {} for Elasticsearch document id generation.", _idgenHashAlgo); + + _replicationFactor = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS.getName(), Property.ELASTICSEARCH_NUM_REPLICAS.getDefaultValue())); + + _numShards = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT.getName(), Property.ELASTICSEARCH_SHARDS_COUNT.getDefaultValue())); + + _replicationFactorForScopeIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getDefaultValue())); + + _numShardsForScopeIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getDefaultValue())); + + _replicationFactorForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + + _numShardsForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + + _bulkIndexingSize = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue())); + + String[] nodes = config.getValue(Property.ELASTICSEARCH_ENDPOINT.getName(), Property.ELASTICSEARCH_ENDPOINT.getDefaultValue()).split(","); + HttpHost[] httpHosts = new HttpHost[nodes.length]; + + for(int i=0; i _createMappingsNode()); + + _createIndexIfNotExists(SCOPE_INDEX_NAME, _replicationFactorForScopeIndex, _numShardsForScopeIndex, + () -> _createScopeMappingsNode()); + + _createIndexIfNotExists(SCOPE_AND_METRIC_INDEX_NAME, _replicationFactorForScopeAndMetricIndex, + _numShardsForScopeAndMetricIndex, () -> _createScopeAndMetricMappingsNode()); + } + + + @Override + public void dispose() { + super.dispose(); + try { + _esRestClient.close(); + _logger.info("Shutdown of ElasticSearch RESTClient complete"); + } catch (IOException e) { + _logger.warn("ElasticSearch RestClient failed to shutdown properly.", e); + } + } + + @Override + public Properties getServiceProperties() { + Properties serviceProps = new Properties(); + + for (Property property : Property.values()) { + serviceProps.put(property.getName(), property.getDefaultValue()); + } + return serviceProps; + } + + @Override + protected void implementationSpecificPut(List metrics, Set scopeNames, + Set> scopesAndMetricNames) { + SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); + + _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); + + long start = System.currentTimeMillis(); + List> fracturedList = _fracture(metrics); + + for(List records : fracturedList) { + 
if(!records.isEmpty()) { + _upsert(records); + } + } + + int count = 0; + for(List records : fracturedList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITTEN, count, null); + _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + + _logger.info("{} new scopes need to be indexed on ES.", scopeNames.size()); + + start = System.currentTimeMillis(); + List> fracturedScopesList = _fractureScopes(scopeNames); + + for(List records : fracturedScopesList) { + if(!records.isEmpty()) { + _upsertScopes(records); + } + } + + count = 0; + for(List records : fracturedScopesList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITTEN, count, null); + _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + + _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); + + start = System.currentTimeMillis(); + List> fracturedScopesAndMetricsList = _fractureScopeAndMetrics(scopesAndMetricNames); + + for(List records : fracturedScopesAndMetricsList) { + if(!records.isEmpty()) { + _upsertScopeAndMetrics(records); + } + } + + count = 0; + for(List records : fracturedScopesAndMetricsList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCOPEANDMETRICNAMES_WRITTEN, count, null); + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + } + + /* Convert the given list of metrics to a list of metric schema records. At the same time, fracture the records list + * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. + */ + protected List> _fracture(List metrics) { + List> fracturedList = new ArrayList<>(); + + List records = new ArrayList<>(_bulkIndexingSize); + for(Metric metric : metrics) { + if(metric.getTags().isEmpty()) { + MetricSchemaRecord msr = new MetricSchemaRecord(metric.getScope(), metric.getMetric()); + msr.setNamespace(metric.getNamespace()); + records.add(msr); + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + continue; + } + + for(Map.Entry entry : metric.getTags().entrySet()) { + records.add(new MetricSchemaRecord(metric.getNamespace(), metric.getScope(), metric.getMetric(), + entry.getKey(), entry.getValue())); + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + } + } + + if(!records.isEmpty()) { + fracturedList.add(records); + } + + return fracturedList; + } + + /* Convert the given list of scope and metric names to a list of scope and metric only schema records. + * At the same time, fracture the records list if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
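The fracturing described here is a plain fixed-size partition of the record stream; a generic equivalent (hypothetical helper, shown only to make the batching contract explicit) could read:

    // Partition a list into chunks of at most batchSize elements, the same
    // slicing _fracture performs while it converts metrics to schema records.
    static <T> List<List<T>> partition(List<T> items, int batchSize) {
        List<List<T>> chunks = new ArrayList<>();
        for (int i = 0; i < items.size(); i += batchSize) {
            chunks.add(new ArrayList<>(items.subList(i, Math.min(i + batchSize, items.size()))));
        }
        return chunks;
    }

The difference in _fracture itself is that a metric with N tags expands to N records before slicing, so chunk boundaries are counted in records, not in input metrics.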
+ */ + protected List> _fractureScopeAndMetrics(Set> scopesAndMetricNames) { + List> fracturedList = new ArrayList<>(); + + List records = new ArrayList<>(_bulkIndexingSize); + for(Pair scopeAndMetric : scopesAndMetricNames) { + records.add(new ScopeAndMetricOnlySchemaRecord(scopeAndMetric.getLeft(), scopeAndMetric.getRight())); + + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + } + + if(!records.isEmpty()) { + fracturedList.add(records); + } + + return fracturedList; + } + + /* Convert the given list of scopes to a list of scope only schema records. At the same time, fracture the records list + * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. + */ + protected List> _fractureScopes(Set scopeNames) { + List> fracturedList = new ArrayList<>(); + + List records = new ArrayList<>(_bulkIndexingSize); + for(String scope : scopeNames) { + records.add(new ScopeOnlySchemaRecord(scope)); + + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + } + + if(!records.isEmpty()) { + fracturedList.add(records); + } + + return fracturedList; + } + + @Override + public List get(MetricSchemaRecordQuery query) { + requireNotDisposed(); + SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); + long size = (long) query.getLimit() * query.getPage(); + SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, + "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); + + + Map tags = new HashMap<>(); + tags.put("type", "REGEXP_WITHOUT_AGGREGATION"); + long start = System.currentTimeMillis(); + boolean scroll = false; + StringBuilder sb = new StringBuilder().append("/") + .append(INDEX_NAME) + .append("/") + .append(TYPE_NAME) + .append("/") + .append("_search"); + + int from = 0, scrollSize; + if(query.getLimit() * query.getPage() > 10000) { + sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR); + scroll = true; + int total = query.getLimit() * query.getPage(); + scrollSize = (int) (total / (total / 10000 + 1)); + } else { + from = query.getLimit() * (query.getPage() - 1); + scrollSize = query.getLimit(); + } + + String requestUrl = sb.toString(); + String queryJson = _constructTermQuery(query, from, scrollSize); + + try { + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); + + MetricSchemaRecordList list = toEntity(extractResponse(response), new TypeReference() {}); + + if(scroll) { + requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString(); + List records = new LinkedList<>(list.getRecords()); + + while(true) { + String scrollID = list.getScrollID(); + + Map requestBody = new HashMap<>(); + requestBody.put("scroll_id", scrollID); + requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); + + response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), + new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); + + list = toEntity(extractResponse(response), new TypeReference() {}); + records.addAll(list.getRecords()); + + if(records.size() >= query.getLimit() * query.getPage() || list.getRecords().size() < scrollSize) { + break; + } + } + + int fromIndex = query.getLimit() * (query.getPage() - 1); + if(records.size() <= fromIndex) { + 
_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return Collections.emptyList(); + } + + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return records.subList(fromIndex, records.size()); + + } else { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return list.getRecords(); + } + + } catch (UnsupportedEncodingException | JsonProcessingException e) { + throw new SystemException("Search failed.", e); + } catch (IOException e) { + throw new SystemException("IOException when trying to perform ES request.", e); + } + } + + @Override + public List getUnique(MetricSchemaRecordQuery query, RecordType type) { + requireNotDisposed(); + SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); + long size = (long) query.getLimit() * query.getPage(); + SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, + "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); + + + Map tags = new HashMap<>(); + tags.put("type", "REGEXP_WITH_AGGREGATION"); + long start = System.currentTimeMillis(); + + String indexName = INDEX_NAME; + String typeName = TYPE_NAME; + + if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) + { + indexName = SCOPE_INDEX_NAME; + typeName = SCOPE_TYPE_NAME; + } + else if (query.isQueryOnlyOnScopeAndMetric()) + { + indexName = SCOPE_AND_METRIC_INDEX_NAME; + typeName = SCOPE_AND_METRIC_TYPE_NAME; + } + + String requestUrl = new StringBuilder().append("/") + .append(indexName) + .append("/") + .append(typeName) + .append("/") + .append("_search") + .toString(); + + String queryJson = _constructTermAggregationQuery(query, type); + try { + + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); + String str = extractResponse(response); + List records = SchemaService.constructMetricSchemaRecordsForType( + toEntity(str, new TypeReference>() {}), type); + + if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) { + _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + + } else if (query.isQueryOnlyOnScopeAndMetric()) { + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + } else { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + } + + + int fromIndex = query.getLimit() * (query.getPage() - 1); + if(records.size() <= fromIndex) { + return Collections.emptyList(); + } + + if(records.size() < query.getLimit() * query.getPage()) { + return records.subList(fromIndex, records.size()); + } else { + return records.subList(fromIndex, query.getLimit() * query.getPage()); + } + } catch (IOException e) { + throw new SystemException(e); + } + } + + @Override + public List keywordSearch(KeywordQuery kq) { 
+        requireNotDisposed();
+        SystemAssert.requireArgument(kq != null, "Query cannot be null.");
+        SystemAssert.requireArgument(kq.getQuery() != null || kq.getType() != null, "Either the query string or the type must not be null.");
+
+        long size = (long) kq.getLimit() * kq.getPage();
+        SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE,
+                "(limit * page) must be greater than 0 and at most Integer.MAX_VALUE");
+
+        Map<String, String> tags = new HashMap<>();
+        tags.put("type", "FTS_WITH_AGGREGATION");
+        long start = System.currentTimeMillis();
+        StringBuilder sb = new StringBuilder().append("/")
+                .append(INDEX_NAME)
+                .append("/")
+                .append(TYPE_NAME)
+                .append("/")
+                .append("_search");
+        try {
+
+            if(kq.getQuery() != null) {
+
+                int from = 0, scrollSize = 0;
+                boolean scroll = false;
+                if(kq.getLimit() * kq.getPage() > 10000) {
+                    sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR);
+                    scroll = true;
+                    int total = kq.getLimit() * kq.getPage();
+                    scrollSize = (int) (total / (total / 10000 + 1));
+                } else {
+                    from = kq.getLimit() * (kq.getPage() - 1);
+                    scrollSize = kq.getLimit();
+                }
+
+                List<String> tokens = _analyzedTokens(kq.getQuery());
+                String queryJson = _constructQueryStringQuery(tokens, from, scrollSize);
+                String requestUrl = sb.toString();
+
+                Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));
+                String strResponse = extractResponse(response);
+                MetricSchemaRecordList list = toEntity(strResponse, new TypeReference<MetricSchemaRecordList>() {});
+
+                if(scroll) {
+                    requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString();
+                    List<MetricSchemaRecord> records = new LinkedList<>(list.getRecords());
+
+                    while(true) {
+                        Map<String, String> requestBody = new HashMap<>();
+                        requestBody.put("scroll_id", list.getScrollID());
+                        requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR);
+
+                        response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(),
+                                new StringEntity(new ObjectMapper().writeValueAsString(requestBody)));
+
+                        list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
+
+                        records.addAll(list.getRecords());
+
+                        if(records.size() >= kq.getLimit() * kq.getPage() || list.getRecords().size() < scrollSize) {
+                            break;
+                        }
+                    }
+
+                    int fromIndex = kq.getLimit() * (kq.getPage() - 1);
+                    if(records.size() <= fromIndex) {
+                        _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+                        _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+                        return Collections.emptyList();
+                    }
+
+                    _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+                    _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+                    return records.subList(fromIndex, records.size());
+
+                } else {
+                    _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+                    _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+                    return list.getRecords();
+                }
+
+            } else {
+                Map<RecordType, List<String>> tokensMap = new HashMap<>();
+
+                List<String> tokens = _analyzedTokens(kq.getScope());
+                if(!tokens.isEmpty()) {
+                    tokensMap.put(RecordType.SCOPE, tokens);
+                }
+
+                tokens = _analyzedTokens(kq.getMetric());
+                if(!tokens.isEmpty()) {
+                    tokensMap.put(RecordType.METRIC, tokens);
+                }
+
+                tokens = _analyzedTokens(kq.getTagKey());
+                if(!tokens.isEmpty()) {
+                    tokensMap.put(RecordType.TAGK, tokens);
+                }
+
+                tokens = _analyzedTokens(kq.getTagValue());
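The per-field token lists gathered here feed _constructSimpleQueryStringNode, which joins each list into a single simple_query_string expression: every token becomes a required term and the final one gets a trailing wildcard for prefix matching. The joining step, isolated (same logic as the StringBuilder loop later in this file):

    // [argus, core] -> "+argus +core*": all terms required, prefix match on the last.
    static String toSimpleQueryString(List<String> tokens) {
        StringBuilder queryString = new StringBuilder();
        for (String token : tokens) {
            queryString.append('+').append(token).append(' ');
        }
        queryString.replace(queryString.length() - 1, queryString.length(), "*");
        return queryString.toString();
    }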
+ if(!tokens.isEmpty()) { + tokensMap.put(RecordType.TAGV, tokens); + } + + tokens = _analyzedTokens(kq.getNamespace()); + if(!tokens.isEmpty()) { + tokensMap.put(RecordType.NAMESPACE, tokens); + } + + String queryJson = _constructQueryStringQuery(kq, tokensMap); + String requestUrl = sb.toString(); + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); + String strResponse = extractResponse(response); + + List records = SchemaService.constructMetricSchemaRecordsForType( + toEntity(strResponse, new TypeReference>() {}), kq.getType()); + + int fromIndex = kq.getLimit() * (kq.getPage() - 1); + if(records.size() <= fromIndex) { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return Collections.emptyList(); + } + + if(records.size() < kq.getLimit() * kq.getPage()) { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return records.subList(fromIndex, records.size()); + } else { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return records.subList(fromIndex, kq.getLimit() * kq.getPage()); + } + + } + + } catch (IOException e) { + throw new SystemException(e); + } + } + + private List _analyzedTokens(String query) { + + if(!SchemaService.containsFilter(query)) { + return Collections.emptyList(); + } + + List tokens = new ArrayList<>(); + + String requestUrl = new StringBuilder("/").append(INDEX_NAME).append("/_analyze").toString(); + + String requestBody = "{\"analyzer\" : \"metadata_analyzer\", \"text\": \"" + query + "\" }"; + + try { + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); + String strResponse = extractResponse(response); + JsonNode tokensNode = _mapper.readTree(strResponse).get("tokens"); + if(tokensNode.isArray()) { + for(JsonNode tokenNode : tokensNode) { + tokens.add(tokenNode.get("token").asText()); + } + } + + return tokens; + } catch (IOException e) { + throw new SystemException(e); + } + } + + private void _upsert(List records) { + String requestUrl = new StringBuilder().append("/") + .append(INDEX_NAME) + .append("/") + .append(TYPE_NAME) + .append("/") + .append("_bulk") + .toString(); + + String strResponse = ""; + + MetricSchemaRecordList msrList = new MetricSchemaRecordList(records, _idgenHashAlgo); + try { + String requestBody = _mapper.writeValueAsString(msrList); + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); + strResponse = extractResponse(response); + + } catch (IOException e) { + //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? + throw new SystemException(e); + } + + try { + PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); + //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. 
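The PutResponse parsed below maps the body of an Elasticsearch _bulk response, which carries a top-level errors flag and one item per operation; 409 (conflict, document already indexed) and 201 (created) on a create are treated as success, while any other create status, or a 404 on an index operation, gets the record pruned before the bloom filter update. Roughly the shape being handled, with illustrative values:

    {
      "took": 30,
      "errors": true,
      "items": [
        { "create": { "_id": "a1b2c3", "status": 409 } },
        { "create": { "_id": "d4e5f6", "status": 201 } },
        { "index":  { "_id": "g7h8i9", "status": 404, "error": { "type": "index_not_found_exception" } } }
      ]
    }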
+            if(putResponse.errors) {
+                List<MetricSchemaRecord> recordsToRemove = new ArrayList<>();
+                for(Item item : putResponse.items) {
+                    if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+                        _logger.warn("Failed to index metric. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+                        recordsToRemove.add(msrList.getRecord(item.create._id));
+                    }
+
+                    if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+                        _logger.warn("Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+                        recordsToRemove.add(msrList.getRecord(item.index._id));
+                    }
+                }
+                if(recordsToRemove.size() != 0) {
+                    _logger.info("{} records were not written to ES", recordsToRemove.size());
+                    records.removeAll(recordsToRemove);
+                }
+            }
+            //add to bloom filter
+            _addToBloomFilter(records);
+
+        } catch(IOException e) {
+            throw new SystemException("Failed to parse response of put metrics. The response was: " + strResponse, e);
+        }
+    }
+
+    private void _upsertScopeAndMetrics(List<ScopeAndMetricOnlySchemaRecord> records) {
+        String requestUrl = new StringBuilder().append("/")
+                .append(SCOPE_AND_METRIC_INDEX_NAME)
+                .append("/")
+                .append(SCOPE_AND_METRIC_TYPE_NAME)
+                .append("/")
+                .append("_bulk")
+                .toString();
+
+        String strResponse = "";
+
+        ScopeAndMetricOnlySchemaRecordList recordList = new ScopeAndMetricOnlySchemaRecordList(records, _idgenHashAlgo);
+
+        try {
+            String requestBody = _scopeAndMetricOnlyMapper.writeValueAsString(recordList);
+            Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody));
+            strResponse = extractResponse(response);
+        } catch (IOException e) {
+            //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException??
+            throw new SystemException(e);
+        }
+
+        try {
+            PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class);
+            //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off.
+            if(putResponse.errors) {
+                List<ScopeAndMetricOnlySchemaRecord> recordsToRemove = new ArrayList<>();
+                for(Item item : putResponse.items) {
+                    if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+                        _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+                        recordsToRemove.add(recordList.getRecord(item.create._id));
+                    }
+
+                    if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+                        _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+                        recordsToRemove.add(recordList.getRecord(item.index._id));
+                    }
+                }
+                if(recordsToRemove.size() != 0) {
+                    _logger.info("{} records were not written to ES", recordsToRemove.size());
+                    records.removeAll(recordsToRemove);
+                }
+            }
+            //add to bloom filter
+            _addToBloomFilterScopeAndMetricOnly(records);
+
+        } catch(IOException e) {
+            throw new SystemException("Failed to parse response of put scope names. The response was: " + strResponse, e);
+        }
+    }
+
+    private void _upsertScopes(List<ScopeOnlySchemaRecord> records) {
+        String requestUrl = new StringBuilder().append("/")
+                .append(SCOPE_INDEX_NAME)
+                .append("/")
+                .append(SCOPE_TYPE_NAME)
+                .append("/")
+                .append("_bulk")
+                .toString();
+
+        String strResponse = "";
+
+        ScopeOnlySchemaRecordList scopeOnlySchemaRecordList = new ScopeOnlySchemaRecordList(records, _idgenHashAlgo);
+
+        try {
+            String requestBody = _scopeOnlyMapper.writeValueAsString(scopeOnlySchemaRecordList);
+            Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody));
+            strResponse = extractResponse(response);
+        } catch (IOException e) {
+            //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException??
+            throw new SystemException(e);
+        }
+
+        try {
+            PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class);
+            //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off.
+            if(putResponse.errors) {
+                List<ScopeOnlySchemaRecord> recordsToRemove = new ArrayList<>();
+                for(Item item : putResponse.items) {
+                    if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+                        _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+                        recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.create._id));
+                    }
+
+                    if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+                        _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+                        recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.index._id));
+                    }
+                }
+                if(recordsToRemove.size() != 0) {
+                    _logger.info("{} records were not written to ES", recordsToRemove.size());
+                    records.removeAll(recordsToRemove);
+                }
+            }
+            //add to bloom filter
+            _addToBloomFilterScopeOnly(records);
+
+        } catch(IOException e) {
+            throw new SystemException("Failed to parse response of put scope names. 
The response was: " + strResponse, e); + } + } + + protected void _addToBloomFilter(List records){ + _logger.info("Adding {} records into bloom filter.", records.size()); + for(MetricSchemaRecord record : records) { + String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); + bloomFilter.put(key); + } + } + + protected void _addToBloomFilterScopeAndMetricOnly(List records){ + _logger.info("Adding {} records into scope and metric only bloom filter.", records.size()); + for(ScopeAndMetricOnlySchemaRecord record : records) { + String key = constructScopeAndMetricOnlyKey(record.getScope(), record.getMetric()); + bloomFilterScopeAndMetricOnly.put(key); + } + } + + protected void _addToBloomFilterScopeOnly(List records){ + _logger.info("Adding {} records into scope only bloom filter.", records.size()); + for(ScopeOnlySchemaRecord record : records) { + String key = constructScopeOnlyKey(record.getScope()); + bloomFilterScopeOnly.put(key); + } + } + + private String _constructTermAggregationQuery(MetricSchemaRecordQuery query, RecordType type) { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode queryNode = _constructQueryNode(query, mapper); + + long size = query.getLimit() * query.getPage(); + SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, + "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); + + ObjectNode aggsNode = _constructAggsNode(type, Math.max(size, 10000), mapper); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("query", queryNode); + rootNode.put("size", 0); + rootNode.put("aggs", aggsNode); + + return rootNode.toString(); + } + + private String _constructTermQuery(MetricSchemaRecordQuery query, int from, int size) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode queryNode = _constructQueryNode(query, mapper); + + ObjectNode rootNode = _mapper.createObjectNode(); + rootNode.put("query", queryNode); + rootNode.put("from", from); + rootNode.put("size", size); + + return rootNode.toString(); + } + + private ObjectNode _constructSimpleQueryStringNode(List tokens, RecordType... 
+    private ObjectNode _constructSimpleQueryStringNode(List<String> tokens, RecordType... types) {
+
+        if(tokens.isEmpty()) {
+            return null;
+        }
+
+        ObjectMapper mapper = new ObjectMapper();
+
+        StringBuilder queryString = new StringBuilder();
+        for(String token : tokens) {
+            queryString.append('+').append(token).append(' ');
+        }
+        queryString.replace(queryString.length() - 1, queryString.length(), "*");
+
+        ObjectNode node = mapper.createObjectNode();
+        ArrayNode fieldsNode = mapper.createArrayNode();
+        for(RecordType type : types) {
+            fieldsNode.add(type.getName());
+        }
+        node.put("fields", fieldsNode);
+        node.put("query", queryString.toString());
+
+        ObjectNode simpleQueryStringNode = mapper.createObjectNode();
+        simpleQueryStringNode.put("simple_query_string", node);
+
+        return simpleQueryStringNode;
+    }
+
+    private String _constructQueryStringQuery(List<String> tokens, int from, int size) {
+        ObjectMapper mapper = new ObjectMapper();
+
+        ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(tokens, RecordType.values());
+
+        ObjectNode rootNode = mapper.createObjectNode();
+        rootNode.put("query", simpleQueryStringNode);
+        rootNode.put("from", from);
+        rootNode.put("size", size);
+
+        return rootNode.toString();
+    }
+
+    private String _constructQueryStringQuery(KeywordQuery kq, Map<RecordType, List<String>> tokensMap) {
+        ObjectMapper mapper = new ObjectMapper();
+
+        ArrayNode filterNodes = mapper.createArrayNode();
+        for(Map.Entry<RecordType, List<String>> entry : tokensMap.entrySet()) {
+            ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(entry.getValue(), entry.getKey());
+            filterNodes.add(simpleQueryStringNode);
+        }
+
+        ObjectNode boolNode = mapper.createObjectNode();
+        boolNode.put("filter", filterNodes);
+
+        ObjectNode queryNode = mapper.createObjectNode();
+        queryNode.put("bool", boolNode);
+
+        ObjectNode rootNode = mapper.createObjectNode();
+        rootNode.put("query", queryNode);
+        rootNode.put("size", 0);
+
+        long size = kq.getLimit() * kq.getPage();
+        SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE,
+                "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE");
+        rootNode.put("aggs", _constructAggsNode(kq.getType(), Math.max(size, 10000), mapper));
+
+        return rootNode.toString();
+
+    }
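To make the query-string construction concrete: for tokens ["system", "cpu"], the loop builds "+system +cpu*". Each token is marked mandatory (+ is the AND-style operator of simple_query_string) and the trailing space after the last token is replaced with *, turning it into a prefix match, which is what type-ahead keyword search wants. The node handed to ES then looks like { "simple_query_string": { "fields": [...], "query": "+system +cpu*" } }, with the field list driven by the RecordType arguments.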
+    private ObjectNode _constructQueryNode(MetricSchemaRecordQuery query, ObjectMapper mapper) {
+        ArrayNode filterNodes = mapper.createArrayNode();
+        if(SchemaService.containsFilter(query.getMetric())) {
+            ObjectNode node = mapper.createObjectNode();
+            ObjectNode regexpNode = mapper.createObjectNode();
+            regexpNode.put(RecordType.METRIC.getName() + ".raw", SchemaService.convertToRegex(query.getMetric()));
+            node.put("regexp", regexpNode);
+            filterNodes.add(node);
+        }
+
+        if(SchemaService.containsFilter(query.getScope())) {
+            ObjectNode node = mapper.createObjectNode();
+            ObjectNode regexpNode = mapper.createObjectNode();
+            regexpNode.put(RecordType.SCOPE.getName() + ".raw", SchemaService.convertToRegex(query.getScope()));
+            node.put("regexp", regexpNode);
+            filterNodes.add(node);
+        }
+
+        if(SchemaService.containsFilter(query.getTagKey())) {
+            ObjectNode node = mapper.createObjectNode();
+            ObjectNode regexpNode = mapper.createObjectNode();
+            regexpNode.put(RecordType.TAGK.getName() + ".raw", SchemaService.convertToRegex(query.getTagKey()));
+            node.put("regexp", regexpNode);
+            filterNodes.add(node);
+        }
+
+        if(SchemaService.containsFilter(query.getTagValue())) {
+            ObjectNode node = mapper.createObjectNode();
+            ObjectNode regexpNode = mapper.createObjectNode();
+            regexpNode.put(RecordType.TAGV.getName() + ".raw", SchemaService.convertToRegex(query.getTagValue()));
+            node.put("regexp", regexpNode);
+            filterNodes.add(node);
+        }
+
+        if(SchemaService.containsFilter(query.getNamespace())) {
+            ObjectNode node = mapper.createObjectNode();
+            ObjectNode regexpNode = mapper.createObjectNode();
+            regexpNode.put(RecordType.NAMESPACE.getName() + ".raw", SchemaService.convertToRegex(query.getNamespace()));
+            node.put("regexp", regexpNode);
+            filterNodes.add(node);
+        }
+
+        ObjectNode boolNode = mapper.createObjectNode();
+        boolNode.put("filter", filterNodes);
+
+        ObjectNode queryNode = mapper.createObjectNode();
+        queryNode.put("bool", boolNode);
+        return queryNode;
+    }
+
+    private ObjectNode _constructAggsNode(RecordType type, long limit, ObjectMapper mapper) {
+
+        ObjectNode termsNode = mapper.createObjectNode();
+        termsNode.put("field", type.getName() + ".raw");
+        termsNode.put("order", mapper.createObjectNode().put("_term", "asc"));
+        termsNode.put("size", limit);
+        termsNode.put("execution_hint", "map");
+
+        ObjectNode distinctValuesNode = mapper.createObjectNode();
+        distinctValuesNode.put("terms", termsNode);
+
+        ObjectNode aggsNode = mapper.createObjectNode();
+        aggsNode.put("distinct_values", distinctValuesNode);
+        return aggsNode;
+    }
+
+
+    /* Helper method to convert JSON String representation to the corresponding Java entity. */
+    private <T> T toEntity(String content, TypeReference<T> type) {
+        try {
+            return _mapper.readValue(content, type);
+        } catch (IOException ex) {
+            throw new SystemException(ex);
+        }
+    }
+
+    /* Method to change the rest client. Used for testing. */
+    protected void setRestClient(RestClient restClient)
+    {
+        this._esRestClient = restClient;
+    }
+
+    /** Helper to process the response.
+     * Throws a SystemException when the http status code is outside of the range 200 - 300.
+     * @param response ES response
+     * @return Stringified response
+     */
+    protected String extractResponse(Response response) {
+        requireArgument(response != null, "HttpResponse object cannot be null.");
+
+        int status = response.getStatusLine().getStatusCode();
+        String strResponse = extractStringResponse(response);
+
+        if ((status < HttpStatus.SC_OK) || (status >= HttpStatus.SC_MULTIPLE_CHOICES)) {
+            throw new SystemException("Status code: " + status + ". Error occurred. " + strResponse);
+        } else {
+            return strResponse;
+        }
+    }
+
+    private String extractStringResponse(Response content) {
+        requireArgument(content != null, "Response content is null.");
+
+        String result;
+        HttpEntity entity = null;
+
+        try {
+            entity = content.getEntity();
+            if (entity == null) {
+                result = "";
+            } else {
+                ByteArrayOutputStream baos = new ByteArrayOutputStream();
+
+                entity.writeTo(baos);
+                result = baos.toString("UTF-8");
+            }
+            return result;
+        } catch (IOException ex) {
+            throw new SystemException(ex);
+        } finally {
+            if (entity != null) {
+                try {
+                    EntityUtils.consume(entity);
+                } catch (IOException ex) {
+                    _logger.warn("Failed to close entity stream.", ex);
+                }
+            }
+        }
+    }
+
+    private ObjectMapper _createObjectMapper() {
+        ObjectMapper mapper = new ObjectMapper();
+
+        mapper.setSerializationInclusion(Include.NON_NULL);
+        SimpleModule module = new SimpleModule();
+        module.addSerializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Serializer());
+        module.addDeserializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Deserializer());
+        module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer());
+        mapper.registerModule(module);
+
+        return mapper;
+    }
+
+    private ObjectMapper _createScopeAndMetricOnlyObjectMapper() {
+        ObjectMapper mapper = new ObjectMapper();
+
+        mapper.setSerializationInclusion(Include.NON_NULL);
+        SimpleModule module = new SimpleModule();
+        module.addSerializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Serializer());
+        module.addDeserializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Deserializer());
+        module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer());
+        mapper.registerModule(module);
+
+        return mapper;
+    }
+
+    private ObjectMapper _createScopeOnlyObjectMapper() {
+        ObjectMapper mapper = new ObjectMapper();
+
+        mapper.setSerializationInclusion(Include.NON_NULL);
+        SimpleModule module = new SimpleModule();
+        module.addSerializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Serializer());
+        module.addDeserializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Deserializer());
+        module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer());
+        mapper.registerModule(module);
+
+        return mapper;
+    }
+    private ObjectNode _createSettingsNode(int replicationFactor, int numShards) {
+        ObjectMapper mapper = new ObjectMapper();
+
+        ObjectNode metadataAnalyzer = mapper.createObjectNode();
+        metadataAnalyzer.put("tokenizer", "metadata_tokenizer");
+        metadataAnalyzer.put("filter", mapper.createArrayNode().add("lowercase"));
+
+        ObjectNode analyzerNode = mapper.createObjectNode();
+        analyzerNode.put("metadata_analyzer", metadataAnalyzer);
+
+        ObjectNode tokenizerNode = mapper.createObjectNode();
+        tokenizerNode.put("metadata_tokenizer", mapper.createObjectNode().put("type", "pattern").put("pattern", "([^\\p{L}\\d]+)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])"));
+
+        ObjectNode analysisNode = mapper.createObjectNode();
+        analysisNode.put("analyzer", analyzerNode);
+        analysisNode.put("tokenizer", tokenizerNode);
+
+        ObjectNode indexNode = mapper.createObjectNode();
+        indexNode.put("max_result_window", INDEX_MAX_RESULT_WINDOW);
+        indexNode.put("number_of_replicas", replicationFactor);
+        indexNode.put("number_of_shards", numShards);
+
+        ObjectNode settingsNode = mapper.createObjectNode();
+        settingsNode.put("analysis", analysisNode);
+        settingsNode.put("index", indexNode);
+
+        return settingsNode;
+    }
+
+    private ObjectNode _createMappingsNode() {
+        ObjectMapper mapper = new ObjectMapper();
+
+        ObjectNode propertiesNode = mapper.createObjectNode();
+        propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+        propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+        propertiesNode.put(RecordType.TAGK.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+        propertiesNode.put(RecordType.TAGV.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+        propertiesNode.put(RecordType.NAMESPACE.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+
+        propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE));
+
+        ObjectNode typeNode = mapper.createObjectNode();
+        typeNode.put("properties", propertiesNode);
+
+        ObjectNode mappingsNode = mapper.createObjectNode();
+        mappingsNode.put(TYPE_NAME, typeNode);
+        return mappingsNode;
+    }
+
+    private ObjectNode _createScopeAndMetricMappingsNode() {
+        ObjectMapper mapper = new ObjectMapper();
+
+        ObjectNode propertiesNode = mapper.createObjectNode();
+        propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+        propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT));
+
+        propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE));
+        propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE));
-        ObjectNode typeNode = mapper.createObjectNode();
-        typeNode.put("properties", propertiesNode);
-
-        ObjectNode mappingsNode = mapper.createObjectNode();
-        mappingsNode.put(SCOPE_AND_METRIC_TYPE_NAME, typeNode);
+        ObjectNode typeNode = mapper.createObjectNode();
+        typeNode.put("properties", propertiesNode);
+
+        ObjectNode mappingsNode = mapper.createObjectNode();
+        mappingsNode.put(SCOPE_AND_METRIC_TYPE_NAME, typeNode);
-        return mappingsNode;
-    }
+        return mappingsNode;
+    }
-    private ObjectNode _createScopeMappingsNode() {
-        ObjectMapper mapper = new ObjectMapper();
+    private ObjectNode _createScopeMappingsNode() {
+        ObjectMapper mapper = new ObjectMapper();
-        ObjectNode propertiesNode = mapper.createObjectNode();
-        propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT));
-
-        propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE));
-        propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE));
-
-        ObjectNode typeNode = mapper.createObjectNode();
-        typeNode.put("properties", propertiesNode);
-
-        ObjectNode mappingsNode = mapper.createObjectNode();
-        mappingsNode.put(SCOPE_TYPE_NAME, typeNode);
-
-        return mappingsNode;
-    }
-
-    private ObjectNode _createFieldNode(String type) {
-        ObjectMapper mapper = new ObjectMapper();
-
-        ObjectNode fieldNode = mapper.createObjectNode();
-        fieldNode.put("type", type);
-        fieldNode.put("analyzer", "metadata_analyzer");
-        ObjectNode keywordNode = mapper.createObjectNode();
-        keywordNode.put("type", "keyword");
-        ObjectNode fieldsNode = mapper.createObjectNode();
-        fieldsNode.put("raw", keywordNode);
-        fieldNode.put("fields", fieldsNode);
-        return fieldNode;
-    }
-
-    private ObjectNode _createFieldNodeNoAnalyzer(String type) {
-        ObjectMapper mapper = new ObjectMapper();
-
-        ObjectNode fieldNode = mapper.createObjectNode();
-        fieldNode.put("type", type);
-        return fieldNode;
-    }
-
-    private void _createIndexIfNotExists(String indexName, int replicationFactor, int numShards,
-            Supplier<ObjectNode> createMappingsNode) {
-        try {
-            Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + indexName);
-
boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; - - if(!indexExists) { - _logger.info("Index [" + indexName + "] does not exist. Will create one."); - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("settings", _createSettingsNode(replicationFactor, numShards)); - rootNode.put("mappings", createMappingsNode.get()); - - String settingsAndMappingsJson = rootNode.toString(); - String requestUrl = new StringBuilder().append("/").append(indexName).toString(); - - response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); - extractResponse(response); - } - } catch (Exception e) { - _logger.error("Failed to check/create {} index. ElasticSearchSchemaService may not function. {}", - indexName, e); - } - } - - /** - * Enumeration of supported HTTP methods. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - private enum HttpMethod { - - /** POST operation. */ - POST("POST"), - /** PUT operation. */ - PUT("PUT"), - /** HEAD operation. */ - HEAD("HEAD"); - - private String name; - - HttpMethod(String name) { - this.setName(name); - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - } - - - /** - * The set of implementation specific configuration properties. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - public enum Property { - - ELASTICSEARCH_ENDPOINT("service.property.schema.elasticsearch.endpoint", "http://localhost:9200,http://localhost:9201"), - /** Connection timeout for ES REST client. */ - ELASTICSEARCH_ENDPOINT_CONNECTION_TIMEOUT("service.property.schema.elasticsearch.endpoint.connection.timeout", "10000"), - /** Socket connection timeout for ES REST client. */ - ELASTICSEARCH_ENDPOINT_SOCKET_TIMEOUT("service.property.schema.elasticsearch.endpoint.socket.timeout", "10000"), - /** Connection count for ES REST client. */ - ELASTICSEARCH_CONNECTION_COUNT("service.property.schema.elasticsearch.connection.count", "10"), - /** Replication factor for metadata_index. */ - ELASTICSEARCH_NUM_REPLICAS("service.property.schema.elasticsearch.num.replicas", "1"), - /** Shard count for metadata_index. */ - ELASTICSEARCH_SHARDS_COUNT("service.property.schema.elasticsearch.shards.count", "10"), - /** Replication factor for scopenames */ - ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.index", "1"), - /** Shard count for scopenames */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "6"), - /** The no. of records to batch for bulk indexing requests. - * https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests - */ - ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"), - /** The hashing algorithm to use for generating document id. 
*/ - ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"), - - /** Name of scope only index */ - ELASTICSEARCH_SCOPE_INDEX_NAME("service.property.schema.elasticsearch.scope.index.name", "scopenames"), - /** Type within scope only index */ - ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), - - /** Replication factor for scope and metric names */ - ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scopeandmetric.index", "1"), - /** Shard count for scope and metric names */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scopeandmetric.index", "6"), - - /** Name of scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scopeandmetric.index.name", "scopemetricnames"), - /** Type within scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scopeandmetric.type.name", "scopemetric_type"); - - private final String _name; - private final String _defaultValue; - - private Property(String name, String defaultValue) { - _name = name; - _defaultValue = defaultValue; - } - - /** - * Returns the property name. - * - * @return The property name. - */ - public String getName() { - return _name; - } - - /** - * Returns the default value for the property. - * - * @return The default value. - */ - public String getDefaultValue() { - return _defaultValue; - } - } - - static class PutResponse { - private int took; - private boolean errors; - private List items; - - public PutResponse() {} - - public int getTook() { - return took; - } - - public void setTook(int took) { - this.took = took; - } - - public boolean isErrors() { - return errors; - } - - public void setErrors(boolean errors) { - this.errors = errors; - } - - public List getItems() { - return items; - } - - public void setItems(List items) { - this.items = items; - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class Item { - private CreateItem create; - private CreateItem index; - - public Item() {} - - public CreateItem getCreate() { - return create; - } - - public void setCreate(CreateItem create) { - this.create = create; - } - - public CreateItem getIndex() { - return index; - } - - public void setIndex(CreateItem index) { - this.index = index; - } - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class CreateItem { - private String _index; - private String _type; - private String _id; - private int status; - private Error error; - - public CreateItem() {} - - public String get_index() { - return _index; - } - - public void set_index(String _index) { - this._index = _index; - } - - public String get_type() { - return _type; - } - - public void set_type(String _type) { - this._type = _type; - } - - public String get_id() { - return _id; - } - - public void set_id(String _id) { - this._id = _id; - } - - public int getStatus() { - return status; - } - - public void setStatus(int status) { - this.status = status; - } - - public Error getError() { - return error; - } - - public void setError(Error error) { - this.error = error; - } - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class Error { - private String type; - private String reason; - - public Error() {} - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public 
String getReason() { - return reason; - } - - public void setReason(String reason) { - this.reason = reason; - } - } - } + ObjectNode propertiesNode = mapper.createObjectNode(); + propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + + propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + + ObjectNode typeNode = mapper.createObjectNode(); + typeNode.put("properties", propertiesNode); + + ObjectNode mappingsNode = mapper.createObjectNode(); + mappingsNode.put(SCOPE_TYPE_NAME, typeNode); + + return mappingsNode; + } + + private ObjectNode _createFieldNode(String type) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode fieldNode = mapper.createObjectNode(); + fieldNode.put("type", type); + fieldNode.put("analyzer", "metadata_analyzer"); + ObjectNode keywordNode = mapper.createObjectNode(); + keywordNode.put("type", "keyword"); + ObjectNode fieldsNode = mapper.createObjectNode(); + fieldsNode.put("raw", keywordNode); + fieldNode.put("fields", fieldsNode); + return fieldNode; + } + + private ObjectNode _createFieldNodeNoAnalyzer(String type) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode fieldNode = mapper.createObjectNode(); + fieldNode.put("type", type); + return fieldNode; + } + + private void _createIndexIfNotExists(String indexName, int replicationFactor, int numShards, + Supplier createMappingsNode) { + try { + Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + indexName); + boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; + + if(!indexExists) { + _logger.info("Index [" + indexName + "] does not exist. Will create one."); + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("settings", _createSettingsNode(replicationFactor, numShards)); + rootNode.put("mappings", createMappingsNode.get()); + + String settingsAndMappingsJson = rootNode.toString(); + String requestUrl = new StringBuilder().append("/").append(indexName).toString(); + + response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); + extractResponse(response); + } + } catch (Exception e) { + _logger.error("Failed to check/create {} index. ElasticSearchSchemaService may not function. {}", + indexName, e); + } + } + + /** + * Enumeration of supported HTTP methods. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + private enum HttpMethod { + + /** POST operation. */ + POST("POST"), + /** PUT operation. */ + PUT("PUT"), + /** HEAD operation. */ + HEAD("HEAD"); + + private String name; + + HttpMethod(String name) { + this.setName(name); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + + /** + * The set of implementation specific configuration properties. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + public enum Property { + + ELASTICSEARCH_ENDPOINT("service.property.schema.elasticsearch.endpoint", "http://localhost:9200,http://localhost:9201"), + /** Connection timeout for ES REST client. */ + ELASTICSEARCH_ENDPOINT_CONNECTION_TIMEOUT("service.property.schema.elasticsearch.endpoint.connection.timeout", "10000"), + /** Socket connection timeout for ES REST client. 
*/ + ELASTICSEARCH_ENDPOINT_SOCKET_TIMEOUT("service.property.schema.elasticsearch.endpoint.socket.timeout", "10000"), + /** Connection count for ES REST client. */ + ELASTICSEARCH_CONNECTION_COUNT("service.property.schema.elasticsearch.connection.count", "10"), + /** Replication factor for metadata_index. */ + ELASTICSEARCH_NUM_REPLICAS("service.property.schema.elasticsearch.num.replicas", "1"), + /** Shard count for metadata_index. */ + ELASTICSEARCH_SHARDS_COUNT("service.property.schema.elasticsearch.shards.count", "10"), + /** Replication factor for scopenames */ + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.index", "1"), + /** Shard count for scopenames */ + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "6"), + /** The no. of records to batch for bulk indexing requests. + * https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests + */ + ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"), + /** The hashing algorithm to use for generating document id. */ + ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"), + + /** Name of scope only index */ + ELASTICSEARCH_SCOPE_INDEX_NAME("service.property.schema.elasticsearch.scope.index.name", "scopenames"), + /** Type within scope only index */ + ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), + + /** Replication factor for scope and metric names */ + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scopeandmetric.index", "1"), + /** Shard count for scope and metric names */ + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scopeandmetric.index", "6"), + + /** Name of scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scopeandmetric.index.name", "scopemetricnames"), + /** Type within scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scopeandmetric.type.name", "scopemetric_type"); + + private final String _name; + private final String _defaultValue; + + private Property(String name, String defaultValue) { + _name = name; + _defaultValue = defaultValue; + } + + /** + * Returns the property name. + * + * @return The property name. + */ + public String getName() { + return _name; + } + + /** + * Returns the default value for the property. + * + * @return The default value. 
+ */ + public String getDefaultValue() { + return _defaultValue; + } + } + + static class PutResponse { + private int took; + private boolean errors; + private List items; + + public PutResponse() {} + + public int getTook() { + return took; + } + + public void setTook(int took) { + this.took = took; + } + + public boolean isErrors() { + return errors; + } + + public void setErrors(boolean errors) { + this.errors = errors; + } + + public List getItems() { + return items; + } + + public void setItems(List items) { + this.items = items; + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class Item { + private CreateItem create; + private CreateItem index; + + public Item() {} + + public CreateItem getCreate() { + return create; + } + + public void setCreate(CreateItem create) { + this.create = create; + } + + public CreateItem getIndex() { + return index; + } + + public void setIndex(CreateItem index) { + this.index = index; + } + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class CreateItem { + private String _index; + private String _type; + private String _id; + private int status; + private Error error; + + public CreateItem() {} + + public String get_index() { + return _index; + } + + public void set_index(String _index) { + this._index = _index; + } + + public String get_type() { + return _type; + } + + public void set_type(String _type) { + this._type = _type; + } + + public String get_id() { + return _id; + } + + public void set_id(String _id) { + this._id = _id; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public Error getError() { + return error; + } + + public void setError(Error error) { + this.error = error; + } + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class Error { + private String type; + private String reason; + + public Error() {} + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getReason() { + return reason; + } + + public void setReason(String reason) { + this.reason = reason; + } + } + } } diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java index 920bc72a8..df7ed2a3c 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java @@ -35,166 +35,166 @@ */ public class AbstractSchemaServiceTest extends AbstractTest { - private int scopesCount = 0; - private int scopeAndMetricsCount = 0; - private int metricsCount = 0; + private int scopesCount = 0; + private int scopeAndMetricsCount = 0; + private int metricsCount = 0; - @Test - public void testPutEverythingCached() { - List metrics = createRandomMetrics("test-scope", "test-metric", 10); + @Test + public void testPutEverythingCached() { + List metrics = createRandomMetrics("test-scope", "test-metric", 10); - metrics.addAll(createRandomMetrics(null, null, 10)); + metrics.addAll(createRandomMetrics(null, null, 10)); - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - ElasticSearchSchemaService 
spyService = _initializeSpyService(service); + ElasticSearchSchemaService spyService = _initializeSpyService(service); - spyService.put(metrics); + spyService.put(metrics); - Set scopeNames = new HashSet<>(); - Set> scopeAndMetricNames = new HashSet<>(); + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); - for(Metric m : metrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); - // add to bloom filter cache - spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); - spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); - spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); + // add to bloom filter cache + spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); + spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); - spyService.put(metrics); - // count should be same since we are re-reading cached value + spyService.put(metrics); + // count should be same since we are re-reading cached value - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); - } + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + } - @Test - public void testPutPartialCached() { - List metrics = createRandomMetrics("test-scope", "test-metric", 10); + @Test + public void testPutPartialCached() { + List metrics = createRandomMetrics("test-scope", "test-metric", 10); - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - ElasticSearchSchemaService spyService = _initializeSpyService(service); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + ElasticSearchSchemaService spyService = _initializeSpyService(service); - spyService.put(metrics); + spyService.put(metrics); - Set scopeNames = new HashSet<>(); - Set> scopeAndMetricNames = new HashSet<>(); + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); - for(Metric m : metrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); - // add to bloom filter cache - 
spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); - spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); - spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); + // add to bloom filter cache + spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); + spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); - List newMetrics = createRandomMetrics(null, null, 10); + List newMetrics = createRandomMetrics(null, null, 10); - // 1st metric already in cache (partial case scenario), and now we call put with both list of metrics + // 1st metric already in cache (partial case scenario), and now we call put with both list of metrics - initCounters(); - spyService.put(metrics); - spyService.put(newMetrics); + initCounters(); + spyService.put(metrics); + spyService.put(newMetrics); - scopeNames.clear(); - scopeAndMetricNames.clear(); + scopeNames.clear(); + scopeAndMetricNames.clear(); - for(Metric m : newMetrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } + for(Metric m : newMetrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } - assertEquals(metricsCount, newMetrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); - } - - @Test - public void testPutNothingCached() { - List metrics = createRandomMetrics("test-scope", "test-metric", 10); + assertEquals(metricsCount, newMetrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + } - metrics.addAll(createRandomMetrics(null, null, 10)); + @Test + public void testPutNothingCached() { + List metrics = createRandomMetrics("test-scope", "test-metric", 10); - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - ElasticSearchSchemaService spyService = _initializeSpyService(service); + metrics.addAll(createRandomMetrics(null, null, 10)); - spyService.put(metrics); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + ElasticSearchSchemaService spyService = _initializeSpyService(service); - Set scopeNames = new HashSet<>(); - Set> scopeAndMetricNames = new HashSet<>(); + spyService.put(metrics); - for(Metric m : metrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } - - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); - - spyService.put(metrics); - - assertEquals(metricsCount, 2 * metrics.size()); - assertEquals(scopeAndMetricsCount, 2 * scopeAndMetricNames.size()); - assertEquals(scopesCount, 2 * scopeNames.size()); - } - - private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service) { - ElasticSearchSchemaService spyService = Mockito.spy(service); - initCounters(); - - Mockito.doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocation) throws Throwable { - @SuppressWarnings("unchecked") - List metrics = 
List.class.cast(invocation.getArguments()[0]); - - Set scopeNames = Set.class.cast(invocation.getArguments()[1]); - - Set> scopeAndMetricNames = Set.class.cast(invocation.getArguments()[2]); - - scopesCount += scopeNames.size(); - scopeAndMetricsCount += scopeAndMetricNames.size(); - metricsCount += metrics.size(); - - return null; - } - }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any()); - return spyService; - } - - private void initCounters() { - scopesCount = 0; - scopeAndMetricsCount = 0; - metricsCount = 0; - } - - @Test - public void getNumHoursUntilNextFlushBloomFilter() { - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - - Calendar calendar = Calendar.getInstance(); - - // Will wait 24 hours before next flush if at same hour boundary - int hour = calendar.get(Calendar.HOUR_OF_DAY); - assertTrue(service.getNumHoursUntilTargetHour(hour) == 24); - } + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); + + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } + + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + + spyService.put(metrics); + + assertEquals(metricsCount, 2 * metrics.size()); + assertEquals(scopeAndMetricsCount, 2 * scopeAndMetricNames.size()); + assertEquals(scopesCount, 2 * scopeNames.size()); + } + + private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service) { + ElasticSearchSchemaService spyService = Mockito.spy(service); + initCounters(); + + Mockito.doAnswer(new Answer() { + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + @SuppressWarnings("unchecked") + List metrics = List.class.cast(invocation.getArguments()[0]); + + Set scopeNames = Set.class.cast(invocation.getArguments()[1]); + + Set> scopeAndMetricNames = Set.class.cast(invocation.getArguments()[2]); + + scopesCount += scopeNames.size(); + scopeAndMetricsCount += scopeAndMetricNames.size(); + metricsCount += metrics.size(); + + return null; + } + }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any()); + return spyService; + } + + private void initCounters() { + scopesCount = 0; + scopeAndMetricsCount = 0; + metricsCount = 0; + } + + @Test + public void getNumHoursUntilNextFlushBloomFilter() { + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + + Calendar calendar = Calendar.getInstance(); + + // Will wait 24 hours before next flush if at same hour boundary + int hour = calendar.get(Calendar.HOUR_OF_DAY); + assertTrue(service.getNumHoursUntilTargetHour(hour) == 24); + } } diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java index e12cffe90..fd868ca52 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java @@ -340,7 +340,7 @@ private String convertToPrettyJson(String jsonString) { Gson gson = new GsonBuilder().setPrettyPrinting().create(); JsonElement 
el = parser.parse(jsonString); - return gson.toJson(el); // done + return gson.toJson(el); } private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service, String reply) { From 1679547f19a6abb08f4e3b49cb27eee63375a27a Mon Sep 17 00:00:00 2001 From: Naveen Reddy Karri Date: Tue, 24 Jul 2018 17:04:16 -0700 Subject: [PATCH 18/27] Undo Tab-Space Fix --- .../service/schema/AbstractSchemaService.java | 750 ++--- .../schema/ElasticSearchSchemaService.java | 2882 ++++++++--------- .../schema/AbstractSchemaServiceTest.java | 238 +- 3 files changed, 1935 insertions(+), 1935 deletions(-) diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java index 8f78303af..d8b5067d7 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java @@ -37,379 +37,379 @@ * @author Dilip Devaraj (ddevaraj@salesforce.com) */ public abstract class AbstractSchemaService extends DefaultService implements SchemaService { - private static final long POLL_INTERVAL_MS = 10 * 60 * 1000L; - private static final int DAY_IN_SECONDS = 24 * 60 * 60; - private static final int HOUR_IN_SECONDS = 60 * 60; - - /* Have three separate bloom filters one for metrics schema, one only for scope names schema and one only for scope name and metric name schema. - * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. - * Hence we can avoid the extra call to populate scopenames index on ES in subsequent Kafka reads. - * The same logic applies to scope name and metric name schema. 
- */ - protected static BloomFilter bloomFilter; - protected static BloomFilter bloomFilterScopeOnly; - protected static BloomFilter bloomFilterScopeAndMetricOnly; - private Random rand = new Random(); - private int randomNumber = rand.nextInt(); - private int bloomFilterExpectedNumberInsertions; - private double bloomFilterErrorRate; - private int bloomFilterScopeOnlyExpectedNumberInsertions; - private double bloomFilterScopeOnlyErrorRate; - private int bloomFilterScopeAndMetricOnlyExpectedNumberInsertions; - private double bloomFilterScopeAndMetricOnlyErrorRate; - private final Logger _logger = LoggerFactory.getLogger(getClass()); - private final Thread _bloomFilterMonitorThread; - protected final boolean _syncPut; - private int bloomFilterFlushHourToStartAt; - private ScheduledExecutorService scheduledExecutorService; - - protected AbstractSchemaService(SystemConfiguration config) { - super(config); - - bloomFilterExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_ERROR_RATE.getName(), - Property.BLOOMFILTER_ERROR_RATE.getDefaultValue())); - - bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterScopeOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getName(), - Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getDefaultValue())); - - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), - Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); - bloomFilterScopeAndMetricOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getName(), - Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getDefaultValue())); - - bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); - - bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); - bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); - - _syncPut = Boolean.parseBoolean( - config.getValue(Property.SYNC_PUT.getName(), Property.SYNC_PUT.getDefaultValue())); - - _bloomFilterMonitorThread = new Thread(new BloomFilterMonitorThread(), "bloom-filter-monitor"); - _bloomFilterMonitorThread.start(); - - bloomFilterFlushHourToStartAt = Integer.parseInt(config.getValue(Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getName(), - Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getDefaultValue())); - createScheduledExecutorService(bloomFilterFlushHourToStartAt); - } - - @Override - public void put(Metric metric) { - requireNotDisposed(); - SystemAssert.requireArgument(metric != null, "Metric cannot be null."); - put(Arrays.asList(metric)); - } - - @Override - public void put(List metrics) { - requireNotDisposed(); - SystemAssert.requireArgument(metrics != null, "Metric list cannot 
be null."); - - // Create a list of metricsToPut that do not exist on the BLOOMFILTER and then call implementation - // specific put with only those subset of metricsToPut. - List metricsToPut = new ArrayList<>(metrics.size()); - Set scopesToPut = new HashSet<>(metrics.size()); - - Set> scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); - - for(Metric metric : metrics) { - // check metric schema bloom filter - if(metric.getTags().isEmpty()) { - // if metric does not have tags - String key = constructKey(metric, null); - boolean found = bloomFilter.mightContain(key); - if(!found) { - metricsToPut.add(metric); - } - } else { - // if metric has tags - boolean newTags = false; - for(Entry tagEntry : metric.getTags().entrySet()) { - String key = constructKey(metric, tagEntry); - boolean found = bloomFilter.mightContain(key); - if(!found) { - newTags = true; - } - } - - if(newTags) { - metricsToPut.add(metric); - } - } - - String scopeName = metric.getScope(); - String metricName = metric.getMetric(); - - // Check scope only bloom filter - String key = constructScopeOnlyKey(scopeName); - boolean found = bloomFilterScopeOnly.mightContain(key); - if(!found) { - scopesToPut.add(scopeName); - } - - // Check scope and metric only bloom filter - key = constructScopeAndMetricOnlyKey(scopeName, metricName); - found = bloomFilterScopeAndMetricOnly.mightContain(key); - if(!found) { - scopesAndMetricsNamesToPut.add(Pair.of(scopeName, metricName)); - } - } - - implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); - } - - /* - * Calls the implementation specific write for indexing the records - * - * @param metrics The metrics metadata that will be written to a separate index. - * @param scopeNames The scope names that will be written to a separate index. - * @param scopesAndMetricNames The scope and metric names that will be written to a separate index. 
- */ - protected abstract void implementationSpecificPut(List metrics, Set scopeNames, - Set> scopesAndMetricNames); - - @Override - public void dispose() { - requireNotDisposed(); - if (_bloomFilterMonitorThread != null && _bloomFilterMonitorThread.isAlive()) { - _logger.info("Stopping bloom filter monitor thread."); - _bloomFilterMonitorThread.interrupt(); - _logger.info("Bloom filter monitor thread interrupted."); - try { - _logger.info("Waiting for bloom filter monitor thread to terminate."); - _bloomFilterMonitorThread.join(); - } catch (InterruptedException ex) { - _logger.warn("Bloom filter monitor thread was interrupted while shutting down."); - } - _logger.info("System monitoring stopped."); - } else { - _logger.info("Requested shutdown of bloom filter monitor thread aborted, as it is not yet running."); - } - shutdownScheduledExecutorService(); - } - - @Override - public abstract Properties getServiceProperties(); - - @Override - public abstract List get(MetricSchemaRecordQuery query); - - @Override - public abstract List getUnique(MetricSchemaRecordQuery query, RecordType type); - - @Override - public abstract List keywordSearch(KeywordQuery query); - - protected String constructKey(Metric metric, Entry tagEntry) { - StringBuilder sb = new StringBuilder(metric.getScope()); - sb.append('\0').append(metric.getMetric()); - - if(metric.getNamespace() != null) { - sb.append('\0').append(metric.getNamespace()); - } - - if(tagEntry != null) { - sb.append('\0').append(tagEntry.getKey()).append('\0').append(tagEntry.getValue()); - } - - // Add randomness for each instance of bloom filter running on different - // schema clients to reduce probability of false positives that metric schemas are not written to ES - sb.append('\0').append(randomNumber); - - return sb.toString(); - } - - protected String constructKey(String scope, String metric, String tagk, String tagv, String namespace) { - - StringBuilder sb = new StringBuilder(scope); - - if(!StringUtils.isEmpty(metric)) { - sb.append('\0').append(metric); - } - - if(!StringUtils.isEmpty(namespace)) { - sb.append('\0').append(namespace); - } - - if(!StringUtils.isEmpty(tagk)) { - sb.append('\0').append(tagk); - } - - if(!StringUtils.isEmpty(tagv)) { - sb.append('\0').append(tagv); - } - - // Add randomness for each instance of bloom filter running on different - // schema clients to reduce probability of false positives that metric schemas are not written to ES - sb.append('\0').append(randomNumber); - - return sb.toString(); - } - - protected String constructScopeOnlyKey(String scope) { - - return constructKey(scope, null, null, null, null); - } - - protected String constructScopeAndMetricOnlyKey(String scope, String metric) { - - return constructKey(scope, metric, null, null, null); - } - - private void createScheduledExecutorService(int targetHourToStartAt){ - scheduledExecutorService = Executors.newScheduledThreadPool(1); - int initialDelayInSeconds = getNumHoursUntilTargetHour(targetHourToStartAt) * HOUR_IN_SECONDS; - BloomFilterFlushThread bloomFilterFlushThread = new BloomFilterFlushThread(); - scheduledExecutorService.scheduleAtFixedRate(bloomFilterFlushThread, initialDelayInSeconds, DAY_IN_SECONDS, TimeUnit.SECONDS); - } - - private void shutdownScheduledExecutorService(){ - _logger.info("Shutting down scheduled bloom filter flush executor service"); - scheduledExecutorService.shutdown(); - try { - scheduledExecutorService.awaitTermination(10, TimeUnit.SECONDS); - } catch (InterruptedException ex) { - _logger.warn("Shutdown of 
executor service was interrupted."); - Thread.currentThread().interrupt(); - } - } - - protected int getNumHoursUntilTargetHour(int targetHour){ - _logger.info("Initialized bloom filter flushing out, at {} hour of day", targetHour); - Calendar calendar = Calendar.getInstance(); - int hour = calendar.get(Calendar.HOUR_OF_DAY); - return hour < targetHour ? (targetHour - hour) : (targetHour + 24 - hour); - } - - /** - * The set of implementation specific configuration properties. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - public enum Property { - SYNC_PUT("service.property.schema.sync.put", "false"), - BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.expected.number.insertions", "40"), - BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), - - /* - * Estimated Filter Size using bloomFilter 1 million entries - * https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= 2.86MiB - * Storing in a Set 100K entries with avg length of 15 chars would be 100K * 15 * 2 B = 30B * 100K = 3 MB - * If # of entries is 1 million, then it would be 30 MB resulting in savings in space. - */ - - BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), - BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), - - /* - * Estimated Filter Size using bloomFilter 10 million entries - * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB - * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB - * If # of entries is 10 million, then it would be 600 MB resulting in savings in space. - */ - - BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), - BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"), - - /* - * Have a different configured flush start hour for different machines to prevent thundering herd problem. - */ - BLOOM_FILTER_FLUSH_HOUR_TO_START_AT("service.property.schema.bloomfilter.flush.hour.to.start.at","2"); - - private final String _name; - private final String _defaultValue; - - private Property(String name, String defaultValue) { - _name = name; - _defaultValue = defaultValue; - } - - /** - * Returns the property name. - * - * @return The property name. - */ - public String getName() { - return _name; - } - - /** - * Returns the default value for the property. - * - * @return The default value. - */ - public String getDefaultValue() { - return _defaultValue; - } - } - - - //~ Inner Classes ******************************************************************************************************************************** - - /** - * Bloom Filter monitoring thread. - * - * @author Dilip Devaraj (ddevaraj@salesforce.com) - */ - private class BloomFilterMonitorThread implements Runnable { - @Override - public void run() { - _logger.info("Initialized random number for bloom filter key = {}", randomNumber); - while (!Thread.currentThread().isInterrupted()) { - _sleepForPollPeriod(); - if (!Thread.currentThread().isInterrupted()) { - try { - _checkBloomFilterUsage(); - } catch (Exception ex) { - _logger.warn("Exception occurred while checking bloom filter usage.", ex); - } - } - } - } - - private void _checkBloomFilterUsage() { - _logger.info("Metrics Bloom approx no. 
elements = {}", bloomFilter.approximateElementCount()); - _logger.info("Metrics Bloom expected error rate = {}", bloomFilter.expectedFpp()); - _logger.info("Scope only Bloom approx no. elements = {}", bloomFilterScopeOnly.approximateElementCount()); - _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); - _logger.info("Scope and metric only Bloom approx no. elements = {}", bloomFilterScopeAndMetricOnly.approximateElementCount()); - _logger.info("Scope and metric only Bloom expected error rate = {}", bloomFilterScopeAndMetricOnly.expectedFpp()); - } - - private void _sleepForPollPeriod() { - try { - _logger.info("Sleeping for {}s before checking bloom filter statistics.", POLL_INTERVAL_MS / 1000); - Thread.sleep(POLL_INTERVAL_MS); - } catch (InterruptedException ex) { - _logger.warn("AbstractSchemaService memory monitor thread was interrupted."); - Thread.currentThread().interrupt(); - } - } - } - - private class BloomFilterFlushThread implements Runnable { - @Override - public void run() { - try{ - _flushBloomFilter(); - } catch (Exception ex) { - _logger.warn("Exception occurred while flushing bloom filter.", ex); - } - } - - private void _flushBloomFilter() { - _logger.info("Flushing out bloom filter entries"); - bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); - bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); - bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), - bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); - /* Don't need explicit synchronization to prevent slowness majority of the time*/ - randomNumber = rand.nextInt(); - } - } + private static final long POLL_INTERVAL_MS = 10 * 60 * 1000L; + private static final int DAY_IN_SECONDS = 24 * 60 * 60; + private static final int HOUR_IN_SECONDS = 60 * 60; + + /* Have three separate bloom filters one for metrics schema, one only for scope names schema and one only for scope name and metric name schema. + * Since scopes will continue to repeat more often on subsequent kafka batch reads, we can easily check this from the bloom filter for scopes only. + * Hence we can avoid the extra call to populate scopenames index on ES in subsequent Kafka reads. + * The same logic applies to scope name and metric name schema. 
+ */ + protected static BloomFilter bloomFilter; + protected static BloomFilter bloomFilterScopeOnly; + protected static BloomFilter bloomFilterScopeAndMetricOnly; + private Random rand = new Random(); + private int randomNumber = rand.nextInt(); + private int bloomFilterExpectedNumberInsertions; + private double bloomFilterErrorRate; + private int bloomFilterScopeOnlyExpectedNumberInsertions; + private double bloomFilterScopeOnlyErrorRate; + private int bloomFilterScopeAndMetricOnlyExpectedNumberInsertions; + private double bloomFilterScopeAndMetricOnlyErrorRate; + private final Logger _logger = LoggerFactory.getLogger(getClass()); + private final Thread _bloomFilterMonitorThread; + protected final boolean _syncPut; + private int bloomFilterFlushHourToStartAt; + private ScheduledExecutorService scheduledExecutorService; + + protected AbstractSchemaService(SystemConfiguration config) { + super(config); + + bloomFilterExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_ERROR_RATE.getName(), + Property.BLOOMFILTER_ERROR_RATE.getDefaultValue())); + + bloomFilterScopeOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterScopeOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getName(), + Property.BLOOMFILTER_SCOPE_ONLY_ERROR_RATE.getDefaultValue())); + + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions = Integer.parseInt(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS.getDefaultValue())); + bloomFilterScopeAndMetricOnlyErrorRate = Double.parseDouble(config.getValue(Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getName(), + Property.BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE.getDefaultValue())); + + bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); + + bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); + + _syncPut = Boolean.parseBoolean( + config.getValue(Property.SYNC_PUT.getName(), Property.SYNC_PUT.getDefaultValue())); + + _bloomFilterMonitorThread = new Thread(new BloomFilterMonitorThread(), "bloom-filter-monitor"); + _bloomFilterMonitorThread.start(); + + bloomFilterFlushHourToStartAt = Integer.parseInt(config.getValue(Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getName(), + Property.BLOOM_FILTER_FLUSH_HOUR_TO_START_AT.getDefaultValue())); + createScheduledExecutorService(bloomFilterFlushHourToStartAt); + } + + @Override + public void put(Metric metric) { + requireNotDisposed(); + SystemAssert.requireArgument(metric != null, "Metric cannot be null."); + put(Arrays.asList(metric)); + } + + @Override + public void put(List metrics) { + requireNotDisposed(); + SystemAssert.requireArgument(metrics != null, "Metric list cannot 
be null."); + + // Create a list of metricsToPut that do not exist on the BLOOMFILTER and then call implementation + // specific put with only those subset of metricsToPut. + List metricsToPut = new ArrayList<>(metrics.size()); + Set scopesToPut = new HashSet<>(metrics.size()); + + Set> scopesAndMetricsNamesToPut = new HashSet<>(metrics.size()); + + for(Metric metric : metrics) { + // check metric schema bloom filter + if(metric.getTags().isEmpty()) { + // if metric does not have tags + String key = constructKey(metric, null); + boolean found = bloomFilter.mightContain(key); + if(!found) { + metricsToPut.add(metric); + } + } else { + // if metric has tags + boolean newTags = false; + for(Entry tagEntry : metric.getTags().entrySet()) { + String key = constructKey(metric, tagEntry); + boolean found = bloomFilter.mightContain(key); + if(!found) { + newTags = true; + } + } + + if(newTags) { + metricsToPut.add(metric); + } + } + + String scopeName = metric.getScope(); + String metricName = metric.getMetric(); + + // Check scope only bloom filter + String key = constructScopeOnlyKey(scopeName); + boolean found = bloomFilterScopeOnly.mightContain(key); + if(!found) { + scopesToPut.add(scopeName); + } + + // Check scope and metric only bloom filter + key = constructScopeAndMetricOnlyKey(scopeName, metricName); + found = bloomFilterScopeAndMetricOnly.mightContain(key); + if(!found) { + scopesAndMetricsNamesToPut.add(Pair.of(scopeName, metricName)); + } + } + + implementationSpecificPut(metricsToPut, scopesToPut, scopesAndMetricsNamesToPut); + } + + /* + * Calls the implementation specific write for indexing the records + * + * @param metrics The metrics metadata that will be written to a separate index. + * @param scopeNames The scope names that will be written to a separate index. + * @param scopesAndMetricNames The scope and metric names that will be written to a separate index. 
+ */ + protected abstract void implementationSpecificPut(List metrics, Set scopeNames, + Set> scopesAndMetricNames); + + @Override + public void dispose() { + requireNotDisposed(); + if (_bloomFilterMonitorThread != null && _bloomFilterMonitorThread.isAlive()) { + _logger.info("Stopping bloom filter monitor thread."); + _bloomFilterMonitorThread.interrupt(); + _logger.info("Bloom filter monitor thread interrupted."); + try { + _logger.info("Waiting for bloom filter monitor thread to terminate."); + _bloomFilterMonitorThread.join(); + } catch (InterruptedException ex) { + _logger.warn("Bloom filter monitor thread was interrupted while shutting down."); + } + _logger.info("System monitoring stopped."); + } else { + _logger.info("Requested shutdown of bloom filter monitor thread aborted, as it is not yet running."); + } + shutdownScheduledExecutorService(); + } + + @Override + public abstract Properties getServiceProperties(); + + @Override + public abstract List get(MetricSchemaRecordQuery query); + + @Override + public abstract List getUnique(MetricSchemaRecordQuery query, RecordType type); + + @Override + public abstract List keywordSearch(KeywordQuery query); + + protected String constructKey(Metric metric, Entry tagEntry) { + StringBuilder sb = new StringBuilder(metric.getScope()); + sb.append('\0').append(metric.getMetric()); + + if(metric.getNamespace() != null) { + sb.append('\0').append(metric.getNamespace()); + } + + if(tagEntry != null) { + sb.append('\0').append(tagEntry.getKey()).append('\0').append(tagEntry.getValue()); + } + + // Add randomness for each instance of bloom filter running on different + // schema clients to reduce probability of false positives that metric schemas are not written to ES + sb.append('\0').append(randomNumber); + + return sb.toString(); + } + + protected String constructKey(String scope, String metric, String tagk, String tagv, String namespace) { + + StringBuilder sb = new StringBuilder(scope); + + if(!StringUtils.isEmpty(metric)) { + sb.append('\0').append(metric); + } + + if(!StringUtils.isEmpty(namespace)) { + sb.append('\0').append(namespace); + } + + if(!StringUtils.isEmpty(tagk)) { + sb.append('\0').append(tagk); + } + + if(!StringUtils.isEmpty(tagv)) { + sb.append('\0').append(tagv); + } + + // Add randomness for each instance of bloom filter running on different + // schema clients to reduce probability of false positives that metric schemas are not written to ES + sb.append('\0').append(randomNumber); + + return sb.toString(); + } + + protected String constructScopeOnlyKey(String scope) { + + return constructKey(scope, null, null, null, null); + } + + protected String constructScopeAndMetricOnlyKey(String scope, String metric) { + + return constructKey(scope, metric, null, null, null); + } + + private void createScheduledExecutorService(int targetHourToStartAt){ + scheduledExecutorService = Executors.newScheduledThreadPool(1); + int initialDelayInSeconds = getNumHoursUntilTargetHour(targetHourToStartAt) * HOUR_IN_SECONDS; + BloomFilterFlushThread bloomFilterFlushThread = new BloomFilterFlushThread(); + scheduledExecutorService.scheduleAtFixedRate(bloomFilterFlushThread, initialDelayInSeconds, DAY_IN_SECONDS, TimeUnit.SECONDS); + } + + private void shutdownScheduledExecutorService(){ + _logger.info("Shutting down scheduled bloom filter flush executor service"); + scheduledExecutorService.shutdown(); + try { + scheduledExecutorService.awaitTermination(10, TimeUnit.SECONDS); + } catch (InterruptedException ex) { + _logger.warn("Shutdown of 
executor service was interrupted."); + Thread.currentThread().interrupt(); + } + } + + protected int getNumHoursUntilTargetHour(int targetHour){ + _logger.info("Initialized bloom filter flushing out, at {} hour of day", targetHour); + Calendar calendar = Calendar.getInstance(); + int hour = calendar.get(Calendar.HOUR_OF_DAY); + return hour < targetHour ? (targetHour - hour) : (targetHour + 24 - hour); + } + + /** + * The set of implementation specific configuration properties. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + public enum Property { + SYNC_PUT("service.property.schema.sync.put", "false"), + BLOOMFILTER_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.expected.number.insertions", "40"), + BLOOMFILTER_ERROR_RATE("service.property.schema.bloomfilter.error.rate", "0.00001"), + + /* + * Estimated Filter Size using bloomFilter 1 million entries + * https://hur.st/bloomfilter/?n=1000000&p=1.0E-5&m=&k= 2.86MiB + * Storing in a Set 100K entries with avg length of 15 chars would be 100K * 15 * 2 B = 30B * 100K = 3 MB + * If # of entries is 1 million, then it would be 30 MB resulting in savings in space. + */ + + BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"), + BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"), + + /* + * Estimated Filter Size using bloomFilter 10 million entries + * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB + * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB + * If # of entries is 10 million, then it would be 600 MB resulting in savings in space. + */ + + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"), + BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"), + + /* + * Have a different configured flush start hour for different machines to prevent thundering herd problem. + */ + BLOOM_FILTER_FLUSH_HOUR_TO_START_AT("service.property.schema.bloomfilter.flush.hour.to.start.at","2"); + + private final String _name; + private final String _defaultValue; + + private Property(String name, String defaultValue) { + _name = name; + _defaultValue = defaultValue; + } + + /** + * Returns the property name. + * + * @return The property name. + */ + public String getName() { + return _name; + } + + /** + * Returns the default value for the property. + * + * @return The default value. + */ + public String getDefaultValue() { + return _defaultValue; + } + } + + + //~ Inner Classes ******************************************************************************************************************************** + + /** + * Bloom Filter monitoring thread. + * + * @author Dilip Devaraj (ddevaraj@salesforce.com) + */ + private class BloomFilterMonitorThread implements Runnable { + @Override + public void run() { + _logger.info("Initialized random number for bloom filter key = {}", randomNumber); + while (!Thread.currentThread().isInterrupted()) { + _sleepForPollPeriod(); + if (!Thread.currentThread().isInterrupted()) { + try { + _checkBloomFilterUsage(); + } catch (Exception ex) { + _logger.warn("Exception occurred while checking bloom filter usage.", ex); + } + } + } + } + + private void _checkBloomFilterUsage() { + _logger.info("Metrics Bloom approx no. 
elements = {}", bloomFilter.approximateElementCount()); + _logger.info("Metrics Bloom expected error rate = {}", bloomFilter.expectedFpp()); + _logger.info("Scope only Bloom approx no. elements = {}", bloomFilterScopeOnly.approximateElementCount()); + _logger.info("Scope only Bloom expected error rate = {}", bloomFilterScopeOnly.expectedFpp()); + _logger.info("Scope and metric only Bloom approx no. elements = {}", bloomFilterScopeAndMetricOnly.approximateElementCount()); + _logger.info("Scope and metric only Bloom expected error rate = {}", bloomFilterScopeAndMetricOnly.expectedFpp()); + } + + private void _sleepForPollPeriod() { + try { + _logger.info("Sleeping for {}s before checking bloom filter statistics.", POLL_INTERVAL_MS / 1000); + Thread.sleep(POLL_INTERVAL_MS); + } catch (InterruptedException ex) { + _logger.warn("AbstractSchemaService memory monitor thread was interrupted."); + Thread.currentThread().interrupt(); + } + } + } + + private class BloomFilterFlushThread implements Runnable { + @Override + public void run() { + try{ + _flushBloomFilter(); + } catch (Exception ex) { + _logger.warn("Exception occurred while flushing bloom filter.", ex); + } + } + + private void _flushBloomFilter() { + _logger.info("Flushing out bloom filter entries"); + bloomFilter = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterExpectedNumberInsertions , bloomFilterErrorRate); + bloomFilterScopeOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), bloomFilterScopeOnlyExpectedNumberInsertions , bloomFilterScopeOnlyErrorRate); + bloomFilterScopeAndMetricOnly = BloomFilter.create(Funnels.stringFunnel(Charset.defaultCharset()), + bloomFilterScopeAndMetricOnlyExpectedNumberInsertions , bloomFilterScopeAndMetricOnlyErrorRate); + /* Don't need explicit synchronization to prevent slowness majority of the time*/ + randomNumber = rand.nextInt(); + } + } } diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java index d7841f3d8..1ab93ad1c 100644 --- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java +++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java @@ -71,1451 +71,1451 @@ @Singleton public class ElasticSearchSchemaService extends AbstractSchemaService { - private static String SCOPE_INDEX_NAME; - private static String SCOPE_TYPE_NAME; - - private static String SCOPE_AND_METRIC_INDEX_NAME; - private static String SCOPE_AND_METRIC_TYPE_NAME; - - private static final String INDEX_NAME = "metadata_index"; - private static final String TYPE_NAME = "metadata_type"; - private static final String KEEP_SCROLL_CONTEXT_OPEN_FOR = "1m"; - private static final int INDEX_MAX_RESULT_WINDOW = 10000; - private static final int MAX_RETRY_TIMEOUT = 300 * 1000; - private static final String FIELD_TYPE_TEXT = "text"; - private static final String FIELD_TYPE_DATE ="date"; - - private final ObjectMapper _mapper; - private final ObjectMapper _scopeOnlyMapper; - private final ObjectMapper _scopeAndMetricOnlyMapper; - - private Logger _logger = LoggerFactory.getLogger(getClass()); - private final MonitorService _monitorService; - private RestClient _esRestClient; - private final int _replicationFactor; - private final int _numShards; - private final int _replicationFactorForScopeIndex; - private final int _numShardsForScopeIndex; - private 
final int _replicationFactorForScopeAndMetricIndex; - private final int _numShardsForScopeAndMetricIndex; - private final int _bulkIndexingSize; - private HashAlgorithm _idgenHashAlgo; - - @Inject - public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) { - super(config); - - _monitorService = monitorService; - _mapper = _createObjectMapper(); - _scopeOnlyMapper = _createScopeOnlyObjectMapper(); - _scopeAndMetricOnlyMapper = _createScopeAndMetricOnlyObjectMapper(); - - SCOPE_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); - SCOPE_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getDefaultValue()); - - SCOPE_AND_METRIC_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getDefaultValue()); - SCOPE_AND_METRIC_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getName(), - Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getDefaultValue()); - - String algorithm = config.getValue(Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getName(), Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getDefaultValue()); - try { - _idgenHashAlgo = HashAlgorithm.fromString(algorithm); - } catch(IllegalArgumentException e) { - _logger.warn("{} is not supported by this service. Valid values are: {}.", algorithm, Arrays.asList(HashAlgorithm.values())); - _idgenHashAlgo = HashAlgorithm.MD5; - } - - _logger.info("Using {} for Elasticsearch document id generation.", _idgenHashAlgo); - - _replicationFactor = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS.getName(), Property.ELASTICSEARCH_NUM_REPLICAS.getDefaultValue())); - - _numShards = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT.getName(), Property.ELASTICSEARCH_SHARDS_COUNT.getDefaultValue())); - - _replicationFactorForScopeIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getDefaultValue())); - - _numShardsForScopeIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getDefaultValue())); - - _replicationFactorForScopeAndMetricIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); - - _numShardsForScopeAndMetricIndex = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); - - _bulkIndexingSize = Integer.parseInt( - config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue())); - - String[] nodes = config.getValue(Property.ELASTICSEARCH_ENDPOINT.getName(), Property.ELASTICSEARCH_ENDPOINT.getDefaultValue()).split(","); - HttpHost[] httpHosts = new HttpHost[nodes.length]; - - for(int i=0; i _createMappingsNode()); - - _createIndexIfNotExists(SCOPE_INDEX_NAME, _replicationFactorForScopeIndex, _numShardsForScopeIndex, - () -> _createScopeMappingsNode()); - - _createIndexIfNotExists(SCOPE_AND_METRIC_INDEX_NAME, _replicationFactorForScopeAndMetricIndex, - 
_numShardsForScopeAndMetricIndex, () -> _createScopeAndMetricMappingsNode()); - } - - - @Override - public void dispose() { - super.dispose(); - try { - _esRestClient.close(); - _logger.info("Shutdown of ElasticSearch RESTClient complete"); - } catch (IOException e) { - _logger.warn("ElasticSearch RestClient failed to shutdown properly.", e); - } - } - - @Override - public Properties getServiceProperties() { - Properties serviceProps = new Properties(); - - for (Property property : Property.values()) { - serviceProps.put(property.getName(), property.getDefaultValue()); - } - return serviceProps; - } - - @Override - protected void implementationSpecificPut(List metrics, Set scopeNames, - Set> scopesAndMetricNames) { - SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); - - _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); - - long start = System.currentTimeMillis(); - List> fracturedList = _fracture(metrics); - - for(List records : fracturedList) { - if(!records.isEmpty()) { - _upsert(records); - } - } - - int count = 0; - for(List records : fracturedList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITTEN, count, null); - _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - - _logger.info("{} new scopes need to be indexed on ES.", scopeNames.size()); - - start = System.currentTimeMillis(); - List> fracturedScopesList = _fractureScopes(scopeNames); - - for(List records : fracturedScopesList) { - if(!records.isEmpty()) { - _upsertScopes(records); - } - } - - count = 0; - for(List records : fracturedScopesList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITTEN, count, null); - _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - - _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); - - start = System.currentTimeMillis(); - List> fracturedScopesAndMetricsList = _fractureScopeAndMetrics(scopesAndMetricNames); - - for(List records : fracturedScopesAndMetricsList) { - if(!records.isEmpty()) { - _upsertScopeAndMetrics(records); - } - } - - count = 0; - for(List records : fracturedScopesAndMetricsList) { - count += records.size(); - } - - _monitorService.modifyCounter(MonitorService.Counter.SCOPEANDMETRICNAMES_WRITTEN, count, null); - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); - } - - /* Convert the given list of metrics to a list of metric schema records. At the same time, fracture the records list - * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
- */ - protected List> _fracture(List metrics) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(Metric metric : metrics) { - if(metric.getTags().isEmpty()) { - MetricSchemaRecord msr = new MetricSchemaRecord(metric.getScope(), metric.getMetric()); - msr.setNamespace(metric.getNamespace()); - records.add(msr); - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - continue; - } - - for(Map.Entry entry : metric.getTags().entrySet()) { - records.add(new MetricSchemaRecord(metric.getNamespace(), metric.getScope(), metric.getMetric(), - entry.getKey(), entry.getValue())); - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - /* Convert the given list of scope and metric names to a list of scope and metric only schema records. - * At the same time, fracture the records list if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. - */ - protected List> _fractureScopeAndMetrics(Set> scopesAndMetricNames) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(Pair scopeAndMetric : scopesAndMetricNames) { - records.add(new ScopeAndMetricOnlySchemaRecord(scopeAndMetric.getLeft(), scopeAndMetric.getRight())); - - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - /* Convert the given list of scopes to a list of scope only schema records. At the same time, fracture the records list - * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
- */ - protected List> _fractureScopes(Set scopeNames) { - List> fracturedList = new ArrayList<>(); - - List records = new ArrayList<>(_bulkIndexingSize); - for(String scope : scopeNames) { - records.add(new ScopeOnlySchemaRecord(scope)); - - if(records.size() == _bulkIndexingSize) { - fracturedList.add(records); - records = new ArrayList<>(_bulkIndexingSize); - } - } - - if(!records.isEmpty()) { - fracturedList.add(records); - } - - return fracturedList; - } - - @Override - public List get(MetricSchemaRecordQuery query) { - requireNotDisposed(); - SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); - long size = (long) query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "REGEXP_WITHOUT_AGGREGATION"); - long start = System.currentTimeMillis(); - boolean scroll = false; - StringBuilder sb = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_search"); - - int from = 0, scrollSize; - if(query.getLimit() * query.getPage() > 10000) { - sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR); - scroll = true; - int total = query.getLimit() * query.getPage(); - scrollSize = (int) (total / (total / 10000 + 1)); - } else { - from = query.getLimit() * (query.getPage() - 1); - scrollSize = query.getLimit(); - } - - String requestUrl = sb.toString(); - String queryJson = _constructTermQuery(query, from, scrollSize); - - try { - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - - MetricSchemaRecordList list = toEntity(extractResponse(response), new TypeReference() {}); - - if(scroll) { - requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString(); - List records = new LinkedList<>(list.getRecords()); - - while(true) { - String scrollID = list.getScrollID(); - - Map requestBody = new HashMap<>(); - requestBody.put("scroll_id", scrollID); - requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); - - response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), - new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); - - list = toEntity(extractResponse(response), new TypeReference() {}); - records.addAll(list.getRecords()); - - if(records.size() >= query.getLimit() * query.getPage() || list.getRecords().size() < scrollSize) { - break; - } - } - - int fromIndex = query.getLimit() * (query.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return list.getRecords(); - } - - } catch (UnsupportedEncodingException | JsonProcessingException e) { - throw new 
SystemException("Search failed.", e); - } catch (IOException e) { - throw new SystemException("IOException when trying to perform ES request.", e); - } - } - - @Override - public List getUnique(MetricSchemaRecordQuery query, RecordType type) { - requireNotDisposed(); - SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null."); - long size = (long) query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "REGEXP_WITH_AGGREGATION"); - long start = System.currentTimeMillis(); - - String indexName = INDEX_NAME; - String typeName = TYPE_NAME; - - if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) - { - indexName = SCOPE_INDEX_NAME; - typeName = SCOPE_TYPE_NAME; - } - else if (query.isQueryOnlyOnScopeAndMetric()) - { - indexName = SCOPE_AND_METRIC_INDEX_NAME; - typeName = SCOPE_AND_METRIC_TYPE_NAME; - } - - String requestUrl = new StringBuilder().append("/") - .append(indexName) - .append("/") - .append(typeName) - .append("/") - .append("_search") - .toString(); - - String queryJson = _constructTermAggregationQuery(query, type); - try { - - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String str = extractResponse(response); - List records = SchemaService.constructMetricSchemaRecordsForType( - toEntity(str, new TypeReference>() {}), type); - - if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) { - _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - - } else if (query.isQueryOnlyOnScopeAndMetric()) { - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - } - - - int fromIndex = query.getLimit() * (query.getPage() - 1); - if(records.size() <= fromIndex) { - return Collections.emptyList(); - } - - if(records.size() < query.getLimit() * query.getPage()) { - return records.subList(fromIndex, records.size()); - } else { - return records.subList(fromIndex, query.getLimit() * query.getPage()); - } - } catch (IOException e) { - throw new SystemException(e); - } - } - - @Override - public List keywordSearch(KeywordQuery kq) { - requireNotDisposed(); - SystemAssert.requireArgument(kq != null, "Query cannot be null."); - SystemAssert.requireArgument(kq.getQuery() != null || kq.getType() != null, "Either the query string or the type must not be null."); - - long size = (long) kq.getLimit() * kq.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and atmost Integer.MAX_VALUE"); - - - Map tags = new HashMap<>(); - tags.put("type", "FTS_WITH_AGGREGATION"); - long start = System.currentTimeMillis(); - StringBuilder sb = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_search"); - try { - - if(kq.getQuery() != null) { - - int from = 0, scrollSize = 0; - boolean scroll = false;; 
- if(kq.getLimit() * kq.getPage() > 10000) { - sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR); - scroll = true; - int total = kq.getLimit() * kq.getPage(); - scrollSize = (int) (total / (total / 10000 + 1)); - } else { - from = kq.getLimit() * (kq.getPage() - 1); - scrollSize = kq.getLimit(); - } - - List tokens = _analyzedTokens(kq.getQuery()); - String queryJson = _constructQueryStringQuery(tokens, from, scrollSize); - String requestUrl = sb.toString(); - - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String strResponse = extractResponse(response); - MetricSchemaRecordList list = toEntity(strResponse, new TypeReference() {}); - - if(scroll) { - requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString(); - List records = new LinkedList<>(list.getRecords()); - - while(true) { - Map requestBody = new HashMap<>(); - requestBody.put("scroll_id", list.getScrollID()); - requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR); - - response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), - new StringEntity(new ObjectMapper().writeValueAsString(requestBody))); - - list = toEntity(extractResponse(response), new TypeReference() {}); - - records.addAll(list.getRecords()); - - if(records.size() >= kq.getLimit() * kq.getPage() || list.getRecords().size() < scrollSize) { - break; - } - } - - int fromIndex = kq.getLimit() * (kq.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return list.getRecords(); - } - - - } else { - Map> tokensMap = new HashMap<>(); - - List tokens = _analyzedTokens(kq.getScope()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.SCOPE, tokens); - } - - tokens = _analyzedTokens(kq.getMetric()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.METRIC, tokens); - } - - tokens = _analyzedTokens(kq.getTagKey()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.TAGK, tokens); - } - - tokens = _analyzedTokens(kq.getTagValue()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.TAGV, tokens); - } - - tokens = _analyzedTokens(kq.getNamespace()); - if(!tokens.isEmpty()) { - tokensMap.put(RecordType.NAMESPACE, tokens); - } - - String queryJson = _constructQueryStringQuery(kq, tokensMap); - String requestUrl = sb.toString(); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); - String strResponse = extractResponse(response); - - List records = SchemaService.constructMetricSchemaRecordsForType( - toEntity(strResponse, new TypeReference>() {}), kq.getType()); - - int fromIndex = kq.getLimit() * (kq.getPage() - 1); - if(records.size() <= fromIndex) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, 
tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return Collections.emptyList(); - } - - if(records.size() < kq.getLimit() * kq.getPage()) { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, records.size()); - } else { - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); - _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); - return records.subList(fromIndex, kq.getLimit() * kq.getPage()); - } - - } - - } catch (IOException e) { - throw new SystemException(e); - } - } - - private List _analyzedTokens(String query) { - - if(!SchemaService.containsFilter(query)) { - return Collections.emptyList(); - } - - List tokens = new ArrayList<>(); - - String requestUrl = new StringBuilder("/").append(INDEX_NAME).append("/_analyze").toString(); - - String requestBody = "{\"analyzer\" : \"metadata_analyzer\", \"text\": \"" + query + "\" }"; - - try { - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - String strResponse = extractResponse(response); - JsonNode tokensNode = _mapper.readTree(strResponse).get("tokens"); - if(tokensNode.isArray()) { - for(JsonNode tokenNode : tokensNode) { - tokens.add(tokenNode.get("token").asText()); - } - } - - return tokens; - } catch (IOException e) { - throw new SystemException(e); - } - } - - private void _upsert(List records) { - String requestUrl = new StringBuilder().append("/") - .append(INDEX_NAME) - .append("/") - .append(TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - MetricSchemaRecordList msrList = new MetricSchemaRecordList(records, _idgenHashAlgo); - try { - String requestBody = _mapper.writeValueAsString(msrList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? - throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index metric. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(msrList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Index does not exist. 
Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(msrList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilter(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put metrics. The response was: " + strResponse, e); - } - } - - private void _upsertScopeAndMetrics(List records) { - String requestUrl = new StringBuilder().append("/") - .append(SCOPE_AND_METRIC_INDEX_NAME) - .append("/") - .append(SCOPE_AND_METRIC_TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - ScopeAndMetricOnlySchemaRecordList recordList = new ScopeAndMetricOnlySchemaRecordList(records, _idgenHashAlgo); - - try { - String requestBody = _scopeAndMetricOnlyMapper.writeValueAsString(recordList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? - throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(recordList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(recordList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilterScopeAndMetricOnly(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put scope names. The response was: " + strResponse, e); - } - } - - private void _upsertScopes(List records) { - String requestUrl = new StringBuilder().append("/") - .append(SCOPE_INDEX_NAME) - .append("/") - .append(SCOPE_TYPE_NAME) - .append("/") - .append("_bulk") - .toString(); - - String strResponse = ""; - - ScopeOnlySchemaRecordList scopeOnlySchemaRecordList = new ScopeOnlySchemaRecordList(records, _idgenHashAlgo); - - try { - String requestBody = _scopeOnlyMapper.writeValueAsString(scopeOnlySchemaRecordList); - Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); - strResponse = extractResponse(response); - } catch (IOException e) { - //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? 
- throw new SystemException(e); - } - - try { - PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); - //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. - if(putResponse.errors) { - List recordsToRemove = new ArrayList<>(); - for(Item item : putResponse.items) { - if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) { - _logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error)); - recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.create._id)); - } - - if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) { - _logger.warn("Scope Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error)); - recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.index._id)); - } - } - if(recordsToRemove.size() != 0) { - _logger.info("{} records were not written to ES", recordsToRemove.size()); - records.removeAll(recordsToRemove); - } - } - //add to bloom filter - _addToBloomFilterScopeOnly(records); - - } catch(IOException e) { - throw new SystemException("Failed to parse reponse of put scope names. The response was: " + strResponse, e); - } - } - - protected void _addToBloomFilter(List records){ - _logger.info("Adding {} records into bloom filter.", records.size()); - for(MetricSchemaRecord record : records) { - String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); - bloomFilter.put(key); - } - } - - protected void _addToBloomFilterScopeAndMetricOnly(List records){ - _logger.info("Adding {} records into scope and metric only bloom filter.", records.size()); - for(ScopeAndMetricOnlySchemaRecord record : records) { - String key = constructScopeAndMetricOnlyKey(record.getScope(), record.getMetric()); - bloomFilterScopeAndMetricOnly.put(key); - } - } - - protected void _addToBloomFilterScopeOnly(List records){ - _logger.info("Adding {} records into scope only bloom filter.", records.size()); - for(ScopeOnlySchemaRecord record : records) { - String key = constructScopeOnlyKey(record.getScope()); - bloomFilterScopeOnly.put(key); - } - } - - private String _constructTermAggregationQuery(MetricSchemaRecordQuery query, RecordType type) { - ObjectMapper mapper = new ObjectMapper(); - ObjectNode queryNode = _constructQueryNode(query, mapper); - - long size = query.getLimit() * query.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); - - ObjectNode aggsNode = _constructAggsNode(type, Math.max(size, 10000), mapper); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("size", 0); - rootNode.put("aggs", aggsNode); - - return rootNode.toString(); - } - - private String _constructTermQuery(MetricSchemaRecordQuery query, int from, int size) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode queryNode = _constructQueryNode(query, mapper); - - ObjectNode rootNode = _mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("from", from); - rootNode.put("size", size); - - return rootNode.toString(); - } - - private ObjectNode _constructSimpleQueryStringNode(List tokens, RecordType... 
types) { - - if(tokens.isEmpty()) { - return null; - } - - ObjectMapper mapper = new ObjectMapper(); - - StringBuilder queryString = new StringBuilder(); - for(String token : tokens) { - queryString.append('+').append(token).append(' '); - } - queryString.replace(queryString.length() - 1, queryString.length(), "*"); - - ObjectNode node = mapper.createObjectNode(); - ArrayNode fieldsNode = mapper.createArrayNode(); - for(RecordType type : types) { - fieldsNode.add(type.getName()); - } - node.put("fields", fieldsNode); - node.put("query", queryString.toString()); - - ObjectNode simpleQueryStringNode = mapper.createObjectNode(); - simpleQueryStringNode.put("simple_query_string", node); - - return simpleQueryStringNode; - } - - private String _constructQueryStringQuery(List tokens, int from, int size) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(tokens, RecordType.values()); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", simpleQueryStringNode); - rootNode.put("from", from); - rootNode.put("size", size); - - return rootNode.toString(); - } - - private String _constructQueryStringQuery(KeywordQuery kq, Map> tokensMap) { - ObjectMapper mapper = new ObjectMapper(); - - ArrayNode filterNodes = mapper.createArrayNode(); - for(Map.Entry> entry : tokensMap.entrySet()) { - ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(entry.getValue(), entry.getKey()); - filterNodes.add(simpleQueryStringNode); - } - - ObjectNode boolNode = mapper.createObjectNode(); - boolNode.put("filter", filterNodes); - - ObjectNode queryNode = mapper.createObjectNode(); - queryNode.put("bool", boolNode); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("query", queryNode); - rootNode.put("size", 0); - - long size = kq.getLimit() * kq.getPage(); - SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, - "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); - rootNode.put("aggs", _constructAggsNode(kq.getType(), Math.max(size, 10000), mapper)); - - return rootNode.toString(); - - } - - private ObjectNode _constructQueryNode(MetricSchemaRecordQuery query, ObjectMapper mapper) { - ArrayNode filterNodes = mapper.createArrayNode(); - if(SchemaService.containsFilter(query.getMetric())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.METRIC.getName() + ".raw", SchemaService.convertToRegex(query.getMetric())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getScope())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.SCOPE.getName() + ".raw", SchemaService.convertToRegex(query.getScope())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getTagKey())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.TAGK.getName() + ".raw", SchemaService.convertToRegex(query.getTagKey())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getTagValue())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.TAGV.getName() + ".raw", SchemaService.convertToRegex(query.getTagValue())); - node.put("regexp", 
regexpNode); - filterNodes.add(node); - } - - if(SchemaService.containsFilter(query.getNamespace())) { - ObjectNode node = mapper.createObjectNode(); - ObjectNode regexpNode = mapper.createObjectNode(); - regexpNode.put(RecordType.NAMESPACE.getName() + ".raw", SchemaService.convertToRegex(query.getNamespace())); - node.put("regexp", regexpNode); - filterNodes.add(node); - } - - ObjectNode boolNode = mapper.createObjectNode(); - boolNode.put("filter", filterNodes); - - ObjectNode queryNode = mapper.createObjectNode(); - queryNode.put("bool", boolNode); - return queryNode; - } - - private ObjectNode _constructAggsNode(RecordType type, long limit, ObjectMapper mapper) { - - ObjectNode termsNode = mapper.createObjectNode(); - termsNode.put("field", type.getName() + ".raw"); - termsNode.put("order", mapper.createObjectNode().put("_term", "asc")); - termsNode.put("size", limit); - termsNode.put("execution_hint", "map"); - - ObjectNode distinctValuesNode = mapper.createObjectNode(); - distinctValuesNode.put("terms", termsNode); - - ObjectNode aggsNode = mapper.createObjectNode(); - aggsNode.put("distinct_values", distinctValuesNode); - return aggsNode; - } - - - /* Helper method to convert JSON String representation to the corresponding Java entity. */ - private T toEntity(String content, TypeReference type) { - try { - return _mapper.readValue(content, type); - } catch (IOException ex) { - throw new SystemException(ex); - } - } - - /* Method to change the rest client. Used for testing. */ - protected void setRestClient(RestClient restClient) - { - this._esRestClient = restClient; - } - - /** Helper to process the response. - * Throws a SystemException when the http status code is outsdie of the range 200 - 300. - * @param response ES response - * @return Stringified response - */ - protected String extractResponse(Response response) { - requireArgument(response != null, "HttpResponse object cannot be null."); - - int status = response.getStatusLine().getStatusCode(); - String strResponse = extractStringResponse(response); - - if ((status < HttpStatus.SC_OK) || (status >= HttpStatus.SC_MULTIPLE_CHOICES)) { - throw new SystemException("Status code: " + status + " . Error occurred. 
" + strResponse); - } else { - return strResponse; - } - } - - private String extractStringResponse(Response content) { - requireArgument(content != null, "Response content is null."); - - String result; - HttpEntity entity = null; - - try { - entity = content.getEntity(); - if (entity == null) { - result = ""; - } else { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - - entity.writeTo(baos); - result = baos.toString("UTF-8"); - } - return result; - } catch (IOException ex) { - throw new SystemException(ex); - } finally { - if (entity != null) { - try { - EntityUtils.consume(entity); - } catch (IOException ex) { - _logger.warn("Failed to close entity stream.", ex); - } - } - } - } - - private ObjectMapper _createObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Serializer()); - module.addDeserializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectMapper _createScopeAndMetricOnlyObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Serializer()); - module.addDeserializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectMapper _createScopeOnlyObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - - mapper.setSerializationInclusion(Include.NON_NULL); - SimpleModule module = new SimpleModule(); - module.addSerializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Serializer()); - module.addDeserializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Deserializer()); - module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); - mapper.registerModule(module); - - return mapper; - } - - private ObjectNode _createSettingsNode(int replicationFactor, int numShards) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode metadataAnalyzer = mapper.createObjectNode(); - metadataAnalyzer.put("tokenizer", "metadata_tokenizer"); - metadataAnalyzer.put("filter", mapper.createArrayNode().add("lowercase")); - - ObjectNode analyzerNode = mapper.createObjectNode(); - analyzerNode.put("metadata_analyzer", metadataAnalyzer); - - ObjectNode tokenizerNode = mapper.createObjectNode(); - tokenizerNode.put("metadata_tokenizer", mapper.createObjectNode().put("type", "pattern").put("pattern", "([^\\p{L}\\d]+)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])")); - - ObjectNode analysisNode = mapper.createObjectNode(); - analysisNode.put("analyzer", analyzerNode); - analysisNode.put("tokenizer", tokenizerNode); - - ObjectNode indexNode = mapper.createObjectNode(); - indexNode.put("max_result_window", INDEX_MAX_RESULT_WINDOW); - indexNode.put("number_of_replicas", replicationFactor); - indexNode.put("number_of_shards", numShards); - - ObjectNode settingsNode = mapper.createObjectNode(); - settingsNode.put("analysis", analysisNode); - 
settingsNode.put("index", indexNode); - - return settingsNode; - } - - private ObjectNode _createMappingsNode() { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode propertiesNode = mapper.createObjectNode(); - propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.TAGK.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.TAGV.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.NAMESPACE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - - propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - - ObjectNode typeNode = mapper.createObjectNode(); - typeNode.put("properties", propertiesNode); - - ObjectNode mappingsNode = mapper.createObjectNode(); - mappingsNode.put(TYPE_NAME, typeNode); - return mappingsNode; - } - - private ObjectNode _createScopeAndMetricMappingsNode() { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode propertiesNode = mapper.createObjectNode(); - propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - - propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + private static String SCOPE_INDEX_NAME; + private static String SCOPE_TYPE_NAME; + + private static String SCOPE_AND_METRIC_INDEX_NAME; + private static String SCOPE_AND_METRIC_TYPE_NAME; + + private static final String INDEX_NAME = "metadata_index"; + private static final String TYPE_NAME = "metadata_type"; + private static final String KEEP_SCROLL_CONTEXT_OPEN_FOR = "1m"; + private static final int INDEX_MAX_RESULT_WINDOW = 10000; + private static final int MAX_RETRY_TIMEOUT = 300 * 1000; + private static final String FIELD_TYPE_TEXT = "text"; + private static final String FIELD_TYPE_DATE ="date"; + + private final ObjectMapper _mapper; + private final ObjectMapper _scopeOnlyMapper; + private final ObjectMapper _scopeAndMetricOnlyMapper; + + private Logger _logger = LoggerFactory.getLogger(getClass()); + private final MonitorService _monitorService; + private RestClient _esRestClient; + private final int _replicationFactor; + private final int _numShards; + private final int _replicationFactorForScopeIndex; + private final int _numShardsForScopeIndex; + private final int _replicationFactorForScopeAndMetricIndex; + private final int _numShardsForScopeAndMetricIndex; + private final int _bulkIndexingSize; + private HashAlgorithm _idgenHashAlgo; + + @Inject + public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) { + super(config); + + _monitorService = monitorService; + _mapper = _createObjectMapper(); + _scopeOnlyMapper = _createScopeOnlyObjectMapper(); + _scopeAndMetricOnlyMapper = _createScopeAndMetricOnlyObjectMapper(); + + SCOPE_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_INDEX_NAME.getDefaultValue()); + SCOPE_TYPE_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_TYPE_NAME.getDefaultValue()); + + SCOPE_AND_METRIC_INDEX_NAME = config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME.getDefaultValue()); + SCOPE_AND_METRIC_TYPE_NAME = 
config.getValue(Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getName(), + Property.ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME.getDefaultValue()); + + String algorithm = config.getValue(Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getName(), Property.ELASTICSEARCH_IDGEN_HASH_ALGO.getDefaultValue()); + try { + _idgenHashAlgo = HashAlgorithm.fromString(algorithm); + } catch(IllegalArgumentException e) { + _logger.warn("{} is not supported by this service. Valid values are: {}.", algorithm, Arrays.asList(HashAlgorithm.values())); + _idgenHashAlgo = HashAlgorithm.MD5; + } + + _logger.info("Using {} for Elasticsearch document id generation.", _idgenHashAlgo); + + _replicationFactor = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS.getName(), Property.ELASTICSEARCH_NUM_REPLICAS.getDefaultValue())); + + _numShards = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT.getName(), Property.ELASTICSEARCH_SHARDS_COUNT.getDefaultValue())); + + _replicationFactorForScopeIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX.getDefaultValue())); + + _numShardsForScopeIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX.getDefaultValue())); + + _replicationFactorForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + + _numShardsForScopeAndMetricIndex = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX.getDefaultValue())); + + _bulkIndexingSize = Integer.parseInt( + config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue())); + + String[] nodes = config.getValue(Property.ELASTICSEARCH_ENDPOINT.getName(), Property.ELASTICSEARCH_ENDPOINT.getDefaultValue()).split(","); + HttpHost[] httpHosts = new HttpHost[nodes.length]; + + for(int i=0; i _createMappingsNode()); + + _createIndexIfNotExists(SCOPE_INDEX_NAME, _replicationFactorForScopeIndex, _numShardsForScopeIndex, + () -> _createScopeMappingsNode()); + + _createIndexIfNotExists(SCOPE_AND_METRIC_INDEX_NAME, _replicationFactorForScopeAndMetricIndex, + _numShardsForScopeAndMetricIndex, () -> _createScopeAndMetricMappingsNode()); + } + + + @Override + public void dispose() { + super.dispose(); + try { + _esRestClient.close(); + _logger.info("Shutdown of ElasticSearch RESTClient complete"); + } catch (IOException e) { + _logger.warn("ElasticSearch RestClient failed to shutdown properly.", e); + } + } + + @Override + public Properties getServiceProperties() { + Properties serviceProps = new Properties(); + + for (Property property : Property.values()) { + serviceProps.put(property.getName(), property.getDefaultValue()); + } + return serviceProps; + } + + @Override + protected void implementationSpecificPut(List metrics, Set scopeNames, + Set> scopesAndMetricNames) { + SystemAssert.requireArgument(metrics != null, "Metrics list cannot be null."); + + _logger.info("{} new metrics need to be indexed on ES.", metrics.size()); + + long start = System.currentTimeMillis(); + List> fracturedList = _fracture(metrics); + + for(List records : fracturedList) { + 
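/*
 * Illustrative note, not part of the patch: _fracture(metrics) above has already split
 * the schema records into sublists of at most _bulkIndexingSize entries, so each pass
 * through this loop maps to at most one ES _bulk request; empty sublists are skipped
 * by the guard below.
 */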
if(!records.isEmpty()) { + _upsert(records); + } + } + + int count = 0; + for(List records : fracturedList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITTEN, count, null); + _monitorService.modifyCounter(MonitorService.Counter.SCHEMARECORDS_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + + _logger.info("{} new scopes need to be indexed on ES.", scopeNames.size()); + + start = System.currentTimeMillis(); + List> fracturedScopesList = _fractureScopes(scopeNames); + + for(List records : fracturedScopesList) { + if(!records.isEmpty()) { + _upsertScopes(records); + } + } + + count = 0; + for(List records : fracturedScopesList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITTEN, count, null); + _monitorService.modifyCounter(MonitorService.Counter.SCOPENAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + + _logger.info("{} new scope and metric names need to be indexed on ES.", scopesAndMetricNames.size()); + + start = System.currentTimeMillis(); + List> fracturedScopesAndMetricsList = _fractureScopeAndMetrics(scopesAndMetricNames); + + for(List records : fracturedScopesAndMetricsList) { + if(!records.isEmpty()) { + _upsertScopeAndMetrics(records); + } + } + + count = 0; + for(List records : fracturedScopesAndMetricsList) { + count += records.size(); + } + + _monitorService.modifyCounter(MonitorService.Counter.SCOPEANDMETRICNAMES_WRITTEN, count, null); + _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_WRITE_LATENCY, (System.currentTimeMillis() - start), null); + } + + /* Convert the given list of metrics to a list of metric schema records. At the same time, fracture the records list + * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. + */ + protected List> _fracture(List metrics) { + List> fracturedList = new ArrayList<>(); + + List records = new ArrayList<>(_bulkIndexingSize); + for(Metric metric : metrics) { + if(metric.getTags().isEmpty()) { + MetricSchemaRecord msr = new MetricSchemaRecord(metric.getScope(), metric.getMetric()); + msr.setNamespace(metric.getNamespace()); + records.add(msr); + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + continue; + } + + for(Map.Entry entry : metric.getTags().entrySet()) { + records.add(new MetricSchemaRecord(metric.getNamespace(), metric.getScope(), metric.getMetric(), + entry.getKey(), entry.getValue())); + if(records.size() == _bulkIndexingSize) { + fracturedList.add(records); + records = new ArrayList<>(_bulkIndexingSize); + } + } + } + + if(!records.isEmpty()) { + fracturedList.add(records); + } + + return fracturedList; + } + + /* Convert the given list of scope and metric names to a list of scope and metric only schema records. + * At the same time, fracture the records list if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE. 
+	 */
+	protected List<List<ScopeAndMetricOnlySchemaRecord>> _fractureScopeAndMetrics(Set<Pair<String, String>> scopesAndMetricNames) {
+		List<List<ScopeAndMetricOnlySchemaRecord>> fracturedList = new ArrayList<>();
+
+		List<ScopeAndMetricOnlySchemaRecord> records = new ArrayList<>(_bulkIndexingSize);
+		for(Pair<String, String> scopeAndMetric : scopesAndMetricNames) {
+			records.add(new ScopeAndMetricOnlySchemaRecord(scopeAndMetric.getLeft(), scopeAndMetric.getRight()));
+
+			if(records.size() == _bulkIndexingSize) {
+				fracturedList.add(records);
+				records = new ArrayList<>(_bulkIndexingSize);
+			}
+		}
+
+		if(!records.isEmpty()) {
+			fracturedList.add(records);
+		}
+
+		return fracturedList;
+	}
+
+	/* Convert the given list of scopes to a list of scope only schema records. At the same time, fracture the records list
+	 * if its size is greater than ELASTICSEARCH_INDEXING_BATCH_SIZE.
+	 */
+	protected List<List<ScopeOnlySchemaRecord>> _fractureScopes(Set<String> scopeNames) {
+		List<List<ScopeOnlySchemaRecord>> fracturedList = new ArrayList<>();
+
+		List<ScopeOnlySchemaRecord> records = new ArrayList<>(_bulkIndexingSize);
+		for(String scope : scopeNames) {
+			records.add(new ScopeOnlySchemaRecord(scope));
+
+			if(records.size() == _bulkIndexingSize) {
+				fracturedList.add(records);
+				records = new ArrayList<>(_bulkIndexingSize);
+			}
+		}
+
+		if(!records.isEmpty()) {
+			fracturedList.add(records);
+		}
+
+		return fracturedList;
+	}
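The three query methods below (get, getUnique, keywordSearch) share one paging convention: requests whose limit * page fits inside the index max_result_window (10000) use plain from/size, and anything larger falls back to the scroll API in bounded batches. A minimal sketch of that arithmetic, with hypothetical request values:

```java
// Worked example of the paging decision used by get() and keywordSearch():
// totals within max_result_window (10000) use from/size; larger totals use
// the scroll API with a batch size that keeps every scroll page <= 10000.
public final class PagingMath {
	public static void main(String[] args) {
		int limit = 3000, page = 5;                       // hypothetical request
		int total = limit * page;                          // 15000 > 10000
		if (total > 10000) {
			int scrollSize = total / (total / 10000 + 1); // 15000 / 2 = 7500 per scroll page
			System.out.println("scroll, batch size = " + scrollSize);
		} else {
			int from = limit * (page - 1);
			System.out.println("from = " + from + ", size = " + limit);
		}
	}
}
```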
+
+	@Override
+	public List<MetricSchemaRecord> get(MetricSchemaRecordQuery query) {
+		requireNotDisposed();
+		SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null.");
+		long size = (long) query.getLimit() * query.getPage();
+		SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE,
+				"(limit * page) must be greater than 0 and at most Integer.MAX_VALUE");
+
+		Map<String, String> tags = new HashMap<>();
+		tags.put("type", "REGEXP_WITHOUT_AGGREGATION");
+		long start = System.currentTimeMillis();
+		boolean scroll = false;
+		StringBuilder sb = new StringBuilder().append("/")
+				.append(INDEX_NAME)
+				.append("/")
+				.append(TYPE_NAME)
+				.append("/")
+				.append("_search");
+
+		int from = 0, scrollSize;
+		if(query.getLimit() * query.getPage() > 10000) {
+			sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR);
+			scroll = true;
+			int total = query.getLimit() * query.getPage();
+			scrollSize = (int) (total / (total / 10000 + 1));
+		} else {
+			from = query.getLimit() * (query.getPage() - 1);
+			scrollSize = query.getLimit();
+		}
+
+		String requestUrl = sb.toString();
+		String queryJson = _constructTermQuery(query, from, scrollSize);
+
+		try {
+			Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));
+
+			MetricSchemaRecordList list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
+
+			if(scroll) {
+				requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString();
+				List<MetricSchemaRecord> records = new LinkedList<>(list.getRecords());
+
+				while(true) {
+					String scrollID = list.getScrollID();
+
+					Map<String, String> requestBody = new HashMap<>();
+					requestBody.put("scroll_id", scrollID);
+					requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR);
+
+					response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(),
+							new StringEntity(new ObjectMapper().writeValueAsString(requestBody)));
+
+					list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
+					records.addAll(list.getRecords());
+
+					if(records.size() >= query.getLimit() * query.getPage() || list.getRecords().size() < scrollSize) {
+						break;
+					}
+				}
+
+				int fromIndex = query.getLimit() * (query.getPage() - 1);
+				if(records.size() <= fromIndex) {
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+					return Collections.emptyList();
+				}
+
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+				return records.subList(fromIndex, records.size());
+
+			} else {
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+				return list.getRecords();
+			}
+
+		} catch (UnsupportedEncodingException | JsonProcessingException e) {
+			throw new SystemException("Search failed.", e);
+		} catch (IOException e) {
+			throw new SystemException("IOException when trying to perform ES request.", e);
+		}
+	}
+
+	@Override
+	public List<MetricSchemaRecord> getUnique(MetricSchemaRecordQuery query, RecordType type) {
+		requireNotDisposed();
+		SystemAssert.requireArgument(query != null, "MetricSchemaRecordQuery cannot be null.");
+		long size = (long) query.getLimit() * query.getPage();
+		SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE,
+				"(limit * page) must be greater than 0 and at most Integer.MAX_VALUE");
+
+		Map<String, String> tags = new HashMap<>();
+		tags.put("type", "REGEXP_WITH_AGGREGATION");
+		long start = System.currentTimeMillis();
+
+		String indexName = INDEX_NAME;
+		String typeName = TYPE_NAME;
+
+		if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) {
+			indexName = SCOPE_INDEX_NAME;
+			typeName = SCOPE_TYPE_NAME;
+		} else if (query.isQueryOnlyOnScopeAndMetric()) {
+			indexName = SCOPE_AND_METRIC_INDEX_NAME;
+			typeName = SCOPE_AND_METRIC_TYPE_NAME;
+		}
+
+		String requestUrl = new StringBuilder().append("/")
+				.append(indexName)
+				.append("/")
+				.append(typeName)
+				.append("/")
+				.append("_search")
+				.toString();
+
+		String queryJson = _constructTermAggregationQuery(query, type);
+		try {
+			Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));
+			String str = extractResponse(response);
+			List<MetricSchemaRecord> records = SchemaService.constructMetricSchemaRecordsForType(
+					toEntity(str, new TypeReference<List<String>>() {}), type);
+
+			if (query.isQueryOnlyOnScope() && RecordType.SCOPE.equals(type)) {
+				_monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags);
+				_monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+			} else if (query.isQueryOnlyOnScopeAndMetric()) {
+				_monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags);
+				_monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+			} else {
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+				_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+			}
+
+			int fromIndex = query.getLimit() * (query.getPage() - 1);
+			if(records.size() <= fromIndex) {
+				return Collections.emptyList();
+			}
+
+			if(records.size() < query.getLimit() * query.getPage()) {
+				return records.subList(fromIndex, records.size());
+			} else {
+				return records.subList(fromIndex, query.getLimit() * query.getPage());
+			}
+		} catch (IOException e) {
+			throw new SystemException(e);
+		}
+	}
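For reference, the request body that _constructTermAggregationQuery and _constructAggsNode (defined further down) assemble for a unique-scope query has roughly this shape; the regexp value is a made-up example and the aggregation size is max(limit * page, 10000):

```json
{
  "query": { "bool": { "filter": [ { "regexp": { "scope.raw": "system.*" } } ] } },
  "size": 0,
  "aggs": {
    "distinct_values": {
      "terms": {
        "field": "scope.raw",
        "order": { "_term": "asc" },
        "size": 10000,
        "execution_hint": "map"
      }
    }
  }
}
```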
+
+	@Override
+	public List<MetricSchemaRecord> keywordSearch(KeywordQuery kq) {
+		requireNotDisposed();
+		SystemAssert.requireArgument(kq != null, "Query cannot be null.");
+		SystemAssert.requireArgument(kq.getQuery() != null || kq.getType() != null, "Either the query string or the type must not be null.");
+
+		long size = (long) kq.getLimit() * kq.getPage();
+		SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE,
+				"(limit * page) must be greater than 0 and at most Integer.MAX_VALUE");
+
+		Map<String, String> tags = new HashMap<>();
+		tags.put("type", "FTS_WITH_AGGREGATION");
+		long start = System.currentTimeMillis();
+		StringBuilder sb = new StringBuilder().append("/")
+				.append(INDEX_NAME)
+				.append("/")
+				.append(TYPE_NAME)
+				.append("/")
+				.append("_search");
+		try {
+			if(kq.getQuery() != null) {
+
+				int from = 0, scrollSize = 0;
+				boolean scroll = false;
+				if(kq.getLimit() * kq.getPage() > 10000) {
+					sb.append("?scroll=").append(KEEP_SCROLL_CONTEXT_OPEN_FOR);
+					scroll = true;
+					int total = kq.getLimit() * kq.getPage();
+					scrollSize = (int) (total / (total / 10000 + 1));
+				} else {
+					from = kq.getLimit() * (kq.getPage() - 1);
+					scrollSize = kq.getLimit();
+				}
+
+				List<String> tokens = _analyzedTokens(kq.getQuery());
+				String queryJson = _constructQueryStringQuery(tokens, from, scrollSize);
+				String requestUrl = sb.toString();
+
+				Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));
+				String strResponse = extractResponse(response);
+				MetricSchemaRecordList list = toEntity(strResponse, new TypeReference<MetricSchemaRecordList>() {});
+
+				if(scroll) {
+					requestUrl = new StringBuilder().append("/").append("_search").append("/").append("scroll").toString();
+					List<MetricSchemaRecord> records = new LinkedList<>(list.getRecords());
+
+					while(true) {
+						Map<String, String> requestBody = new HashMap<>();
+						requestBody.put("scroll_id", list.getScrollID());
+						requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR);
+
+						response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(),
+								new StringEntity(new ObjectMapper().writeValueAsString(requestBody)));
+
+						list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
+
+						records.addAll(list.getRecords());
+
+						if(records.size() >= kq.getLimit() * kq.getPage() || list.getRecords().size() < scrollSize) {
+							break;
+						}
+					}
+
+					int fromIndex = kq.getLimit() * (kq.getPage() - 1);
+					if(records.size() <= fromIndex) {
+						_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+						_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+						return Collections.emptyList();
+					}
+
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+					return records.subList(fromIndex, records.size());
+
+				} else {
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
+					_monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
+					return list.getRecords();
+				}
+
+			} else {
+				Map<RecordType, List<String>> tokensMap = new HashMap<>();
+
+				List<String> tokens = _analyzedTokens(kq.getScope());
+				if(!tokens.isEmpty()) {
+					tokensMap.put(RecordType.SCOPE, tokens);
+				}
+
+				tokens = _analyzedTokens(kq.getMetric());
+				if(!tokens.isEmpty()) {
+					tokensMap.put(RecordType.METRIC, tokens);
+				}
+
+				tokens = _analyzedTokens(kq.getTagKey());
+				if(!tokens.isEmpty()) {
+					tokensMap.put(RecordType.TAGK, tokens);
+				}
+
+				tokens = _analyzedTokens(kq.getTagValue());
+ if(!tokens.isEmpty()) { + tokensMap.put(RecordType.TAGV, tokens); + } + + tokens = _analyzedTokens(kq.getNamespace()); + if(!tokens.isEmpty()) { + tokensMap.put(RecordType.NAMESPACE, tokens); + } + + String queryJson = _constructQueryStringQuery(kq, tokensMap); + String requestUrl = sb.toString(); + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson)); + String strResponse = extractResponse(response); + + List records = SchemaService.constructMetricSchemaRecordsForType( + toEntity(strResponse, new TypeReference>() {}), kq.getType()); + + int fromIndex = kq.getLimit() * (kq.getPage() - 1); + if(records.size() <= fromIndex) { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return Collections.emptyList(); + } + + if(records.size() < kq.getLimit() * kq.getPage()) { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return records.subList(fromIndex, records.size()); + } else { + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags); + _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags); + return records.subList(fromIndex, kq.getLimit() * kq.getPage()); + } + + } + + } catch (IOException e) { + throw new SystemException(e); + } + } + + private List _analyzedTokens(String query) { + + if(!SchemaService.containsFilter(query)) { + return Collections.emptyList(); + } + + List tokens = new ArrayList<>(); + + String requestUrl = new StringBuilder("/").append(INDEX_NAME).append("/_analyze").toString(); + + String requestBody = "{\"analyzer\" : \"metadata_analyzer\", \"text\": \"" + query + "\" }"; + + try { + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); + String strResponse = extractResponse(response); + JsonNode tokensNode = _mapper.readTree(strResponse).get("tokens"); + if(tokensNode.isArray()) { + for(JsonNode tokenNode : tokensNode) { + tokens.add(tokenNode.get("token").asText()); + } + } + + return tokens; + } catch (IOException e) { + throw new SystemException(e); + } + } + + private void _upsert(List records) { + String requestUrl = new StringBuilder().append("/") + .append(INDEX_NAME) + .append("/") + .append(TYPE_NAME) + .append("/") + .append("_bulk") + .toString(); + + String strResponse = ""; + + MetricSchemaRecordList msrList = new MetricSchemaRecordList(records, _idgenHashAlgo); + try { + String requestBody = _mapper.writeValueAsString(msrList); + Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody)); + strResponse = extractResponse(response); + + } catch (IOException e) { + //TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException?? + throw new SystemException(e); + } + + try { + PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class); + //TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off. 
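The back-off TODO just above recurs in all three _upsert* methods. As a minimal sketch of what it suggests, not part of the patch, with hypothetical retry bounds and helper name:

```java
import java.util.concurrent.Callable;

// Hypothetical helper for the retry the TODOs describe: re-issue a bulk
// request with exponential back-off when ES rejects it (e.g. HTTP 429).
final class BulkRetry {
	static <T> T withBackoff(Callable<T> bulkCall) throws Exception {
		long delayMs = 200L;                      // initial back-off
		for (int attempt = 1; ; attempt++) {
			try {
				return bulkCall.call();
			} catch (Exception rejected) {
				if (attempt >= 5) throw rejected; // give up after a few tries
				Thread.sleep(delayMs);
				delayMs *= 2;                     // 200, 400, 800, 1600 ms
			}
		}
	}
}
```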
+			if(putResponse.errors) {
+				List<MetricSchemaRecord> recordsToRemove = new ArrayList<>();
+				for(Item item : putResponse.items) {
+					if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+						_logger.warn("Failed to index metric. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+						recordsToRemove.add(msrList.getRecord(item.create._id));
+					}
+
+					if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+						_logger.warn("Index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+						recordsToRemove.add(msrList.getRecord(item.index._id));
+					}
+				}
+				if(!recordsToRemove.isEmpty()) {
+					_logger.info("{} records were not written to ES", recordsToRemove.size());
+					records.removeAll(recordsToRemove);
+				}
+			}
+			//add to bloom filter
+			_addToBloomFilter(records);
+
+		} catch(IOException e) {
+			throw new SystemException("Failed to parse response of put metrics. The response was: " + strResponse, e);
+		}
+	}
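For context, the _upsert* methods POST newline-delimited bodies to the _bulk endpoint and parse responses of roughly this shape into PutResponse (ids are illustrative): status 201 means created, 409 means a harmless duplicate, and 404 with an error object means the target index is missing, which is exactly the branching in the loop above.

```json
{
  "took": 30,
  "errors": true,
  "items": [
    { "create": { "_index": "metadata_index", "_type": "metadata_type", "_id": "a1b2", "status": 201 } },
    { "create": { "_index": "metadata_index", "_type": "metadata_type", "_id": "c3d4", "status": 409 } },
    { "index":  { "_index": "metadata_index", "_type": "metadata_type", "_id": "e5f6", "status": 404,
                  "error": { "type": "index_not_found_exception", "reason": "no such index" } } }
  ]
}
```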
+
+	private void _upsertScopeAndMetrics(List<ScopeAndMetricOnlySchemaRecord> records) {
+		String requestUrl = new StringBuilder().append("/")
+				.append(SCOPE_AND_METRIC_INDEX_NAME)
+				.append("/")
+				.append(SCOPE_AND_METRIC_TYPE_NAME)
+				.append("/")
+				.append("_bulk")
+				.toString();
+
+		String strResponse = "";
+
+		ScopeAndMetricOnlySchemaRecordList recordList = new ScopeAndMetricOnlySchemaRecordList(records, _idgenHashAlgo);
+
+		try {
+			String requestBody = _scopeAndMetricOnlyMapper.writeValueAsString(recordList);
+			Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody));
+			strResponse = extractResponse(response);
+		} catch (IOException e) {
+			//TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException??
+			throw new SystemException(e);
+		}
+
+		try {
+			PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class);
+			//TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off.
+			if(putResponse.errors) {
+				List<ScopeAndMetricOnlySchemaRecord> recordsToRemove = new ArrayList<>();
+				for(Item item : putResponse.items) {
+					if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+						_logger.warn("Failed to index scope and metric. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+						recordsToRemove.add(recordList.getRecord(item.create._id));
+					}
+
+					if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+						_logger.warn("Scope and metric index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+						recordsToRemove.add(recordList.getRecord(item.index._id));
+					}
+				}
+				if(!recordsToRemove.isEmpty()) {
+					_logger.info("{} records were not written to ES", recordsToRemove.size());
+					records.removeAll(recordsToRemove);
+				}
+			}
+			//add to bloom filter
+			_addToBloomFilterScopeAndMetricOnly(records);
+
+		} catch(IOException e) {
+			throw new SystemException("Failed to parse response of put scope and metric names. The response was: " + strResponse, e);
+		}
+	}
+
+	private void _upsertScopes(List<ScopeOnlySchemaRecord> records) {
+		String requestUrl = new StringBuilder().append("/")
+				.append(SCOPE_INDEX_NAME)
+				.append("/")
+				.append(SCOPE_TYPE_NAME)
+				.append("/")
+				.append("_bulk")
+				.toString();
+
+		String strResponse = "";
+
+		ScopeOnlySchemaRecordList scopeOnlySchemaRecordList = new ScopeOnlySchemaRecordList(records, _idgenHashAlgo);
+
+		try {
+			String requestBody = _scopeOnlyMapper.writeValueAsString(scopeOnlySchemaRecordList);
+			Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestBody));
+			strResponse = extractResponse(response);
+		} catch (IOException e) {
+			//TODO: Retry with exponential back-off for handling EsRejectedExecutionException/RemoteTransportException/TimeoutException??
+			throw new SystemException(e);
+		}
+
+		try {
+			PutResponse putResponse = new ObjectMapper().readValue(strResponse, PutResponse.class);
+			//TODO: If response contains HTTP 429 Too Many Requests (EsRejectedExecutionException), then retry with exponential back-off.
+			if(putResponse.errors) {
+				List<ScopeOnlySchemaRecord> recordsToRemove = new ArrayList<>();
+				for(Item item : putResponse.items) {
+					if(item.create != null && item.create.status != HttpStatus.SC_CONFLICT && item.create.status != HttpStatus.SC_CREATED) {
+						_logger.warn("Failed to index scope. Reason: " + new ObjectMapper().writeValueAsString(item.create.error));
+						recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.create._id));
+					}
+
+					if(item.index != null && item.index.status == HttpStatus.SC_NOT_FOUND) {
+						_logger.warn("Scope index does not exist. Error: " + new ObjectMapper().writeValueAsString(item.index.error));
+						recordsToRemove.add(scopeOnlySchemaRecordList.getRecord(item.index._id));
+					}
+				}
+				if(!recordsToRemove.isEmpty()) {
+					_logger.info("{} records were not written to ES", recordsToRemove.size());
+					records.removeAll(recordsToRemove);
+				}
+			}
+			//add to bloom filter
+			_addToBloomFilterScopeOnly(records);
+
+		} catch(IOException e) {
+			throw new SystemException("Failed to parse response of put scope names. 
The response was: " + strResponse, e); + } + } + + protected void _addToBloomFilter(List records){ + _logger.info("Adding {} records into bloom filter.", records.size()); + for(MetricSchemaRecord record : records) { + String key = constructKey(record.getScope(), record.getMetric(), record.getTagKey(), record.getTagValue(), record.getNamespace()); + bloomFilter.put(key); + } + } + + protected void _addToBloomFilterScopeAndMetricOnly(List records){ + _logger.info("Adding {} records into scope and metric only bloom filter.", records.size()); + for(ScopeAndMetricOnlySchemaRecord record : records) { + String key = constructScopeAndMetricOnlyKey(record.getScope(), record.getMetric()); + bloomFilterScopeAndMetricOnly.put(key); + } + } + + protected void _addToBloomFilterScopeOnly(List records){ + _logger.info("Adding {} records into scope only bloom filter.", records.size()); + for(ScopeOnlySchemaRecord record : records) { + String key = constructScopeOnlyKey(record.getScope()); + bloomFilterScopeOnly.put(key); + } + } + + private String _constructTermAggregationQuery(MetricSchemaRecordQuery query, RecordType type) { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode queryNode = _constructQueryNode(query, mapper); + + long size = query.getLimit() * query.getPage(); + SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, + "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); + + ObjectNode aggsNode = _constructAggsNode(type, Math.max(size, 10000), mapper); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("query", queryNode); + rootNode.put("size", 0); + rootNode.put("aggs", aggsNode); + + return rootNode.toString(); + } + + private String _constructTermQuery(MetricSchemaRecordQuery query, int from, int size) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode queryNode = _constructQueryNode(query, mapper); + + ObjectNode rootNode = _mapper.createObjectNode(); + rootNode.put("query", queryNode); + rootNode.put("from", from); + rootNode.put("size", size); + + return rootNode.toString(); + } + + private ObjectNode _constructSimpleQueryStringNode(List tokens, RecordType... 
types) { + + if(tokens.isEmpty()) { + return null; + } + + ObjectMapper mapper = new ObjectMapper(); + + StringBuilder queryString = new StringBuilder(); + for(String token : tokens) { + queryString.append('+').append(token).append(' '); + } + queryString.replace(queryString.length() - 1, queryString.length(), "*"); + + ObjectNode node = mapper.createObjectNode(); + ArrayNode fieldsNode = mapper.createArrayNode(); + for(RecordType type : types) { + fieldsNode.add(type.getName()); + } + node.put("fields", fieldsNode); + node.put("query", queryString.toString()); + + ObjectNode simpleQueryStringNode = mapper.createObjectNode(); + simpleQueryStringNode.put("simple_query_string", node); + + return simpleQueryStringNode; + } + + private String _constructQueryStringQuery(List tokens, int from, int size) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(tokens, RecordType.values()); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("query", simpleQueryStringNode); + rootNode.put("from", from); + rootNode.put("size", size); + + return rootNode.toString(); + } + + private String _constructQueryStringQuery(KeywordQuery kq, Map> tokensMap) { + ObjectMapper mapper = new ObjectMapper(); + + ArrayNode filterNodes = mapper.createArrayNode(); + for(Map.Entry> entry : tokensMap.entrySet()) { + ObjectNode simpleQueryStringNode = _constructSimpleQueryStringNode(entry.getValue(), entry.getKey()); + filterNodes.add(simpleQueryStringNode); + } + + ObjectNode boolNode = mapper.createObjectNode(); + boolNode.put("filter", filterNodes); + + ObjectNode queryNode = mapper.createObjectNode(); + queryNode.put("bool", boolNode); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("query", queryNode); + rootNode.put("size", 0); + + long size = kq.getLimit() * kq.getPage(); + SystemAssert.requireArgument(size > 0 && size <= Integer.MAX_VALUE, + "(limit * page) must be greater than 0 and less than Integer.MAX_VALUE"); + rootNode.put("aggs", _constructAggsNode(kq.getType(), Math.max(size, 10000), mapper)); + + return rootNode.toString(); + + } + + private ObjectNode _constructQueryNode(MetricSchemaRecordQuery query, ObjectMapper mapper) { + ArrayNode filterNodes = mapper.createArrayNode(); + if(SchemaService.containsFilter(query.getMetric())) { + ObjectNode node = mapper.createObjectNode(); + ObjectNode regexpNode = mapper.createObjectNode(); + regexpNode.put(RecordType.METRIC.getName() + ".raw", SchemaService.convertToRegex(query.getMetric())); + node.put("regexp", regexpNode); + filterNodes.add(node); + } + + if(SchemaService.containsFilter(query.getScope())) { + ObjectNode node = mapper.createObjectNode(); + ObjectNode regexpNode = mapper.createObjectNode(); + regexpNode.put(RecordType.SCOPE.getName() + ".raw", SchemaService.convertToRegex(query.getScope())); + node.put("regexp", regexpNode); + filterNodes.add(node); + } + + if(SchemaService.containsFilter(query.getTagKey())) { + ObjectNode node = mapper.createObjectNode(); + ObjectNode regexpNode = mapper.createObjectNode(); + regexpNode.put(RecordType.TAGK.getName() + ".raw", SchemaService.convertToRegex(query.getTagKey())); + node.put("regexp", regexpNode); + filterNodes.add(node); + } + + if(SchemaService.containsFilter(query.getTagValue())) { + ObjectNode node = mapper.createObjectNode(); + ObjectNode regexpNode = mapper.createObjectNode(); + regexpNode.put(RecordType.TAGV.getName() + ".raw", SchemaService.convertToRegex(query.getTagValue())); + node.put("regexp", 
+				regexpNode);
+			node.put("regexp", regexpNode);
+			filterNodes.add(node);
+		}
+
+		if(SchemaService.containsFilter(query.getNamespace())) {
+			ObjectNode node = mapper.createObjectNode();
+			ObjectNode regexpNode = mapper.createObjectNode();
+			regexpNode.put(RecordType.NAMESPACE.getName() + ".raw", SchemaService.convertToRegex(query.getNamespace()));
+			node.put("regexp", regexpNode);
+			filterNodes.add(node);
+		}
+
+		ObjectNode boolNode = mapper.createObjectNode();
+		boolNode.put("filter", filterNodes);
+
+		ObjectNode queryNode = mapper.createObjectNode();
+		queryNode.put("bool", boolNode);
+		return queryNode;
+	}
+
+	private ObjectNode _constructAggsNode(RecordType type, long limit, ObjectMapper mapper) {
+
+		ObjectNode termsNode = mapper.createObjectNode();
+		termsNode.put("field", type.getName() + ".raw");
+		termsNode.put("order", mapper.createObjectNode().put("_term", "asc"));
+		termsNode.put("size", limit);
+		termsNode.put("execution_hint", "map");
+
+		ObjectNode distinctValuesNode = mapper.createObjectNode();
+		distinctValuesNode.put("terms", termsNode);
+
+		ObjectNode aggsNode = mapper.createObjectNode();
+		aggsNode.put("distinct_values", distinctValuesNode);
+		return aggsNode;
+	}
+
+	/* Helper method to convert JSON String representation to the corresponding Java entity. */
+	private <T> T toEntity(String content, TypeReference<T> type) {
+		try {
+			return _mapper.readValue(content, type);
+		} catch (IOException ex) {
+			throw new SystemException(ex);
+		}
+	}
+
+	/* Method to change the rest client. Used for testing. */
+	protected void setRestClient(RestClient restClient) {
+		this._esRestClient = restClient;
+	}
+
+	/** Helper to process the response.
+	 * Throws a SystemException when the http status code is outside the range 200 - 300.
+	 * @param response ES response
+	 * @return Stringified response
+	 */
+	protected String extractResponse(Response response) {
+		requireArgument(response != null, "HttpResponse object cannot be null.");
+
+		int status = response.getStatusLine().getStatusCode();
+		String strResponse = extractStringResponse(response);
+
+		if ((status < HttpStatus.SC_OK) || (status >= HttpStatus.SC_MULTIPLE_CHOICES)) {
+			throw new SystemException("Status code: " + status + ". Error occurred. 
" + strResponse); + } else { + return strResponse; + } + } + + private String extractStringResponse(Response content) { + requireArgument(content != null, "Response content is null."); + + String result; + HttpEntity entity = null; + + try { + entity = content.getEntity(); + if (entity == null) { + result = ""; + } else { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + entity.writeTo(baos); + result = baos.toString("UTF-8"); + } + return result; + } catch (IOException ex) { + throw new SystemException(ex); + } finally { + if (entity != null) { + try { + EntityUtils.consume(entity); + } catch (IOException ex) { + _logger.warn("Failed to close entity stream.", ex); + } + } + } + } + + private ObjectMapper _createObjectMapper() { + ObjectMapper mapper = new ObjectMapper(); + + mapper.setSerializationInclusion(Include.NON_NULL); + SimpleModule module = new SimpleModule(); + module.addSerializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Serializer()); + module.addDeserializer(MetricSchemaRecordList.class, new MetricSchemaRecordList.Deserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); + mapper.registerModule(module); + + return mapper; + } + + private ObjectMapper _createScopeAndMetricOnlyObjectMapper() { + ObjectMapper mapper = new ObjectMapper(); + + mapper.setSerializationInclusion(Include.NON_NULL); + SimpleModule module = new SimpleModule(); + module.addSerializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Serializer()); + module.addDeserializer(ScopeAndMetricOnlySchemaRecordList.class, new ScopeAndMetricOnlySchemaRecordList.Deserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); + mapper.registerModule(module); + + return mapper; + } + + private ObjectMapper _createScopeOnlyObjectMapper() { + ObjectMapper mapper = new ObjectMapper(); + + mapper.setSerializationInclusion(Include.NON_NULL); + SimpleModule module = new SimpleModule(); + module.addSerializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Serializer()); + module.addDeserializer(ScopeOnlySchemaRecordList.class, new ScopeOnlySchemaRecordList.Deserializer()); + module.addDeserializer(List.class, new SchemaRecordList.AggDeserializer()); + mapper.registerModule(module); + + return mapper; + } + + private ObjectNode _createSettingsNode(int replicationFactor, int numShards) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode metadataAnalyzer = mapper.createObjectNode(); + metadataAnalyzer.put("tokenizer", "metadata_tokenizer"); + metadataAnalyzer.put("filter", mapper.createArrayNode().add("lowercase")); + + ObjectNode analyzerNode = mapper.createObjectNode(); + analyzerNode.put("metadata_analyzer", metadataAnalyzer); + + ObjectNode tokenizerNode = mapper.createObjectNode(); + tokenizerNode.put("metadata_tokenizer", mapper.createObjectNode().put("type", "pattern").put("pattern", "([^\\p{L}\\d]+)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])")); + + ObjectNode analysisNode = mapper.createObjectNode(); + analysisNode.put("analyzer", analyzerNode); + analysisNode.put("tokenizer", tokenizerNode); + + ObjectNode indexNode = mapper.createObjectNode(); + indexNode.put("max_result_window", INDEX_MAX_RESULT_WINDOW); + indexNode.put("number_of_replicas", replicationFactor); + indexNode.put("number_of_shards", numShards); + + ObjectNode settingsNode = mapper.createObjectNode(); + settingsNode.put("analysis", analysisNode); + 
settingsNode.put("index", indexNode); + + return settingsNode; + } + + private ObjectNode _createMappingsNode() { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode propertiesNode = mapper.createObjectNode(); + propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.TAGK.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.TAGV.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.NAMESPACE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + + propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + + ObjectNode typeNode = mapper.createObjectNode(); + typeNode.put("properties", propertiesNode); + + ObjectNode mappingsNode = mapper.createObjectNode(); + mappingsNode.put(TYPE_NAME, typeNode); + return mappingsNode; + } + + private ObjectNode _createScopeAndMetricMappingsNode() { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode propertiesNode = mapper.createObjectNode(); + propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + propertiesNode.put(RecordType.METRIC.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + + propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - ObjectNode typeNode = mapper.createObjectNode(); - typeNode.put("properties", propertiesNode); - - ObjectNode mappingsNode = mapper.createObjectNode(); - mappingsNode.put(SCOPE_AND_METRIC_TYPE_NAME, typeNode); + ObjectNode typeNode = mapper.createObjectNode(); + typeNode.put("properties", propertiesNode); + + ObjectNode mappingsNode = mapper.createObjectNode(); + mappingsNode.put(SCOPE_AND_METRIC_TYPE_NAME, typeNode); - return mappingsNode; - } + return mappingsNode; + } - private ObjectNode _createScopeMappingsNode() { - ObjectMapper mapper = new ObjectMapper(); + private ObjectNode _createScopeMappingsNode() { + ObjectMapper mapper = new ObjectMapper(); - ObjectNode propertiesNode = mapper.createObjectNode(); - propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); - - propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); - - ObjectNode typeNode = mapper.createObjectNode(); - typeNode.put("properties", propertiesNode); - - ObjectNode mappingsNode = mapper.createObjectNode(); - mappingsNode.put(SCOPE_TYPE_NAME, typeNode); - - return mappingsNode; - } - - private ObjectNode _createFieldNode(String type) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode fieldNode = mapper.createObjectNode(); - fieldNode.put("type", type); - fieldNode.put("analyzer", "metadata_analyzer"); - ObjectNode keywordNode = mapper.createObjectNode(); - keywordNode.put("type", "keyword"); - ObjectNode fieldsNode = mapper.createObjectNode(); - fieldsNode.put("raw", keywordNode); - fieldNode.put("fields", fieldsNode); - return fieldNode; - } - - private ObjectNode _createFieldNodeNoAnalyzer(String type) { - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode fieldNode = mapper.createObjectNode(); - fieldNode.put("type", type); - return fieldNode; - } - - private void _createIndexIfNotExists(String indexName, int replicationFactor, int numShards, - Supplier createMappingsNode) { - try { - Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + indexName); - 
boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; - - if(!indexExists) { - _logger.info("Index [" + indexName + "] does not exist. Will create one."); - ObjectMapper mapper = new ObjectMapper(); - - ObjectNode rootNode = mapper.createObjectNode(); - rootNode.put("settings", _createSettingsNode(replicationFactor, numShards)); - rootNode.put("mappings", createMappingsNode.get()); - - String settingsAndMappingsJson = rootNode.toString(); - String requestUrl = new StringBuilder().append("/").append(indexName).toString(); - - response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); - extractResponse(response); - } - } catch (Exception e) { - _logger.error("Failed to check/create {} index. ElasticSearchSchemaService may not function. {}", - indexName, e); - } - } - - /** - * Enumeration of supported HTTP methods. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - private enum HttpMethod { - - /** POST operation. */ - POST("POST"), - /** PUT operation. */ - PUT("PUT"), - /** HEAD operation. */ - HEAD("HEAD"); - - private String name; - - HttpMethod(String name) { - this.setName(name); - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - } - - - /** - * The set of implementation specific configuration properties. - * - * @author Bhinav Sura (bhinav.sura@salesforce.com) - */ - public enum Property { - - ELASTICSEARCH_ENDPOINT("service.property.schema.elasticsearch.endpoint", "http://localhost:9200,http://localhost:9201"), - /** Connection timeout for ES REST client. */ - ELASTICSEARCH_ENDPOINT_CONNECTION_TIMEOUT("service.property.schema.elasticsearch.endpoint.connection.timeout", "10000"), - /** Socket connection timeout for ES REST client. */ - ELASTICSEARCH_ENDPOINT_SOCKET_TIMEOUT("service.property.schema.elasticsearch.endpoint.socket.timeout", "10000"), - /** Connection count for ES REST client. */ - ELASTICSEARCH_CONNECTION_COUNT("service.property.schema.elasticsearch.connection.count", "10"), - /** Replication factor for metadata_index. */ - ELASTICSEARCH_NUM_REPLICAS("service.property.schema.elasticsearch.num.replicas", "1"), - /** Shard count for metadata_index. */ - ELASTICSEARCH_SHARDS_COUNT("service.property.schema.elasticsearch.shards.count", "10"), - /** Replication factor for scopenames */ - ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.index", "1"), - /** Shard count for scopenames */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "6"), - /** The no. of records to batch for bulk indexing requests. - * https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests - */ - ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"), - /** The hashing algorithm to use for generating document id. 
*/ - ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"), - - /** Name of scope only index */ - ELASTICSEARCH_SCOPE_INDEX_NAME("service.property.schema.elasticsearch.scope.index.name", "scopenames"), - /** Type within scope only index */ - ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), - - /** Replication factor for scope and metric names */ - ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scopeandmetric.index", "1"), - /** Shard count for scope and metric names */ - ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scopeandmetric.index", "6"), - - /** Name of scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scopeandmetric.index.name", "scopemetricnames"), - /** Type within scope and metric only index */ - ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scopeandmetric.type.name", "scopemetric_type"); - - private final String _name; - private final String _defaultValue; - - private Property(String name, String defaultValue) { - _name = name; - _defaultValue = defaultValue; - } - - /** - * Returns the property name. - * - * @return The property name. - */ - public String getName() { - return _name; - } - - /** - * Returns the default value for the property. - * - * @return The default value. - */ - public String getDefaultValue() { - return _defaultValue; - } - } - - static class PutResponse { - private int took; - private boolean errors; - private List items; - - public PutResponse() {} - - public int getTook() { - return took; - } - - public void setTook(int took) { - this.took = took; - } - - public boolean isErrors() { - return errors; - } - - public void setErrors(boolean errors) { - this.errors = errors; - } - - public List getItems() { - return items; - } - - public void setItems(List items) { - this.items = items; - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class Item { - private CreateItem create; - private CreateItem index; - - public Item() {} - - public CreateItem getCreate() { - return create; - } - - public void setCreate(CreateItem create) { - this.create = create; - } - - public CreateItem getIndex() { - return index; - } - - public void setIndex(CreateItem index) { - this.index = index; - } - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class CreateItem { - private String _index; - private String _type; - private String _id; - private int status; - private Error error; - - public CreateItem() {} - - public String get_index() { - return _index; - } - - public void set_index(String _index) { - this._index = _index; - } - - public String get_type() { - return _type; - } - - public void set_type(String _type) { - this._type = _type; - } - - public String get_id() { - return _id; - } - - public void set_id(String _id) { - this._id = _id; - } - - public int getStatus() { - return status; - } - - public void setStatus(int status) { - this.status = status; - } - - public Error getError() { - return error; - } - - public void setError(Error error) { - this.error = error; - } - } - - @JsonIgnoreProperties(ignoreUnknown = true) - static class Error { - private String type; - private String reason; - - public Error() {} - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public 
String getReason() { - return reason; - } - - public void setReason(String reason) { - this.reason = reason; - } - } - } + ObjectNode propertiesNode = mapper.createObjectNode(); + propertiesNode.put(RecordType.SCOPE.getName(), _createFieldNode(FIELD_TYPE_TEXT)); + + propertiesNode.put("mts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + propertiesNode.put("cts", _createFieldNodeNoAnalyzer(FIELD_TYPE_DATE)); + + ObjectNode typeNode = mapper.createObjectNode(); + typeNode.put("properties", propertiesNode); + + ObjectNode mappingsNode = mapper.createObjectNode(); + mappingsNode.put(SCOPE_TYPE_NAME, typeNode); + + return mappingsNode; + } + + private ObjectNode _createFieldNode(String type) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode fieldNode = mapper.createObjectNode(); + fieldNode.put("type", type); + fieldNode.put("analyzer", "metadata_analyzer"); + ObjectNode keywordNode = mapper.createObjectNode(); + keywordNode.put("type", "keyword"); + ObjectNode fieldsNode = mapper.createObjectNode(); + fieldsNode.put("raw", keywordNode); + fieldNode.put("fields", fieldsNode); + return fieldNode; + } + + private ObjectNode _createFieldNodeNoAnalyzer(String type) { + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode fieldNode = mapper.createObjectNode(); + fieldNode.put("type", type); + return fieldNode; + } + + private void _createIndexIfNotExists(String indexName, int replicationFactor, int numShards, + Supplier createMappingsNode) { + try { + Response response = _esRestClient.performRequest(HttpMethod.HEAD.getName(), "/" + indexName); + boolean indexExists = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK ? true : false; + + if(!indexExists) { + _logger.info("Index [" + indexName + "] does not exist. Will create one."); + ObjectMapper mapper = new ObjectMapper(); + + ObjectNode rootNode = mapper.createObjectNode(); + rootNode.put("settings", _createSettingsNode(replicationFactor, numShards)); + rootNode.put("mappings", createMappingsNode.get()); + + String settingsAndMappingsJson = rootNode.toString(); + String requestUrl = new StringBuilder().append("/").append(indexName).toString(); + + response = _esRestClient.performRequest(HttpMethod.PUT.getName(), requestUrl, Collections.emptyMap(), new StringEntity(settingsAndMappingsJson)); + extractResponse(response); + } + } catch (Exception e) { + _logger.error("Failed to check/create {} index. ElasticSearchSchemaService may not function. {}", + indexName, e); + } + } + + /** + * Enumeration of supported HTTP methods. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + private enum HttpMethod { + + /** POST operation. */ + POST("POST"), + /** PUT operation. */ + PUT("PUT"), + /** HEAD operation. */ + HEAD("HEAD"); + + private String name; + + HttpMethod(String name) { + this.setName(name); + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + } + + + /** + * The set of implementation specific configuration properties. + * + * @author Bhinav Sura (bhinav.sura@salesforce.com) + */ + public enum Property { + + ELASTICSEARCH_ENDPOINT("service.property.schema.elasticsearch.endpoint", "http://localhost:9200,http://localhost:9201"), + /** Connection timeout for ES REST client. */ + ELASTICSEARCH_ENDPOINT_CONNECTION_TIMEOUT("service.property.schema.elasticsearch.endpoint.connection.timeout", "10000"), + /** Socket connection timeout for ES REST client. 
*/ + ELASTICSEARCH_ENDPOINT_SOCKET_TIMEOUT("service.property.schema.elasticsearch.endpoint.socket.timeout", "10000"), + /** Connection count for ES REST client. */ + ELASTICSEARCH_CONNECTION_COUNT("service.property.schema.elasticsearch.connection.count", "10"), + /** Replication factor for metadata_index. */ + ELASTICSEARCH_NUM_REPLICAS("service.property.schema.elasticsearch.num.replicas", "1"), + /** Shard count for metadata_index. */ + ELASTICSEARCH_SHARDS_COUNT("service.property.schema.elasticsearch.shards.count", "10"), + /** Replication factor for scopenames */ + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.num.replicas.for.scope.index", "1"), + /** Shard count for scopenames */ + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_INDEX("service.property.schema.elasticsearch.shards.count.for.scope.index", "6"), + /** The no. of records to batch for bulk indexing requests. + * https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests + */ + ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"), + /** The hashing algorithm to use for generating document id. */ + ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"), + + /** Name of scope only index */ + ELASTICSEARCH_SCOPE_INDEX_NAME("service.property.schema.elasticsearch.scope.index.name", "scopenames"), + /** Type within scope only index */ + ELASTICSEARCH_SCOPE_TYPE_NAME("service.property.schema.elasticsearch.scope.type.name", "scope_type"), + + /** Replication factor for scope and metric names */ + ELASTICSEARCH_NUM_REPLICAS_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.num.replicas.for.scopeandmetric.index", "1"), + /** Shard count for scope and metric names */ + ELASTICSEARCH_SHARDS_COUNT_FOR_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.shards.count.for.scopeandmetric.index", "6"), + + /** Name of scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_INDEX_NAME("service.property.schema.elasticsearch.scopeandmetric.index.name", "scopemetricnames"), + /** Type within scope and metric only index */ + ELASTICSEARCH_SCOPE_AND_METRIC_TYPE_NAME("service.property.schema.elasticsearch.scopeandmetric.type.name", "scopemetric_type"); + + private final String _name; + private final String _defaultValue; + + private Property(String name, String defaultValue) { + _name = name; + _defaultValue = defaultValue; + } + + /** + * Returns the property name. + * + * @return The property name. + */ + public String getName() { + return _name; + } + + /** + * Returns the default value for the property. + * + * @return The default value. 
+ */ + public String getDefaultValue() { + return _defaultValue; + } + } + + static class PutResponse { + private int took; + private boolean errors; + private List items; + + public PutResponse() {} + + public int getTook() { + return took; + } + + public void setTook(int took) { + this.took = took; + } + + public boolean isErrors() { + return errors; + } + + public void setErrors(boolean errors) { + this.errors = errors; + } + + public List getItems() { + return items; + } + + public void setItems(List items) { + this.items = items; + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class Item { + private CreateItem create; + private CreateItem index; + + public Item() {} + + public CreateItem getCreate() { + return create; + } + + public void setCreate(CreateItem create) { + this.create = create; + } + + public CreateItem getIndex() { + return index; + } + + public void setIndex(CreateItem index) { + this.index = index; + } + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class CreateItem { + private String _index; + private String _type; + private String _id; + private int status; + private Error error; + + public CreateItem() {} + + public String get_index() { + return _index; + } + + public void set_index(String _index) { + this._index = _index; + } + + public String get_type() { + return _type; + } + + public void set_type(String _type) { + this._type = _type; + } + + public String get_id() { + return _id; + } + + public void set_id(String _id) { + this._id = _id; + } + + public int getStatus() { + return status; + } + + public void setStatus(int status) { + this.status = status; + } + + public Error getError() { + return error; + } + + public void setError(Error error) { + this.error = error; + } + } + + @JsonIgnoreProperties(ignoreUnknown = true) + static class Error { + private String type; + private String reason; + + public Error() {} + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getReason() { + return reason; + } + + public void setReason(String reason) { + this.reason = reason; + } + } + } } diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java index df7ed2a3c..372ffcc9f 100644 --- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java +++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/AbstractSchemaServiceTest.java @@ -35,166 +35,166 @@ */ public class AbstractSchemaServiceTest extends AbstractTest { - private int scopesCount = 0; - private int scopeAndMetricsCount = 0; - private int metricsCount = 0; + private int scopesCount = 0; + private int scopeAndMetricsCount = 0; + private int metricsCount = 0; - @Test - public void testPutEverythingCached() { - List metrics = createRandomMetrics("test-scope", "test-metric", 10); + @Test + public void testPutEverythingCached() { + List metrics = createRandomMetrics("test-scope", "test-metric", 10); - metrics.addAll(createRandomMetrics(null, null, 10)); + metrics.addAll(createRandomMetrics(null, null, 10)); - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - ElasticSearchSchemaService 
spyService = _initializeSpyService(service); + ElasticSearchSchemaService spyService = _initializeSpyService(service); - spyService.put(metrics); + spyService.put(metrics); - Set scopeNames = new HashSet<>(); - Set> scopeAndMetricNames = new HashSet<>(); + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); - for(Metric m : metrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); - // add to bloom filter cache - spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); - spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); - spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); + // add to bloom filter cache + spyService._addToBloomFilter(spyService._fracture(metrics).get(0)); + spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0)); + spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0)); - spyService.put(metrics); - // count should be same since we are re-reading cached value + spyService.put(metrics); + // count should be same since we are re-reading cached value - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); - } + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); + } - @Test - public void testPutPartialCached() { - List metrics = createRandomMetrics("test-scope", "test-metric", 10); + @Test + public void testPutPartialCached() { + List metrics = createRandomMetrics("test-scope", "test-metric", 10); - ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); - ElasticSearchSchemaService spyService = _initializeSpyService(service); + ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService()); + ElasticSearchSchemaService spyService = _initializeSpyService(service); - spyService.put(metrics); + spyService.put(metrics); - Set scopeNames = new HashSet<>(); - Set> scopeAndMetricNames = new HashSet<>(); + Set scopeNames = new HashSet<>(); + Set> scopeAndMetricNames = new HashSet<>(); - for(Metric m : metrics) - { - scopeNames.add(m.getScope()); - scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); - } + for(Metric m : metrics) + { + scopeNames.add(m.getScope()); + scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric())); + } - assertEquals(metricsCount, metrics.size()); - assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); - assertEquals(scopesCount, scopeNames.size()); + assertEquals(metricsCount, metrics.size()); + assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size()); + assertEquals(scopesCount, scopeNames.size()); - // add to bloom filter cache - 
- @Test
- public void testPutPartialCached() {
- List<Metric> metrics = createRandomMetrics("test-scope", "test-metric", 10);
+ @Test
+ public void testPutPartialCached() {
+ List<Metric> metrics = createRandomMetrics("test-scope", "test-metric", 10);

- ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());
- ElasticSearchSchemaService spyService = _initializeSpyService(service);
+ ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());
+ ElasticSearchSchemaService spyService = _initializeSpyService(service);

- spyService.put(metrics);
+ spyService.put(metrics);

- Set<String> scopeNames = new HashSet<>();
- Set<Pair<String, String>> scopeAndMetricNames = new HashSet<>();
+ Set<String> scopeNames = new HashSet<>();
+ Set<Pair<String, String>> scopeAndMetricNames = new HashSet<>();

- for(Metric m : metrics)
- {
- scopeNames.add(m.getScope());
- scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
- }
+ for(Metric m : metrics)
+ {
+ scopeNames.add(m.getScope());
+ scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
+ }

- assertEquals(metricsCount, metrics.size());
- assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
- assertEquals(scopesCount, scopeNames.size());
+ assertEquals(metricsCount, metrics.size());
+ assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
+ assertEquals(scopesCount, scopeNames.size());

- // add to bloom filter cache
- spyService._addToBloomFilter(spyService._fracture(metrics).get(0));
- spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0));
- spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0));
+ // add to bloom filter cache
+ spyService._addToBloomFilter(spyService._fracture(metrics).get(0));
+ spyService._addToBloomFilterScopeAndMetricOnly(spyService._fractureScopeAndMetrics(scopeAndMetricNames).get(0));
+ spyService._addToBloomFilterScopeOnly(spyService._fractureScopes(scopeNames).get(0));

- List<Metric> newMetrics = createRandomMetrics(null, null, 10);
+ List<Metric> newMetrics = createRandomMetrics(null, null, 10);

- // 1st metric already in cache (partial case scenario), and now we call put with both list of metrics
+ // 1st metric already in cache (partial case scenario), and now we call put with both list of metrics

- initCounters();
- spyService.put(metrics);
- spyService.put(newMetrics);
+ initCounters();
+ spyService.put(metrics);
+ spyService.put(newMetrics);

- scopeNames.clear();
- scopeAndMetricNames.clear();
+ scopeNames.clear();
+ scopeAndMetricNames.clear();

- for(Metric m : newMetrics)
- {
- scopeNames.add(m.getScope());
- scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
- }
+ for(Metric m : newMetrics)
+ {
+ scopeNames.add(m.getScope());
+ scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
+ }

- assertEquals(metricsCount, newMetrics.size());
- assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
- assertEquals(scopesCount, scopeNames.size());
- }
+ assertEquals(metricsCount, newMetrics.size());
+ assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
+ assertEquals(scopesCount, scopeNames.size());
+ }

- @Test
- public void testPutNothingCached() {
- List<Metric> metrics = createRandomMetrics("test-scope", "test-metric", 10);
+ @Test
+ public void testPutNothingCached() {
+ List<Metric> metrics = createRandomMetrics("test-scope", "test-metric", 10);

- metrics.addAll(createRandomMetrics(null, null, 10));
+ metrics.addAll(createRandomMetrics(null, null, 10));

- ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());
- ElasticSearchSchemaService spyService = _initializeSpyService(service);
+ ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());
+ ElasticSearchSchemaService spyService = _initializeSpyService(service);

- spyService.put(metrics);
+ spyService.put(metrics);

- Set<String> scopeNames = new HashSet<>();
- Set<Pair<String, String>> scopeAndMetricNames = new HashSet<>();
+ Set<String> scopeNames = new HashSet<>();
+ Set<Pair<String, String>> scopeAndMetricNames = new HashSet<>();

- for(Metric m : metrics)
- {
- scopeNames.add(m.getScope());
- scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
- }
+ for(Metric m : metrics)
+ {
+ scopeNames.add(m.getScope());
+ scopeAndMetricNames.add(Pair.of(m.getScope(), m.getMetric()));
+ }

- assertEquals(metricsCount, metrics.size());
- assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
- assertEquals(scopesCount, scopeNames.size());
+ assertEquals(metricsCount, metrics.size());
+ assertEquals(scopeAndMetricsCount, scopeAndMetricNames.size());
+ assertEquals(scopesCount, scopeNames.size());

- spyService.put(metrics);
+ spyService.put(metrics);

- assertEquals(metricsCount, 2 * metrics.size());
- assertEquals(scopeAndMetricsCount, 2 * scopeAndMetricNames.size());
- assertEquals(scopesCount, 2 * scopeNames.size());
- }
+ assertEquals(metricsCount, 2 * metrics.size());
+ assertEquals(scopeAndMetricsCount, 2 * scopeAndMetricNames.size());
+ assertEquals(scopesCount, 2 * scopeNames.size());
+ }

- private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service) {
- ElasticSearchSchemaService spyService = Mockito.spy(service);
- initCounters();
+ private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaService service) {
+ ElasticSearchSchemaService spyService = Mockito.spy(service);
+ initCounters();

- Mockito.doAnswer(new Answer<Void>() {
- @Override
- public Void answer(InvocationOnMock invocation) throws Throwable {
- @SuppressWarnings("unchecked")
- List<Metric> metrics = List.class.cast(invocation.getArguments()[0]);
+ Mockito.doAnswer(new Answer<Void>() {
+ @Override
+ public Void answer(InvocationOnMock invocation) throws Throwable {
+ @SuppressWarnings("unchecked")
+ List<Metric> metrics = List.class.cast(invocation.getArguments()[0]);

- Set<String> scopeNames = Set.class.cast(invocation.getArguments()[1]);
+ Set<String> scopeNames = Set.class.cast(invocation.getArguments()[1]);

- Set<Pair<String, String>> scopeAndMetricNames = Set.class.cast(invocation.getArguments()[2]);
+ Set<Pair<String, String>> scopeAndMetricNames = Set.class.cast(invocation.getArguments()[2]);

- scopesCount += scopeNames.size();
- scopeAndMetricsCount += scopeAndMetricNames.size();
- metricsCount += metrics.size();
+ scopesCount += scopeNames.size();
+ scopeAndMetricsCount += scopeAndMetricNames.size();
+ metricsCount += metrics.size();

- return null;
- }
- }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any());
- return spyService;
- }
+ return null;
+ }
+ }).when(spyService).implementationSpecificPut(Mockito.any(), Mockito.any(), Mockito.any());
+ return spyService;
+ }

- private void initCounters() {
- scopesCount = 0;
- scopeAndMetricsCount = 0;
- metricsCount = 0;
- }
+ private void initCounters() {
+ scopesCount = 0;
+ scopeAndMetricsCount = 0;
+ metricsCount = 0;
+ }

- @Test
- public void getNumHoursUntilNextFlushBloomFilter() {
- ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());
+ @Test
+ public void getNumHoursUntilNextFlushBloomFilter() {
+ ElasticSearchSchemaService service = new ElasticSearchSchemaService(system.getConfiguration(), system.getServiceFactory().getMonitorService());

- Calendar calendar = Calendar.getInstance();
+ Calendar calendar = Calendar.getInstance();

- // Will wait 24 hours before next flush if at same hour boundary
- int hour = calendar.get(Calendar.HOUR_OF_DAY);
- assertTrue(service.getNumHoursUntilTargetHour(hour) == 24);
- }
+ // Will wait 24 hours before next flush if at same hour boundary
+ int hour = calendar.get(Calendar.HOUR_OF_DAY);
+ assertTrue(service.getNumHoursUntilTargetHour(hour) == 24);
+ }
 }
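All three tests above exercise the same contract: once a metric's schema has been recorded in the bloom filter caches, a repeated put must not reach implementationSpecificPut again. A standalone sketch of that check-then-write pattern with Guava's BloomFilter (the class and names here are illustrative; the real service keeps three separate filters and fractures its input into batches):

    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;

    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.List;

    public class BloomDedupSketch {

        private final BloomFilter<CharSequence> seen =
                BloomFilter.create(Funnels.stringFunnel(StandardCharsets.UTF_8), 1_000_000, 0.00001);

        // Returns only the keys that have (probably) not been written before and
        // remembers them, so a second call with the same keys returns nothing.
        public List<String> filterUnseen(List<String> schemaKeys) {
            List<String> toWrite = new ArrayList<>();
            for (String key : schemaKeys) {
                if (!seen.mightContain(key)) { // false positives possible, false negatives are not
                    toWrite.add(key);
                    seen.put(key);
                }
            }
            return toWrite;
        }
    }

A false positive here means a record that was never actually written gets skipped, which is why the error rate is configured very small (0.00001 in the properties below); misses in the other direction cannot happen.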
From 247a20dbd621845654fef10125afa1b595993b78 Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Tue, 24 Jul 2018 17:26:40 -0700
Subject: [PATCH 19/27] CR Fixes

---
 .../schema/ElasticSearchSchemaServiceTest.java | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
index fd868ca52..1756982f1 100644
--- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
+++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
@@ -118,7 +118,7 @@ public class ElasticSearchSchemaServiceTest extends AbstractTest {
 " }",
 "}");

- private String metricQuery1 = String.join("\n",
+ private String metricQueryTagvRegex = String.join("\n",
 "{",
 " \"query\": {",
 " \"bool\": {",
@@ -161,7 +161,7 @@ public class ElasticSearchSchemaServiceTest extends AbstractTest {
 " }",
 "}");

- private String metricQuery2 = String.join("\n",
+ private String metricQueryNamespaceRegex = String.join("\n",
 "{",
 " \"query\": {",
 " \"bool\": {",
@@ -268,7 +268,7 @@ public void testGetUniqueUsingScopeAndMetricSchemaIndex() throws IOException {
 }

 @Test
- public void testGetUniqueUsingMetricSchemaIndex1() throws IOException {
+ public void testGetUniqueUsingMetricTagvRegexSchemaIndex() throws IOException {

 MetricSchemaRecordQuery queryForMetric = new MetricSchemaRecordQuery.MetricSchemaRecordQueryBuilder().scope("system")
 .metric("argus")
@@ -294,7 +294,7 @@ public void testGetUniqueUsingMetricSchemaIndex1() throws IOException {
 String requestUrl = requestUrlCaptor.getValue();
 String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue()));

- assertEquals(metricQuery1, queryJson);
+ assertEquals(metricQueryTagvRegex, queryJson);
 assertEquals("/metadata_index/metadata_type/_search", requestUrl);

 assertFalse(queryForMetric.isQueryOnlyOnScope());
@@ -302,7 +302,7 @@ public void testGetUniqueUsingMetricSchemaIndex1() throws IOException {
 }

 @Test
- public void testGetUniqueUsingMetricSchemaIndex2() throws IOException {
+ public void testGetUniqueUsingMetricNamespaceRegexSchemaIndex() throws IOException {

 MetricSchemaRecordQuery queryForMetric = new MetricSchemaRecordQuery.MetricSchemaRecordQueryBuilder().scope("system")
 .metric("argus")
@@ -328,7 +328,7 @@ public void testGetUniqueUsingMetricSchemaIndex2() throws IOException {
 String requestUrl = requestUrlCaptor.getValue();
 String queryJson = convertToPrettyJson(EntityUtils.toString(queryJsonCaptor.getValue()));

- assertEquals(metricQuery2, queryJson);
+ assertEquals(metricQueryNamespaceRegex, queryJson);
 assertEquals("/metadata_index/metadata_type/_search", requestUrl);

 assertFalse(queryForMetric.isQueryOnlyOnScope());
From 00f8a22fb4a74f22d0a18ccca5a335b25adf1aca Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Tue, 24 Jul 2018 17:37:55 -0700
Subject: [PATCH 20/27] CR Fixes2

---
 .../dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java | 2 +-
 .../service/schema/ScopeAndMetricOnlySchemaRecordList.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java
index 45d882f15..582efa8fb 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/entity/ScopeAndMetricOnlySchemaRecord.java
@@ -100,7 +100,7 @@ public String toString() {
 return MessageFormat.format("ScopeAndMetricOnlySchemaRecord (Scope = {0} Metric = {1}", scope, metric);
 }

- public static String print(ScopeAndMetricOnlySchemaRecord msr) {
+ public static String getFieldsAsString(ScopeAndMetricOnlySchemaRecord msr) {

 StringBuilder sb = new StringBuilder(msr.getScope());
 sb.append(":");
diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java
index d75d1e2f8..5ed782393 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ScopeAndMetricOnlySchemaRecordList.java
@@ -48,7 +48,7 @@ public ScopeAndMetricOnlySchemaRecordList(List r
 public ScopeAndMetricOnlySchemaRecordList(List<ScopeAndMetricOnlySchemaRecord> records, HashAlgorithm algorithm) {
 for(ScopeAndMetricOnlySchemaRecord record : records) {
 String id = null;
- String scopeAndMetricName = ScopeAndMetricOnlySchemaRecord.print(record);
+ String scopeAndMetricName = ScopeAndMetricOnlySchemaRecord.getFieldsAsString(record);
 if(HashAlgorithm.MD5.equals(algorithm)) {
 id = DigestUtils.md5Hex(scopeAndMetricName);
 } else {
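The rename above makes the method's role clearer: getFieldsAsString produces the canonical "scope:metric" string that is then hashed into the Elasticsearch document id. Because the id is a digest of the record's fields, re-indexing the same pair is idempotent. A minimal sketch of the scheme (idFor is an illustrative name, not the service's API):

    import org.apache.commons.codec.digest.DigestUtils;

    public class DocIdSketch {

        // Deterministic document id: the same scope/metric pair always hashes to
        // the same id, so writing it twice updates one document instead of
        // creating duplicates. The ":" separator mirrors getFieldsAsString above.
        static String idFor(String scope, String metric) {
            return DigestUtils.md5Hex(scope + ":" + metric);
        }

        public static void main(String[] args) {
            System.out.println(idFor("system", "argus")); // stable across runs and JVMs
        }
    }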
From 4b54553f25d3c60da124f813a97c73a1f35fe177 Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Wed, 25 Jul 2018 16:24:22 -0700
Subject: [PATCH 21/27] Fix bloom filter size

---
 .../argus/service/schema/AbstractSchemaService.java | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
index d8b5067d7..027012a9e 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
@@ -307,13 +307,13 @@ public enum Property {
 BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"),

 /*
- * Estimated Filter Size using bloomFilter 10 million entries
- * https://hur.st/bloomfilter/?n=10000000&p=1.0E-5&m=&k= 28.56MiB
- * Storing in a Set 1M entries with avg length of 30 chars would be 1M * 30 * 2 B = 60B * 1M = 60 MB
- * If # of entries is 10 million, then it would be 600 MB resulting in savings in space.
+ * Estimated filter size using a bloom filter with 500 million entries:
+ * https://hur.st/bloomfilter/?n=500000000&p=1.0E-5&m=&k= gives 1.39GiB
+ * Storing 100M entries with an avg length of 30 chars in a Set would be 100M * 30 * 2 B = 60 B * 100M = 6 GB.
+ * If # of entries is 500 million, then it would be 30 GB, so the bloom filter is a large space savings.
 */
- BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "10000000"),
+ BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "500000000"),
 BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"),

 /*
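The sizes quoted in the comment above follow from the standard bloom filter sizing formula: m = -n ln p / (ln 2)^2 bits for n expected insertions at false-positive rate p, with k = (m/n) ln 2 optimal hash functions. A quick check of the 500-million figure (assuming the hur.st calculator linked in the comment uses the same formula):

    public class BloomSizeSketch {
        public static void main(String[] args) {
            long n = 500_000_000L; // expected insertions
            double p = 0.00001;    // target false-positive rate

            double bits = -n * Math.log(p) / (Math.log(2) * Math.log(2));
            double gib = bits / 8 / 1024 / 1024 / 1024;
            long k = Math.round((bits / n) * Math.log(2)); // optimal number of hash functions

            System.out.printf("%.2f GiB, k = %d%n", gib, k); // prints ~1.39 GiB, k = 17
        }
    }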
From 74b92cce4527b7a21c4e7c01feb958c219db3191 Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Wed, 25 Jul 2018 18:59:59 -0700
Subject: [PATCH 22/27] CR Fixes

---
 .../dva/argus/service/schema/ElasticSearchSchemaService.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
index 1ab93ad1c..fab83ed90 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
@@ -428,8 +428,8 @@ public List<MetricSchemaRecord> get(MetricSchemaRecordQuery query) {
 requestBody.put("scroll_id", scrollID);
 requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR);

- response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(),
- new StringEntity(new ObjectMapper().writeValueAsString(requestBody)));
+ String requestJson = new ObjectMapper().writeValueAsString(requestBody);
+ response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestJson));

 list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
 records.addAll(list.getRecords());
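For readers unfamiliar with the scroll API exercised here: the initial search returns a scroll id, and each follow-up POST replays that id to fetch the next page until a page comes back empty. A schematic of the loop with the REST plumbing stubbed out (SearchBackend, fetchAll, and the "1m" keep-alive are illustrative stand-ins for the service's _esRestClient calls and KEEP_SCROLL_CONTEXT_OPEN_FOR):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class ScrollLoopSketch {

        interface SearchBackend {
            // Stand-in for the REST round trip: takes the scroll request body,
            // returns the next scroll id and one page of hits.
            Map.Entry<String, List<String>> scroll(Map<String, String> requestBody);
        }

        static List<String> fetchAll(SearchBackend backend, String initialScrollId) {
            List<String> records = new ArrayList<>();
            String scrollId = initialScrollId;
            while (true) {
                Map<String, String> requestBody = new HashMap<>();
                requestBody.put("scroll_id", scrollId);
                requestBody.put("scroll", "1m"); // keep the scroll context open between calls

                Map.Entry<String, List<String>> page = backend.scroll(requestBody);
                if (page.getValue().isEmpty()) {
                    break; // an empty page signals the end of the result set
                }
                records.addAll(page.getValue());
                scrollId = page.getKey(); // ES may hand back a fresh scroll id each round
            }
            return records;
        }
    }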
From b0ebf89e79f69db1c1037f84dce94d181637532e Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Thu, 26 Jul 2018 14:53:44 -0700
Subject: [PATCH 23/27] Logging Fix

---
 .../dva/argus/service/monitor/DefaultMonitorService.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/monitor/DefaultMonitorService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/monitor/DefaultMonitorService.java
index a74ceff9e..ddf416639 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/monitor/DefaultMonitorService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/monitor/DefaultMonitorService.java
@@ -290,7 +290,7 @@ public double modifyCustomCounter(String name, double delta, Map
 Double value = _metrics.get(key);

 double newValue = value == null ? delta : value + delta;
- _logger.debug("Modifying {} counter for {} to {}.", name, tags, newValue);
+ _logger.debug("Modifying {} counter from {} to {}.", name, value, newValue);
 _metrics.put(key, newValue);
 return newValue;
 }

From 4cd2421573b3680f5565ba735ccd40a63ae9a237 Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Fri, 27 Jul 2018 11:08:57 -0700
Subject: [PATCH 24/27] CR Fixes

---
 .../argus/service/schema/ElasticSearchSchemaService.java | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
index fab83ed90..7416a396d 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
@@ -413,6 +413,7 @@ public List<MetricSchemaRecord> get(MetricSchemaRecordQuery query) {
 String queryJson = _constructTermQuery(query, from, scrollSize);

 try {
+ _logger.debug("get POST requestUrl {} queryJson {}", requestUrl, queryJson);
 Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));

 MetricSchemaRecordList list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
@@ -429,6 +430,7 @@ public List<MetricSchemaRecord> get(MetricSchemaRecordQuery query) {
 requestBody.put("scroll", KEEP_SCROLL_CONTEXT_OPEN_FOR);

 String requestJson = new ObjectMapper().writeValueAsString(requestBody);
+ _logger.debug("get Scroll POST requestUrl {} queryJson {}", requestUrl, queryJson);
 response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(requestJson));

 list = toEntity(extractResponse(response), new TypeReference<MetricSchemaRecordList>() {});
@@ -484,7 +486,7 @@ public List<MetricSchemaRecord> getUnique(MetricSchemaRecordQuery query, RecordT
 indexName = SCOPE_INDEX_NAME;
 typeName = SCOPE_TYPE_NAME;
 }
- else if (query.isQueryOnlyOnScopeAndMetric())
+ else if (query.isQueryOnlyOnScopeAndMetric() && (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type)))
 {
 indexName = SCOPE_AND_METRIC_INDEX_NAME;
 typeName = SCOPE_AND_METRIC_TYPE_NAME;
@@ -501,6 +503,7 @@ else if (query.isQueryOnlyOnScopeAndMetric())
 String queryJson = _constructTermAggregationQuery(query, type);

 try {
+ _logger.debug("getUnique POST requestUrl {} queryJson {}", requestUrl, queryJson);
 Response response = _esRestClient.performRequest(HttpMethod.POST.getName(), requestUrl, Collections.emptyMap(), new StringEntity(queryJson));
 String str = extractResponse(response);
 List<MetricSchemaRecord> records = SchemaService.constructMetricSchemaRecordsForType(
@@ -510,7 +513,7 @@ else if (query.isQueryOnlyOnScopeAndMetric())
 _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags);

 _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);

- } else if (query.isQueryOnlyOnScopeAndMetric()) {
+ } else if (query.isQueryOnlyOnScopeAndMetric() && (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type))) {

 _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags);

 _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
 } else {
@@ -518,7 +521,6 @@
 _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_COUNT, 1, tags);
 _monitorService.modifyCounter(Counter.SCHEMARECORDS_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
 }

-
 int fromIndex = query.getLimit() * (query.getPage() - 1);
 if(records.size() <= fromIndex) {
 return Collections.emptyList();
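The fromIndex context line above encodes the usual page-to-offset conversion for the paged queries: pages are 1-based, so page p with limit l starts at element l * (p - 1), and a page whose start lies at or beyond the result size is empty. For example:

    public class PageOffsetSketch {

        static int fromIndex(int limit, int page) {
            return limit * (page - 1); // page 1 -> offset 0, page 2 -> offset limit, ...
        }

        public static void main(String[] args) {
            // With limit 50: page 1 covers [0, 50), page 3 covers [100, 150).
            System.out.println(fromIndex(50, 1)); // 0
            System.out.println(fromIndex(50, 3)); // 100
        }
    }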
From 5fce367621978a285b5ecd3ae6a7e1b8d15811ea Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Fri, 27 Jul 2018 16:52:38 -0700
Subject: [PATCH 25/27] Add parameter to use scopemetricnames

---
 .../service/schema/ElasticSearchSchemaService.java | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
index 7416a396d..6e1fce7e6 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
@@ -101,6 +101,8 @@ public class ElasticSearchSchemaService extends AbstractSchemaService {
 private final int _bulkIndexingSize;
 private HashAlgorithm _idgenHashAlgo;

+ private final boolean _useScopeMetricNamesIndex;
+
 @Inject
 public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) {
 super(config);
@@ -151,6 +153,9 @@ public ElasticSearchSchemaService(SystemConfiguration config, MonitorService mon
 _bulkIndexingSize = Integer.parseInt(
 config.getValue(Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getName(), Property.ELASTICSEARCH_INDEXING_BATCH_SIZE.getDefaultValue()));

+ _useScopeMetricNamesIndex = Boolean.parseBoolean(
+ config.getValue(Property.ELASTICSEARCH_USE_SCOPE_AND_METRIC_INDEX.getName(), Property.ELASTICSEARCH_USE_SCOPE_AND_METRIC_INDEX.getDefaultValue()));
+
 String[] nodes = config.getValue(Property.ELASTICSEARCH_ENDPOINT.getName(), Property.ELASTICSEARCH_ENDPOINT.getDefaultValue()).split(",");
 HttpHost[] httpHosts = new HttpHost[nodes.length];

@@ -486,7 +491,8 @@ public List<MetricSchemaRecord> getUnique(MetricSchemaRecordQuery query, RecordT
 indexName = SCOPE_INDEX_NAME;
 typeName = SCOPE_TYPE_NAME;
 }
- else if (query.isQueryOnlyOnScopeAndMetric() && (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type)))
+ else if (_useScopeMetricNamesIndex && query.isQueryOnlyOnScopeAndMetric() &&
+ (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type)))
 {
 indexName = SCOPE_AND_METRIC_INDEX_NAME;
 typeName = SCOPE_AND_METRIC_TYPE_NAME;
@@ -513,7 +519,8 @@ else if (query.isQueryOnlyOnScopeAndMetric() && (RecordType.SCOPE.equals(type) |
 _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_COUNT, 1, tags);

 _monitorService.modifyCounter(Counter.SCOPENAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);

- } else if (query.isQueryOnlyOnScopeAndMetric() && (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type))) {
+ } else if (_useScopeMetricNamesIndex && query.isQueryOnlyOnScopeAndMetric() &&
+ (RecordType.SCOPE.equals(type) || RecordType.METRIC.equals(type))) {
 _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_COUNT, 1, tags);
 _monitorService.modifyCounter(Counter.SCOPEANDMETRICNAMES_QUERY_LATENCY, (System.currentTimeMillis() - start), tags);
 } else {
@@ -1344,6 +1351,9 @@ public enum Property {
 * https://www.elastic.co/guide/en/elasticsearch/guide/current/indexing-performance.html#_using_and_sizing_bulk_requests
 */
 ELASTICSEARCH_INDEXING_BATCH_SIZE("service.property.schema.elasticsearch.indexing.batch.size", "10000"),
+
+ ELASTICSEARCH_USE_SCOPE_AND_METRIC_INDEX("service.property.schema.elasticsearch.use.scopeandmetric.index", "false"),
+
 /** The hashing algorithm to use for generating document id. */
 ELASTICSEARCH_IDGEN_HASH_ALGO("service.property.schema.elasticsearch.idgen.hash.algo", "MD5"),

From fff0d7cce3065077d55744a62d3f7b95655a5096 Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Mon, 30 Jul 2018 14:17:30 -0700
Subject: [PATCH 26/27] Change defaults of bloomfilter insertions for uts

---
 .../dva/argus/service/schema/AbstractSchemaService.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
index 027012a9e..38c67a4ab 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/AbstractSchemaService.java
@@ -303,7 +303,7 @@ public enum Property {
 * If # of entries is 1 million, then it would be 30 MB resulting in savings in space.
 */

- BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "1000000"),
+ BLOOMFILTER_SCOPE_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.only.expected.number.insertions", "40"),
 BLOOMFILTER_SCOPE_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.only.error.rate", "0.00001"),

 /*
@@ -313,7 +313,7 @@ public enum Property {
 * If # of entries is 500 million, then it would be 30 GB, so the bloom filter is a large space savings.
 */

- BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "500000000"),
+ BLOOMFILTER_SCOPE_AND_METRIC_ONLY_EXPECTED_NUMBER_INSERTIONS("service.property.schema.bloomfilter.scope.and.metric.only.expected.number.insertions", "40"),
 BLOOMFILTER_SCOPE_AND_METRIC_ONLY_ERROR_RATE("service.property.schema.bloomfilter.scope.and.metric.only.error.rate", "0.00001"),

 /*

From 803c7712418230916a4e08a904ce21a54da40adc Mon Sep 17 00:00:00 2001
From: Naveen Reddy Karri
Date: Mon, 30 Jul 2018 15:07:41 -0700
Subject: [PATCH 27/27] Fix unit tests

---
 .../argus/service/schema/ElasticSearchSchemaService.java | 8 +++++++-
 .../service/schema/ElasticSearchSchemaServiceTest.java | 2 ++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
index 6e1fce7e6..1b6a5980c 100644
--- a/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
+++ b/ArgusCore/src/main/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaService.java
@@ -101,7 +101,7 @@ public class ElasticSearchSchemaService extends AbstractSchemaService {
 private final int _bulkIndexingSize;
 private HashAlgorithm _idgenHashAlgo;

- private final boolean _useScopeMetricNamesIndex;
+ private boolean _useScopeMetricNamesIndex;

 @Inject
 public ElasticSearchSchemaService(SystemConfiguration config, MonitorService monitorService) {
@@ -1073,6 +1073,12 @@ protected void setRestClient(RestClient restClient)
 this._esRestClient = restClient;
 }

+ /* Method to enable the ScopeMetricNames index. Used for testing. */
+ protected void enableScopeMetricNamesIndex()
+ {
+ this._useScopeMetricNamesIndex = true;
+ }
+
 /** Helper to process the response.
 * Throws a SystemException when the http status code is outside the range 200 - 300.
 * @param response ES response
diff --git a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
index 1756982f1..6f3e0c0f2 100644
--- a/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
+++ b/ArgusCore/src/test/java/com/salesforce/dva/argus/service/schema/ElasticSearchSchemaServiceTest.java
@@ -349,6 +349,8 @@ private ElasticSearchSchemaService _initializeSpyService(ElasticSearchSchemaServ
 service.setRestClient(restClient);

+ service.enableScopeMetricNamesIndex();
+
 ElasticSearchSchemaService spyService = spy(service);

 doReturn(reply).when(spyService).extractResponse(any());
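The extractResponse helper stubbed out in this last hunk is documented above as throwing a SystemException when the HTTP status falls outside 200 - 300, which is why the test replaces it with doReturn rather than hitting a real server. A minimal sketch of that guard (checkStatus is an illustrative name, and SystemException here stands in for the project's own exception type):

    public class ResponseGuardSketch {

        static class SystemException extends RuntimeException {
            SystemException(String message) { super(message); }
        }

        // Mirrors the documented contract: 2xx passes the body through, anything
        // else surfaces as an exception the caller cannot silently ignore.
        static String checkStatus(int statusCode, String body) {
            if (statusCode < 200 || statusCode >= 300) {
                throw new SystemException("Request failed with HTTP " + statusCode + ": " + body);
            }
            return body;
        }
    }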