
Merge pull request #449 from ArgusMonitoring/develop
merge develop into master for release
Philip Liew authored and GitHub Enterprise committed May 21, 2019
2 parents bd67850 + 96fce51 commit a99983d
Showing 56 changed files with 3,106 additions and 427 deletions.
@@ -36,6 +36,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

import static com.salesforce.dva.argus.system.SystemAssert.requireArgument;

@@ -252,7 +253,7 @@ private void setType(String type) {
*
* @param timestamp The timestamp for the annotation. Cannot be null.
*/
private void setTimestamp(Long timestamp) {
public void setTimestamp(Long timestamp) {
requireArgument(timestamp != null, "Timestamp cannot be null.");
_timestamp = timestamp;
}
@@ -297,5 +298,10 @@ public String toString() {

return MessageFormat.format(format, params);
}

public static String getIdentifierFieldsAsString(Annotation annotation) {
return new StringBuilder(annotation.getScope()).append(":").append(annotation.getMetric()).append(":")
.append(annotation.getTags().toString()).append(":").append(annotation.getType()).append(":").append(annotation.getTimestamp()).toString();
}
}
/* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved. */
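
For reference, a quick standalone sketch of the identifier format the new getIdentifierFieldsAsString helper produces. The scope, metric, tags, type, and timestamp values below are made up for illustration; the real helper reads them from an Annotation via its getters.

import java.util.Map;
import java.util.TreeMap;

public class AnnotationIdentifierSketch {
    public static void main(String[] args) {
        // Hypothetical field values; the real helper reads these from an Annotation
        // via getScope(), getMetric(), getTags(), getType() and getTimestamp().
        String scope = "argus.core";
        String metric = "alerts.scheduled";
        Map<String, String> tags = new TreeMap<>(); // tags are now kept sorted (see the TSDBEntity change below)
        tags.put("host", "web1");
        String type = "RELEASE";
        Long timestamp = 1558400000000L;

        // Same concatenation as getIdentifierFieldsAsString.
        String identifier = new StringBuilder(scope).append(":").append(metric).append(":")
                .append(tags.toString()).append(":").append(type).append(":").append(timestamp).toString();

        System.out.println(identifier); // argus.core:alerts.scheduled:{host=web1}:RELEASE:1558400000000
    }
}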
@@ -32,15 +32,16 @@
package com.salesforce.dva.argus.entity;

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.base.Objects;
import com.salesforce.dva.argus.service.tsdb.MetricQuery;
import com.salesforce.dva.argus.system.SystemAssert;

import java.io.Serializable;
import java.text.MessageFormat;
import java.util.Collections;
import java.util.Comparator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.SortedMap;
import java.util.TreeMap;

import static com.salesforce.dva.argus.system.SystemAssert.requireArgument;
@@ -58,16 +59,21 @@
* @author Tom Valine ([email protected]), Bhinav Sura ([email protected])
*/
@SuppressWarnings("serial")
public class Metric extends TSDBEntity implements Serializable {
public class Metric extends TSDBEntity implements Serializable, Comparable<Metric> {

private static final Comparator<Metric> METRIC_COMPARATOR = Comparator
.comparing((Metric m) -> m.getScope().toLowerCase())
.thenComparing(m -> m.getMetric().toLowerCase())
.thenComparing(m -> m.getTags().toString().toLowerCase());

//~ Instance fields ******************************************************************************************************************************

private String _namespace;
private String _displayName;
private String _units;
private final Map<Long, Double> _datapoints;
private final SortedMap<Long, Double> _datapoints;
private MetricQuery _query;
private MetatagsRecord _metatagsRecord = null;
private MetatagsRecord _metatagsRecord = null;

//~ Constructors *********************************************************************************************************************************

@@ -146,7 +152,7 @@ public void setNamespace(String namespace) {
* @return The map of time series data points. Will never be null, but may be empty.
*/
public Map<Long, Double> getDatapoints() {
return Collections.unmodifiableMap(_datapoints);
return Collections.unmodifiableSortedMap(_datapoints);
}

/**
@@ -380,5 +386,10 @@ public MetatagsRecord getMetatagsRecord() {
public void setMetatagsRecord(MetatagsRecord metatagsRec) {
_metatagsRecord = metatagsRec;
}

@Override
public int compareTo(Metric m) {
return METRIC_COMPARATOR.compare(this, m);
}
}
/* Copyright (c) 2016, Salesforce.com, Inc. All rights reserved. */
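
A minimal standalone sketch of the ordering the new METRIC_COMPARATOR and compareTo give: case-insensitive by scope, then by metric name, then by the tag map rendered as a string. The Metric constructor is not part of this diff, so plain string triples stand in for Metric instances here.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class MetricOrderingSketch {
    public static void main(String[] args) {
        // Each triple stands in for a Metric as {scope, metric, tags-as-string};
        // the real comparator reads these via getScope(), getMetric() and getTags().
        List<String[]> keys = Arrays.asList(
                new String[] {"System", "cpu.usage",        "{host=web2}"},
                new String[] {"system", "CPU.idle",         "{host=web1}"},
                new String[] {"argus",  "alerts.evaluated", "{}"});

        // Mirrors METRIC_COMPARATOR: lower-cased scope, then metric, then tag string.
        Comparator<String[]> order = Comparator
                .comparing((String[] k) -> k[0].toLowerCase())
                .thenComparing(k -> k[1].toLowerCase())
                .thenComparing(k -> k[2].toLowerCase());

        // Prints argus:alerts.evaluated:{}, then system:CPU.idle:{host=web1}, then System:cpu.usage:{host=web2}.
        keys.stream().sorted(order).forEach(k -> System.out.println(String.join(":", k)));
    }
}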
@@ -48,7 +48,7 @@ public class MetricSchemaRecord extends AbstractSchemaRecord {
public static final String RETENTION_DISCOVERY = "_retention_discovery_";
public static final String EXPIRATION_TS = "ets"; //expiration timestamp

public static final int DEFAULT_RETENTION_DISCOVERY_DAYS = 45;
public static final int DEFAULT_RETENTION_DISCOVERY_DAYS = 52;
public static final int MAX_RETENTION_DISCOVERY_DAYS = 120;

//~ Instance fields ******************************************************************************************************************************
@@ -36,6 +36,7 @@
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

import static com.salesforce.dva.argus.system.SystemAssert.requireArgument;

@@ -60,7 +61,7 @@ public abstract class TSDBEntity implements Serializable {
private String _uid;
private String _scope;
private String _metric;
private final Map<String, String> _tags = new HashMap<>(0);
private final Map<String, String> _tags = new TreeMap<>();

//~ Constructors *********************************************************************************************************************************

@@ -140,7 +141,7 @@ protected void setUid(String uid) {
* @return The tags for a metric. Will never be null but may be empty.
*/
public Map<String, String> getTags() {
Map<String, String> result = new HashMap<>();
Map<String, String> result = new TreeMap<>();

for (Map.Entry<String, String> entry : _tags.entrySet()) {
String key = entry.getKey();
@@ -465,7 +465,9 @@ public enum NotificationStatus {
/** Indicates a notification for a triggering condition. */
TRIGGERED,
/** Indicates a notification for when a triggering condition is cleared. */
CLEARED;
CLEARED,
/** Indicates a notification was sent out. Used by Refocus notifiers. */
NOTIFIED;
}

/**
@@ -0,0 +1,29 @@
package com.salesforce.dva.argus.service;

import java.util.List;

import com.salesforce.dva.argus.entity.Annotation;
import com.salesforce.dva.argus.service.tsdb.AnnotationQuery;

/**
* Provides methods for putting or retrieving annotations from storage.
*
* @author Dilip Devaraj ([email protected])
*/
public interface AnnotationStorageService extends Service {
/**
* Writes annotation data. Any existing data is overwritten.
*
* @param annotations The list of annotations to write. Cannot be null, but may be empty.
*/
void putAnnotations(List<Annotation> annotations);

/**
* Reads annotation data.
*
* @param queries The list of queries to execute. Cannot be null, but may be empty.
*
* @return The query results. Will never be null, but may be empty.
*/
List<Annotation> getAnnotations(List<AnnotationQuery> queries);
}
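
A hedged usage sketch of the new interface from a hypothetical caller; the AnnotationRoundTrip class below is illustrative only, and only the two methods declared above are assumed. How the service instance and the Annotation/AnnotationQuery objects are obtained is left to the caller.

import java.util.List;

import com.salesforce.dva.argus.entity.Annotation;
import com.salesforce.dva.argus.service.AnnotationStorageService;
import com.salesforce.dva.argus.service.tsdb.AnnotationQuery;

public class AnnotationRoundTrip {
    private final AnnotationStorageService storageService;

    public AnnotationRoundTrip(AnnotationStorageService storageService) {
        this.storageService = storageService;
    }

    public List<Annotation> writeThenRead(List<Annotation> annotations, List<AnnotationQuery> queries) {
        storageService.putAnnotations(annotations);    // existing data for the same keys is overwritten
        return storageService.getAnnotations(queries); // never null, but may be empty
    }
}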
@@ -0,0 +1,131 @@
package com.salesforce.dva.argus.service;

import com.salesforce.dva.argus.service.alert.notifier.GusTransport;
import org.apache.commons.lang.StringUtils;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.DefaultProxyRoutePlanner;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.net.ssl.SSLContext;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.util.Optional;

import static com.salesforce.dva.argus.system.SystemAssert.requireArgument;

public class ArgusTransport {
private static final Logger LOGGER = LoggerFactory.getLogger(ArgusTransport.class);
private static final int CONNECTION_TIMEOUT_MILLIS = 10000;
private static final int READ_TIMEOUT_MILLIS = 10000;

protected final CloseableHttpClient httpClient;

public ArgusTransport(Optional<String> proxyHost,
Optional<Integer> proxyPort,
int connectionPoolMaxSize,
int connectionPoolMaxPerRoute) {
this.httpClient = buildHttpClient(proxyHost, proxyPort, connectionPoolMaxSize, connectionPoolMaxPerRoute);
}

public ArgusTransport(String proxyHost, String proxyPort, int connectionPoolMaxSize, int connectionPoolMaxPerRoute) {
this(validateProxyHostAndPortStrings(proxyHost, proxyPort) ? Optional.of(proxyHost) : Optional.empty(),
validateProxyHostAndPortStrings(proxyHost, proxyPort) ? Optional.of(Integer.parseInt(proxyPort)) : Optional.empty(),
connectionPoolMaxSize, connectionPoolMaxPerRoute);
}

public static boolean validateProxyHostAndPortStrings(String proxyHost, String proxyPort) {
requireArgument(StringUtils.isBlank(proxyPort) || StringUtils.isNumeric(proxyPort),
"proxyPort must be numeric if present");
return StringUtils.isNotBlank(proxyHost) && StringUtils.isNotBlank(proxyPort) && StringUtils.isNumeric(proxyPort);
}

/**
* Get HttpClient.
*
* @return HttpClient
*/
public CloseableHttpClient getHttpClient() {
return httpClient;
}

protected static SSLContext getSSLContext() {
SSLContext sslContext = null;
try {
sslContext = SSLContext.getInstance("TLS");
sslContext.init(null, null, null);
} catch (NoSuchAlgorithmException | KeyManagementException e) {
LOGGER.error("Failed to init SSLContext", e);
}
return sslContext;
}

protected static PoolingHttpClientConnectionManager buildConnectionManager(int connectionPoolMaxSize,
int connectionPoolMaxPerRoute,
SSLContext sslContext) {
requireArgument(connectionPoolMaxSize > 0,
String.format("connectionPoolMaxSize(%d) must be > 0", connectionPoolMaxSize));
requireArgument(connectionPoolMaxPerRoute > 0,
String.format("connectionPoolMaxPerRoute(%d) must be > 0", connectionPoolMaxPerRoute));

RegistryBuilder<ConnectionSocketFactory> rb = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.getSocketFactory());
if (sslContext != null) {
rb.register("https", new SSLConnectionSocketFactory(sslContext));
}
Registry<ConnectionSocketFactory> r = rb.build();
PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(r);
cm.setMaxTotal(connectionPoolMaxSize);
cm.setDefaultMaxPerRoute(connectionPoolMaxPerRoute);
LOGGER.info(String.format("Creating connection manager with maxPoolSize=%d, maxPerRoute=%d",
connectionPoolMaxSize,
connectionPoolMaxPerRoute));
return cm;
}

protected static CloseableHttpClient buildHttpClient(Optional<String> proxyHost,
Optional<Integer> proxyPort,
int connectionPoolMaxSize,
int connectionPoolMaxPerRoute) {
requireArgument(!proxyHost.isPresent() || StringUtils.isNotBlank(proxyHost.get()),
String.format("proxyHost(%s) must not be blank if present", proxyHost.isPresent() ? proxyHost.get() : "null"));
requireArgument(!proxyPort.isPresent() || proxyPort.get() > 0,
String.format("proxyPort(%s) must be > 0 if present", proxyPort.isPresent() ? proxyPort.get().toString() : "null"));

SSLContext sslContext = getSSLContext();
PoolingHttpClientConnectionManager cm = buildConnectionManager(connectionPoolMaxSize, connectionPoolMaxPerRoute, sslContext);

RequestConfig requestConfig = RequestConfig.custom()
.setConnectTimeout(CONNECTION_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(CONNECTION_TIMEOUT_MILLIS)
.setSocketTimeout(READ_TIMEOUT_MILLIS)
.build();

HttpClientBuilder builder = HttpClients.custom()
.setDefaultRequestConfig(requestConfig)
.setConnectionManager(cm);
if (sslContext != null) {
builder = builder
.setSSLContext(sslContext)
.setSSLHostnameVerifier(new NoopHostnameVerifier());
}
if (proxyHost.isPresent() && proxyHost.get().length() > 0 && proxyPort.isPresent()) {
HttpHost proxy = new HttpHost(proxyHost.get(), proxyPort.get().intValue());
DefaultProxyRoutePlanner routePlanner = new DefaultProxyRoutePlanner(proxy);
builder = builder.setRoutePlanner(routePlanner);
}
return builder.build();
}

}
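
A small usage sketch for the new ArgusTransport, based only on the constructors and getHttpClient() shown above; the endpoint URL and pool sizes are placeholders chosen for illustration.

import java.util.Optional;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;

import com.salesforce.dva.argus.service.ArgusTransport;

public class ArgusTransportExample {
    public static void main(String[] args) throws Exception {
        // No proxy, so both Optionals are empty; pool sizes are arbitrary examples.
        ArgusTransport transport = new ArgusTransport(Optional.empty(), Optional.empty(), 10, 5);
        CloseableHttpClient client = transport.getHttpClient();

        // The pooled client is intended to be reused across requests; the URL is a placeholder.
        try (CloseableHttpResponse response = client.execute(new HttpGet("https://example.com/health"))) {
            System.out.println(response.getStatusLine());
        }
    }
}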
@@ -123,7 +123,11 @@ static boolean isWildcardQuery(MetricQuery query) {
}

static int maxTimeseriesAllowed(MetricQuery query, long maxDataPointsPerResponse) {
long timeWindowInMillis = query.getEndTimestamp() - query.getStartTimestamp();
long timeWindowInMillis = getTimeWindowInMillis(query.getStartTimestamp(), query.getEndTimestamp());
// return max datapoints for very short (up to 2 second) queries
if(timeWindowInMillis<=2000L) {
return (int)maxDataPointsPerResponse;
}
long downsamplingDivisor = (query.getDownsamplingPeriod() == null || query.getDownsamplingPeriod() <= 0) ? 60000l : query.getDownsamplingPeriod();
downsamplingDivisor = (timeWindowInMillis > downsamplingDivisor) ? downsamplingDivisor : timeWindowInMillis;
long samplingPeriod = (downsamplingDivisor>DATAPOINT_SAMPLING_FREQ_IN_MILLIS) ? DATAPOINT_SAMPLING_FREQ_IN_MILLIS : downsamplingDivisor;
@@ -135,6 +139,25 @@ static int maxTimeseriesAllowed(MetricQuery query, long maxDataPointsPerResponse

return (int) (maxDataPointsPerResponse / numDownsampledDPsPerSeries);
}


static long getTimeWindowInMillis(long startTimestamp, long endTimestamp) {
// handling case when start or end timestamp is specified in seconds
if(startTimestamp*1000<System.currentTimeMillis()) {
startTimestamp = startTimestamp*1000;
}

if(endTimestamp*1000<System.currentTimeMillis()) {
endTimestamp = endTimestamp*1000;
}

long timeWindowInMillis = endTimestamp - startTimestamp;

if(timeWindowInMillis>TSDBService.METRICS_RETENTION_PERIOD_MILLIS) {
timeWindowInMillis = TSDBService.METRICS_RETENTION_PERIOD_MILLIS;
}
return timeWindowInMillis;
}

static int numApproxTimeseriesForQuery(MetricQuery mq) {
int count = 1;
@@ -146,25 +169,26 @@ static int numApproxTimeseriesForQuery(MetricQuery mq) {
return count;
}

static void throwMaximumDatapointsExceededException(MetricQuery query, long maxDataPointsPerQuery, MonitorService monitorService, Logger logger) throws WildcardExpansionLimitExceededException{
// We are throwing the exception only when the downsampler is absent,
// as we want to give users some time to adjust their queries which have downsampler in them

if(query.getDownsamplingPeriod()==null || query.getDownsamplingPeriod()==0) {
throw new WildcardExpansionLimitExceededException(MessageFormat.format(EXCEPTION_MESSAGE, maxDataPointsPerQuery)) ;
static void throwMaximumDatapointsExceededException(MetricQuery query, long maxDataPointsPerQuery, boolean enforceDatapointLimit, MonitorService monitorService, Logger logger) throws WildcardExpansionLimitExceededException{
if((query.getDownsamplingPeriod()!=null && query.getDownsamplingPeriod()!=0) || enforceDatapointLimit) {
if(monitorService!=null) {
Map<String, String> tags = new HashMap<>();
tags.put("scope", TSDBEntity.replaceUnsupportedChars(query.getScope()));
tags.put("metric", TSDBEntity.replaceUnsupportedChars(query.getMetric()));
if(RequestContextHolder.getRequestContext()!=null) {
tags.put("user", RequestContextHolder.getRequestContext().getUserName());
}else {
tags.put("user", "unknown");
}
monitorService.modifyCounter(Counter.QUERY_DATAPOINTS_LIMIT_EXCEEDED, 1, tags);
logger.error("Maximum datapoints limit exceeded for query - " + query.toString() + ", user - "+tags.get("user"));
}
}

if(monitorService!=null) {
Map<String, String> tags = new HashMap<>();
tags.put("scope", TSDBEntity.replaceUnsupportedChars(query.getScope()));
tags.put("metric", TSDBEntity.replaceUnsupportedChars(query.getMetric()));
if(RequestContextHolder.getRequestContext()!=null) {
tags.put("user", RequestContextHolder.getRequestContext().getUserName());
}else {
tags.put("user", "unknown");
}
monitorService.modifyCounter(Counter.QUERY_DATAPOINTS_LIMIT_EXCEEDED, 1, tags);
logger.error("Maximum datapoints limit exceeded for query - " + query.toString() + ", user - "+tags.get("user"));
// We are throwing the exception only when the downsampler is absent,
// as we want to give users some time to adjust their queries which have downsampler in them, unless the enforceDatapointLimit flag is true
if(query.getDownsamplingPeriod()==null || query.getDownsamplingPeriod()==0 || enforceDatapointLimit) {
throw new WildcardExpansionLimitExceededException(MessageFormat.format(EXCEPTION_MESSAGE, maxDataPointsPerQuery)) ;
}
}
}
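
A standalone sketch of the normalization the new getTimeWindowInMillis applies, for illustration only; the retention cap below is a made-up constant, whereas the real code uses TSDBService.METRICS_RETENTION_PERIOD_MILLIS.

public class TimeWindowSketch {
    // Placeholder cap; the real code uses TSDBService.METRICS_RETENTION_PERIOD_MILLIS.
    private static final long RETENTION_PERIOD_MILLIS = 400L * 24 * 60 * 60 * 1000;

    // Same idea as getTimeWindowInMillis: a timestamp that is still in the past after
    // multiplying by 1000 is assumed to be in seconds and is scaled to milliseconds,
    // then the resulting window is capped at the retention period.
    static long timeWindowInMillis(long startTimestamp, long endTimestamp) {
        long now = System.currentTimeMillis();
        if (startTimestamp * 1000 < now) {
            startTimestamp = startTimestamp * 1000;
        }
        if (endTimestamp * 1000 < now) {
            endTimestamp = endTimestamp * 1000;
        }
        return Math.min(endTimestamp - startTimestamp, RETENTION_PERIOD_MILLIS);
    }

    public static void main(String[] args) {
        // Two second-resolution timestamps one hour apart are both scaled to millis,
        // so the window comes out as 3,600,000 ms.
        System.out.println(timeWindowInMillis(1558300000L, 1558303600L));
    }
}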