Use of com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet in the awslabs/aws-athena-query-federation project:
class MetricsRecordHandlerTest, method readMetricsWithConstraint.
@Test
public void readMetricsWithConstraint() throws Exception {
logger.info("readMetricsWithConstraint: enter");
String namespace = "namespace";
String dimName = "dimName";
String dimValue = "dimValye";
int numMetrics = 100;
AtomicLong numCalls = new AtomicLong(0);
when(mockMetrics.listMetrics(any(ListMetricsRequest.class))).thenAnswer((InvocationOnMock invocation) -> {
ListMetricsRequest request = invocation.getArgumentAt(0, ListMetricsRequest.class);
numCalls.incrementAndGet();
// assert that the namespace filter was indeed pushed down
assertEquals(namespace, request.getNamespace());
String nextToken = (request.getNextToken() == null) ? "valid" : null;
List<Metric> metrics = new ArrayList<>();
for (int i = 0; i < numMetrics; i++) {
metrics.add(new Metric().withNamespace(namespace).withMetricName("metric-" + i).withDimensions(new Dimension().withName(dimName).withValue(dimValue)));
metrics.add(new Metric().withNamespace(namespace + i).withMetricName("metric-" + i));
}
return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics);
});
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put(NAMESPACE_FIELD, makeStringEquals(allocator, namespace));
constraintsMap.put(DIMENSION_NAME_FIELD, makeStringEquals(allocator, dimName));
constraintsMap.put(DIMENSION_VALUE_FIELD, makeStringEquals(allocator, dimValue));
S3SpillLocation spillLocation = S3SpillLocation.newBuilder().withBucket(UUID.randomUUID().toString()).withSplitId(UUID.randomUUID().toString()).withQueryId(UUID.randomUUID().toString()).withIsDirectory(true).build();
Split split = Split.newBuilder(spillLocation, keyFactory.create()).build();
ReadRecordsRequest request = new ReadRecordsRequest(identity, "catalog", "queryId-" + System.currentTimeMillis(), METRICS_TABLE_NAME, METRIC_TABLE.getSchema(), split, new Constraints(constraintsMap), 100_000_000_000L, // 100GB don't expect this to spill
100_000_000_000L);
RecordResponse rawResponse = handler.doReadRecords(allocator, request);
assertTrue(rawResponse instanceof ReadRecordsResponse);
ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
logger.info("readMetricsWithConstraint: rows[{}]", response.getRecordCount());
assertEquals(numCalls.get() * numMetrics, response.getRecords().getRowCount());
logger.info("readMetricsWithConstraint: {}", BlockUtils.rowToString(response.getRecords(), 0));
logger.info("readMetricsWithConstraint: exit");
}
Use of com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet in the awslabs/aws-athena-query-federation project:
class MetricUtilsTest, method pushDownPredicate.
@Test
public void pushDownPredicate() {
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put(NAMESPACE_FIELD, makeStringEquals(allocator, "match1"));
constraintsMap.put(METRIC_NAME_FIELD, makeStringEquals(allocator, "match2"));
constraintsMap.put(STATISTIC_FIELD, makeStringEquals(allocator, "match3"));
constraintsMap.put(DIMENSION_NAME_FIELD, makeStringEquals(allocator, "match4"));
constraintsMap.put(DIMENSION_VALUE_FIELD, makeStringEquals(allocator, "match5"));
ListMetricsRequest request = new ListMetricsRequest();
MetricUtils.pushDownPredicate(new Constraints(constraintsMap), request);
assertEquals("match1", request.getNamespace());
assertEquals("match2", request.getMetricName());
assertEquals(1, request.getDimensions().size());
assertEquals(new DimensionFilter().withName("match4").withValue("match5"), request.getDimensions().get(0));
}
Use of com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet in the awslabs/aws-athena-query-federation project:
class MetricUtilsTest, method makeGetMetricDataRequest.
@Test
public void makeGetMetricDataRequest() {
String schema = "schema";
String table = "table";
Integer period = 60;
String statistic = "p90";
String metricName = "metricName";
String namespace = "namespace";
List<Dimension> dimensions = new ArrayList<>();
dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1"));
dimensions.add(new Dimension().withName("dim_name2").withValue("dim_value2"));
List<MetricStat> metricStats = new ArrayList<>();
metricStats.add(new MetricStat().withMetric(new Metric().withNamespace(namespace).withMetricName(metricName).withDimensions(dimensions)).withPeriod(60).withStat(statistic));
Split split = Split.newBuilder(null, null).add(NAMESPACE_FIELD, namespace).add(METRIC_NAME_FIELD, metricName).add(PERIOD_FIELD, String.valueOf(period)).add(STATISTIC_FIELD, statistic).add(SERIALIZED_METRIC_STATS_FIELD_NAME, MetricStatSerDe.serialize(metricStats)).build();
Schema schemaForRead = SchemaBuilder.newBuilder().addStringField(METRIC_NAME_FIELD).build();
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put(TIMESTAMP_FIELD, SortedRangeSet.copyOf(Types.MinorType.BIGINT.getType(), ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.BIGINT.getType(), 1L)), false));
ReadRecordsRequest request = new ReadRecordsRequest(identity, catalog, "queryId-" + System.currentTimeMillis(), new TableName(schema, table), schemaForRead, split, new Constraints(constraintsMap), // 100GB don't expect this to spill
100_000_000_000L, 100_000_000_000L);
GetMetricDataRequest actual = MetricUtils.makeGetMetricDataRequest(request);
assertEquals(1, actual.getMetricDataQueries().size());
assertNotNull(actual.getMetricDataQueries().get(0).getId());
MetricStat metricStat = actual.getMetricDataQueries().get(0).getMetricStat();
assertNotNull(metricStat);
assertEquals(metricName, metricStat.getMetric().getMetricName());
assertEquals(namespace, metricStat.getMetric().getNamespace());
assertEquals(statistic, metricStat.getStat());
assertEquals(period, metricStat.getPeriod());
assertEquals(2, metricStat.getMetric().getDimensions().size());
assertEquals(1000L, actual.getStartTime().getTime());
assertTrue(actual.getStartTime().getTime() <= System.currentTimeMillis() + 1_000);
}
Use of com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet in the awslabs/aws-athena-query-federation project:
class MetricUtils, method pushDownPredicate.
/**
* Attempts to push the supplied predicate constraints onto the Cloudwatch Metrics request.
*/
protected static void pushDownPredicate(Constraints constraints, ListMetricsRequest listMetricsRequest) {
Map<String, ValueSet> summary = constraints.getSummary();
ValueSet namespaceConstraint = summary.get(NAMESPACE_FIELD);
if (namespaceConstraint != null && namespaceConstraint.isSingleValue()) {
listMetricsRequest.setNamespace(namespaceConstraint.getSingleValue().toString());
}
ValueSet metricConstraint = summary.get(METRIC_NAME_FIELD);
if (metricConstraint != null && metricConstraint.isSingleValue()) {
listMetricsRequest.setMetricName(metricConstraint.getSingleValue().toString());
}
ValueSet dimensionNameConstraint = summary.get(DIMENSION_NAME_FIELD);
ValueSet dimensionValueConstraint = summary.get(DIMENSION_VALUE_FIELD);
if (dimensionNameConstraint != null && dimensionNameConstraint.isSingleValue() && dimensionValueConstraint != null && dimensionValueConstraint.isSingleValue()) {
DimensionFilter filter = new DimensionFilter().withName(dimensionNameConstraint.getSingleValue().toString()).withValue(dimensionValueConstraint.getSingleValue().toString());
listMetricsRequest.setDimensions(Collections.singletonList(filter));
}
}
Use of com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet in the awslabs/aws-athena-query-federation project:
class MetricUtils, method makeGetMetricDataRequest.
/**
* Creates a Cloudwatch Metrics sample data request from the provided inputs
*
* @param readRecordsRequest The RecordReadRequest to make into a Cloudwatch Metrics Data request.
* @return The Cloudwatch Metrics Data request that matches the requested read operation.
*/
protected static GetMetricDataRequest makeGetMetricDataRequest(ReadRecordsRequest readRecordsRequest) {
Split split = readRecordsRequest.getSplit();
String serializedMetricStats = split.getProperty(MetricStatSerDe.SERIALIZED_METRIC_STATS_FIELD_NAME);
List<MetricStat> metricStats = MetricStatSerDe.deserialize(serializedMetricStats);
GetMetricDataRequest dataRequest = new GetMetricDataRequest();
com.amazonaws.services.cloudwatch.model.Metric metric = new com.amazonaws.services.cloudwatch.model.Metric();
metric.setNamespace(split.getProperty(NAMESPACE_FIELD));
metric.setMetricName(split.getProperty(METRIC_NAME_FIELD));
List<MetricDataQuery> metricDataQueries = new ArrayList<>();
int metricId = 1;
for (MetricStat nextMetricStat : metricStats) {
metricDataQueries.add(new MetricDataQuery().withMetricStat(nextMetricStat).withId("m" + metricId++));
}
dataRequest.withMetricDataQueries(metricDataQueries);
ValueSet timeConstraint = readRecordsRequest.getConstraints().getSummary().get(TIMESTAMP_FIELD);
if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) {
// SortedRangeSet is how >, <, between is represented which are easiest and most common when
// searching logs so we attempt to push that down here as an optimization. SQL can represent complex
// overlapping ranges which Cloudwatch can not support so this is not a replacement for applying
// constraints using the ConstraintEvaluator.
Range basicPredicate = ((SortedRangeSet) timeConstraint).getSpan();
if (!basicPredicate.getLow().isNullValue()) {
Long lowerBound = (Long) basicPredicate.getLow().getValue();
// TODO: confirm timezone handling
logger.info("makeGetMetricsRequest: with startTime " + (lowerBound * 1000) + " " + new Date(lowerBound * 1000));
dataRequest.withStartTime(new Date(lowerBound * 1000));
} else {
// TODO: confirm timezone handling
dataRequest.withStartTime(new Date(0));
}
if (!basicPredicate.getHigh().isNullValue()) {
Long upperBound = (Long) basicPredicate.getHigh().getValue();
// TODO: confirm timezone handling
logger.info("makeGetMetricsRequest: with endTime " + (upperBound * 1000) + " " + new Date(upperBound * 1000));
dataRequest.withEndTime(new Date(upperBound * 1000));
} else {
// TODO: confirm timezone handling
dataRequest.withEndTime(new Date(System.currentTimeMillis()));
}
} else {
// TODO: confirm timezone handling
dataRequest.withStartTime(new Date(0));
dataRequest.withEndTime(new Date(System.currentTimeMillis()));
}
return dataRequest;
}
Aggregations