Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
The class MetricsRecordHandlerTest, method readMetricsWithConstraint:
@Test
public void readMetricsWithConstraint() throws Exception {
    logger.info("readMetricsWithConstraint: enter");
    String namespace = "namespace";
    String dimName = "dimName";
    String dimValue = "dimValue";
    int numMetrics = 100;
    AtomicLong numCalls = new AtomicLong(0);
    when(mockMetrics.listMetrics(any(ListMetricsRequest.class))).thenAnswer((InvocationOnMock invocation) -> {
        ListMetricsRequest request = invocation.getArgumentAt(0, ListMetricsRequest.class);
        numCalls.incrementAndGet();
        // assert that the namespace filter was indeed pushed down
        assertEquals(namespace, request.getNamespace());
        // return a next token on the first call, then end pagination on the second
        String nextToken = (request.getNextToken() == null) ? "valid" : null;
        List<Metric> metrics = new ArrayList<>();
        for (int i = 0; i < numMetrics; i++) {
            // one metric that matches the constrained namespace and dimension...
            metrics.add(new Metric().withNamespace(namespace)
                    .withMetricName("metric-" + i)
                    .withDimensions(new Dimension().withName(dimName).withValue(dimValue)));
            // ...and one that does not, which the handler should filter out
            metrics.add(new Metric().withNamespace(namespace + i).withMetricName("metric-" + i));
        }
        return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics);
    });
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put(NAMESPACE_FIELD, makeStringEquals(allocator, namespace));
    constraintsMap.put(DIMENSION_NAME_FIELD, makeStringEquals(allocator, dimName));
    constraintsMap.put(DIMENSION_VALUE_FIELD, makeStringEquals(allocator, dimValue));
    S3SpillLocation spillLocation = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    Split split = Split.newBuilder(spillLocation, keyFactory.create()).build();
    ReadRecordsRequest request = new ReadRecordsRequest(identity,
            "catalog",
            "queryId-" + System.currentTimeMillis(),
            METRICS_TABLE_NAME,
            METRIC_TABLE.getSchema(),
            split,
            new Constraints(constraintsMap),
            100_000_000_000L, // 100GB, don't expect this to spill
            100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("readMetricsWithConstraint: rows[{}]", response.getRecordCount());
    // each page contributed numMetrics matching rows; the non-matching ones were filtered
    assertEquals(numCalls.get() * numMetrics, response.getRecords().getRowCount());
    logger.info("readMetricsWithConstraint: {}", BlockUtils.rowToString(response.getRecords(), 0));
    logger.info("readMetricsWithConstraint: exit");
}
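The makeStringEquals helper above is defined elsewhere in the test class and is not part of this snippet. A minimal sketch of what it plausibly looks like, assuming the SDK's EquatableValueSet builder:

private static ValueSet makeStringEquals(BlockAllocator allocator, String value) {
    // a single-value whitelist: the resulting constraint reads as "column = value"
    return EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, false)
            .add(value)
            .build();
}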
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
The class MetricUtilsTest, method makeGetMetricDataRequest:
@Test
public void makeGetMetricDataRequest() {
    String schema = "schema";
    String table = "table";
    Integer period = 60;
    String statistic = "p90";
    String metricName = "metricName";
    String namespace = "namespace";
    List<Dimension> dimensions = new ArrayList<>();
    dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1"));
    dimensions.add(new Dimension().withName("dim_name2").withValue("dim_value2"));
    List<MetricStat> metricStats = new ArrayList<>();
    metricStats.add(new MetricStat()
            .withMetric(new Metric()
                    .withNamespace(namespace)
                    .withMetricName(metricName)
                    .withDimensions(dimensions))
            .withPeriod(period)
            .withStat(statistic));
    Split split = Split.newBuilder(null, null)
            .add(NAMESPACE_FIELD, namespace)
            .add(METRIC_NAME_FIELD, metricName)
            .add(PERIOD_FIELD, String.valueOf(period))
            .add(STATISTIC_FIELD, statistic)
            .add(SERIALIZED_METRIC_STATS_FIELD_NAME, MetricStatSerDe.serialize(metricStats))
            .build();
    Schema schemaForRead = SchemaBuilder.newBuilder().addStringField(METRIC_NAME_FIELD).build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    // timestamp > 1 (seconds) should push the query's start time down to 1000ms
    constraintsMap.put(TIMESTAMP_FIELD, SortedRangeSet.copyOf(Types.MinorType.BIGINT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.BIGINT.getType(), 1L)), false));
    ReadRecordsRequest request = new ReadRecordsRequest(identity,
            catalog,
            "queryId-" + System.currentTimeMillis(),
            new TableName(schema, table),
            schemaForRead,
            split,
            new Constraints(constraintsMap),
            100_000_000_000L, // 100GB, don't expect this to spill
            100_000_000_000L);
    GetMetricDataRequest actual = MetricUtils.makeGetMetricDataRequest(request);
    assertEquals(1, actual.getMetricDataQueries().size());
    assertNotNull(actual.getMetricDataQueries().get(0).getId());
    MetricStat metricStat = actual.getMetricDataQueries().get(0).getMetricStat();
    assertNotNull(metricStat);
    assertEquals(metricName, metricStat.getMetric().getMetricName());
    assertEquals(namespace, metricStat.getMetric().getNamespace());
    assertEquals(statistic, metricStat.getStat());
    assertEquals(period, metricStat.getPeriod());
    assertEquals(2, metricStat.getMetric().getDimensions().size());
    assertEquals(1000L, actual.getStartTime().getTime());
    // with no upper bound on the timestamp constraint, the end time defaults to (roughly) now;
    // the original line re-checked getStartTime(), which is vacuous after the assert above
    assertTrue(actual.getEndTime().getTime() <= System.currentTimeMillis() + 1_000);
}
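The split carries the metric stats as an opaque string via MetricStatSerDe. A quick round-trip sketch, assuming the SerDe exposes the matching deserialize that the record handler needs when it reads the split back:

List<MetricStat> recovered = MetricStatSerDe.deserialize(MetricStatSerDe.serialize(metricStats));
assertEquals(metricStats.size(), recovered.size());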
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
The class CloudwatchRecordHandlerTest, method doReadRecordsNoSpill:
@Test
public void doReadRecordsNoSpill() throws Exception {
    logger.info("doReadRecordsNoSpill: enter");
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("time", SortedRangeSet.copyOf(Types.MinorType.BIGINT.getType(),
            ImmutableList.of(Range.equal(allocator, Types.MinorType.BIGINT.getType(), 100L)), false));
    S3SpillLocation spillLocation = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    Split split = Split.newBuilder(spillLocation, keyFactory.create())
            .add(CloudwatchMetadataHandler.LOG_STREAM_FIELD, "table")
            .build();
    ReadRecordsRequest request = new ReadRecordsRequest(identity, "catalog",
            "queryId-" + System.currentTimeMillis(), new TableName("schema", "table"),
            schemaForRead, split, new Constraints(constraintsMap),
            100_000_000_000L, // 100GB, don't expect this to spill
            100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("doReadRecordsNoSpill: rows[{}]", response.getRecordCount());
    assertEquals(3, response.getRecords().getRowCount());
    logger.info("doReadRecordsNoSpill: {}", BlockUtils.rowToString(response.getRecords(), 0));
    logger.info("doReadRecordsNoSpill: exit");
}
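For contrast, a spill-mode variant (a sketch modeled on the SDK's companion doReadRecordsSpill tests; the small limits are illustrative, reusing the split and constraints from above) would shrink the block-size limits so the same handler writes blocks to S3 and returns a RemoteReadRecordsResponse instead:

ReadRecordsRequest spillRequest = new ReadRecordsRequest(identity, "catalog",
        "queryId-" + System.currentTimeMillis(), new TableName("schema", "table"),
        schemaForRead, split, new Constraints(constraintsMap),
        1_500_000L, // ~1.5MB max block size: large results should spill
        0L); // no inline blocks: spilled data must be fetched from S3
RecordResponse rawSpillResponse = handler.doReadRecords(allocator, spillRequest);
assertTrue(rawSpillResponse instanceof RemoteReadRecordsResponse);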
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
The class DataLakeGen2MuxRecordHandlerTest, method buildSplitSql:
@Test
public void buildSplitSql() throws SQLException {
    ReadRecordsRequest readRecordsRequest = Mockito.mock(ReadRecordsRequest.class);
    Mockito.when(readRecordsRequest.getCatalogName()).thenReturn(DataLakeGen2Constants.NAME);
    Connection jdbcConnection = Mockito.mock(Connection.class);
    TableName tableName = new TableName("testSchema", "tableName");
    Schema schema = Mockito.mock(Schema.class);
    Constraints constraints = Mockito.mock(Constraints.class);
    Split split = Mockito.mock(Split.class);
    this.jdbcRecordHandler.buildSplitSql(jdbcConnection, DataLakeGen2Constants.NAME, tableName, schema, constraints, split);
    // the mux handler should delegate exactly once to the Gen2-specific handler
    Mockito.verify(this.dataLakeGen2RecordHandler, Mockito.times(1))
            .buildSplitSql(Mockito.eq(jdbcConnection), Mockito.eq(DataLakeGen2Constants.NAME),
                    Mockito.eq(tableName), Mockito.eq(schema), Mockito.eq(constraints), Mockito.eq(split));
}
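This delegation test, and the unsupported-catalog test below, exercise the same routing rule: the mux handler resolves a per-catalog delegate and forwards the call unchanged. A hypothetical sketch of that dispatch (recordHandlerMap and the error message are assumptions for illustration, not the SDK source):

// resolve the delegate registered for this catalog, if any
JdbcRecordHandler delegate = recordHandlerMap.get(catalogName);
if (delegate == null) {
    // an unregistered catalog has no delegate to route to
    throw new RuntimeException("Catalog not supported: " + catalogName);
}
// forward the call verbatim to the catalog-specific handler
return delegate.buildSplitSql(jdbcConnection, catalogName, tableName, schema, constraints, split);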
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
The class DataLakeGen2MuxRecordHandlerTest, method readWithConstraintWithUnsupportedCatalog:
@Test(expected = RuntimeException.class)
public void readWithConstraintWithUnsupportedCatalog() {
    BlockSpiller blockSpiller = Mockito.mock(BlockSpiller.class);
    ReadRecordsRequest readRecordsRequest = Mockito.mock(ReadRecordsRequest.class);
    Mockito.when(readRecordsRequest.getCatalogName()).thenReturn("unsupportedCatalog");
    this.jdbcRecordHandler.readWithConstraint(blockSpiller, readRecordsRequest, queryStatusChecker);
}
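The RuntimeException expected here is the failure branch of the per-catalog lookup sketched above: no delegate is registered under "unsupportedCatalog", so the mux rejects the read before touching the spiller.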