Use of com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.VIEW_METADATA_FIELD in project aws-athena-query-federation by awslabs.
From class TimestreamRecordHandlerTest, method readRecordsTimeSeriesView. The test attaches view SQL to the Arrow schema under the VIEW_METADATA_FIELD metadata key and verifies that TimestreamRecordHandler wraps that SQL in a CTE when building the Timestream query.
@Test
public void readRecordsTimeSeriesView() throws Exception {
    logger.info("readRecordsTimeSeriesView - enter");
    // View schema: scalar dimensions plus a list<struct<time, measure_value::double>>
    // time series; the view's SQL rides along as schema metadata under VIEW_METADATA_FIELD.
    Schema schemaForReadView = SchemaBuilder.newBuilder()
            .addField("region", Types.MinorType.VARCHAR.getType())
            .addField("az", Types.MinorType.VARCHAR.getType())
            .addField("hostname", Types.MinorType.VARCHAR.getType())
            .addField(FieldBuilder.newBuilder("cpu_utilization", Types.MinorType.LIST.getType())
                    .addField(FieldBuilder.newBuilder("cpu_utilization", Types.MinorType.STRUCT.getType())
                            .addDateMilliField("time")
                            .addFloat8Field("measure_value::double")
                            .build())
                    .build())
            .addMetadata(VIEW_METADATA_FIELD, "select az, hostname, region, CREATE_TIME_SERIES(time, measure_value::double) as cpu_utilization from \"" + DEFAULT_SCHEMA + "\".\"" + TEST_TABLE + "\" WHERE measure_name = 'cpu_utilization' GROUP BY measure_name, az, hostname, region")
            .build();
    String expectedQuery = "WITH t1 AS ( select az, hostname, region, CREATE_TIME_SERIES(time, measure_value::double) as cpu_utilization from \"my_schema\".\"my_table\" WHERE measure_name = 'cpu_utilization' GROUP BY measure_name, az, hostname, region ) SELECT region, az, hostname, cpu_utilization FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))";
    QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000);
    // The mocked Timestream client asserts that the handler produced exactly expectedQuery.
    when(mockClient.query(any(QueryRequest.class))).thenAnswer((Answer<QueryResult>) invocationOnMock -> {
        QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0];
        assertEquals("actual: " + request.getQueryString(), expectedQuery, request.getQueryString().replace("\n", ""));
        return mockResult;
    });
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    Split split = Split.newBuilder(splitLoc, null).build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("az", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, true)
            .add("us-east-1a").add("us-east-1b").build());
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, "default",
            "queryId-" + System.currentTimeMillis(),
            new TableName(DEFAULT_SCHEMA, TEST_TABLE),
            schemaForReadView, split, new Constraints(constraintsMap),
            // 100GB - don't expect this to spill
            100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("readRecordsTimeSeriesView: rows[{}]", response.getRecordCount());
    for (int i = 0; i < response.getRecordCount() && i < 10; i++) {
        logger.info("readRecordsTimeSeriesView: {}", BlockUtils.rowToString(response.getRecords(), i));
    }
    logger.info("readRecordsTimeSeriesView - exit");
}
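The expectedQuery above captures the rewrite this test asserts: the handler takes the view SQL stored under VIEW_METADATA_FIELD and wraps it in a WITH clause, so the requested projection and the pushed-down constraints apply to the view's output columns. A minimal sketch of that shape, using a hypothetical helper (ViewQuerySketch is illustrative, not part of the connector):

import java.util.Map;
import org.apache.arrow.vector.types.pojo.Schema;
import static com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.VIEW_METADATA_FIELD;

// Illustrative only: reproduces the query shape asserted by expectedQuery,
// i.e. "WITH t1 AS ( <view sql> ) SELECT <projection> FROM t1 WHERE <predicate>".
public final class ViewQuerySketch {
    private ViewQuerySketch() {}

    public static String wrap(Schema schema, String projection, String predicate) {
        // The view's SQL travels as Arrow schema metadata under VIEW_METADATA_FIELD.
        Map<String, String> meta = schema.getCustomMetadata();
        String viewSql = meta.get(VIEW_METADATA_FIELD);
        return "WITH t1 AS ( " + viewSql + " ) SELECT " + projection + " FROM t1 WHERE " + predicate;
    }
}

Called with projection "region, az, hostname, cpu_utilization" and predicate "(\"az\" IN ('us-east-1a','us-east-1b'))", wrap produces exactly the expectedQuery string the mocked client checks.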
Use of com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.VIEW_METADATA_FIELD in project aws-athena-query-federation by awslabs.
From class TimestreamRecordHandlerTest, method readRecordsView. Same pattern as above, but the request targets TEST_VIEW and the view SQL is a plain aggregation rather than a CREATE_TIME_SERIES query.
@Test
public void readRecordsView() throws Exception {
    logger.info("readRecordsView - enter");
    // Flat view schema; the aggregating view SQL rides along under VIEW_METADATA_FIELD.
    Schema schemaForReadView = SchemaBuilder.newBuilder()
            .addField("measure_name", Types.MinorType.VARCHAR.getType())
            .addField("az", Types.MinorType.VARCHAR.getType())
            .addField("value", Types.MinorType.FLOAT8.getType())
            .addField("num_samples", Types.MinorType.BIGINT.getType())
            .addMetadata(VIEW_METADATA_FIELD, "select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"" + DEFAULT_SCHEMA + "\".\"" + TEST_TABLE + "\" group by measure_name, az")
            .build();
    String expectedQuery = "WITH t1 AS ( select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"my_schema\".\"my_table\" group by measure_name, az ) SELECT measure_name, az, value, num_samples FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))";
    QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000);
    // The mocked Timestream client asserts that the handler produced exactly expectedQuery.
    when(mockClient.query(any(QueryRequest.class))).thenAnswer((Answer<QueryResult>) invocationOnMock -> {
        QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0];
        assertEquals(expectedQuery, request.getQueryString().replace("\n", ""));
        return mockResult;
    });
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    Split split = Split.newBuilder(splitLoc, null).build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("az", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, true)
            .add("us-east-1a").add("us-east-1b").build());
    // Note: the TableName targets TEST_VIEW here, not TEST_TABLE.
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, "default",
            "queryId-" + System.currentTimeMillis(),
            new TableName(DEFAULT_SCHEMA, TEST_VIEW),
            schemaForReadView, split, new Constraints(constraintsMap),
            // 100GB - don't expect this to spill
            100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("readRecordsView: rows[{}]", response.getRecordCount());
    for (int i = 0; i < response.getRecordCount() && i < 10; i++) {
        logger.info("readRecordsView: {}", BlockUtils.rowToString(response.getRecords(), i));
    }
    logger.info("readRecordsView - exit");
}
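Both tests lean on fixtures defined elsewhere in TimestreamRecordHandlerTest: allocator, handler, mockClient, IDENTITY, makeMockQueryResult, and the name constants. A sketch of those constants, with DEFAULT_SCHEMA and TEST_TABLE inferred from the expectedQuery strings; TEST_VIEW's real value is not visible in these excerpts:

// Fixture sketch. DEFAULT_SCHEMA and TEST_TABLE follow from the expectedQuery
// strings above ("my_schema"."my_table"); TEST_VIEW is a hypothetical placeholder.
private static final String DEFAULT_SCHEMA = "my_schema";
private static final String TEST_TABLE = "my_table";
private static final String TEST_VIEW = "my_view"; // placeholder; actual value not shown

In both cases the EquatableValueSet on "az" is what surfaces as the IN ('us-east-1a','us-east-1b') predicate in the expected queries, i.e. the constraint push-down applied on top of the view's CTE.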