Use of com.amazonaws.athena.connector.lambda.records.RecordResponse in project aws-athena-query-federation by awslabs.
From the class TimestreamRecordHandlerTest, method readRecordsView.
@Test
public void readRecordsView() throws Exception {
logger.info("readRecordsView - enter");
Schema schemaForReadView = SchemaBuilder.newBuilder().addField("measure_name", Types.MinorType.VARCHAR.getType()).addField("az", Types.MinorType.VARCHAR.getType()).addField("value", Types.MinorType.FLOAT8.getType()).addField("num_samples", Types.MinorType.BIGINT.getType()).addMetadata(VIEW_METADATA_FIELD, "select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"" + DEFAULT_SCHEMA + "\".\"" + TEST_TABLE + "\" group by measure_name, az").build();
String expectedQuery = "WITH t1 AS ( select measure_name, az,sum(\"measure_value::double\") as value, count(*) as num_samples from \"my_schema\".\"my_table\" group by measure_name, az ) SELECT measure_name, az, value, num_samples FROM t1 WHERE (\"az\" IN ('us-east-1a','us-east-1b'))";
QueryResult mockResult = makeMockQueryResult(schemaForReadView, 1_000);
when(mockClient.query(any(QueryRequest.class))).thenAnswer((Answer<QueryResult>) invocationOnMock -> {
QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0];
assertEquals(expectedQuery, request.getQueryString().replace("\n", ""));
return mockResult;
});
S3SpillLocation splitLoc = S3SpillLocation.newBuilder().withBucket(UUID.randomUUID().toString()).withSplitId(UUID.randomUUID().toString()).withQueryId(UUID.randomUUID().toString()).withIsDirectory(true).build();
Split split = Split.newBuilder(splitLoc, null).build();
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put("az", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, true).add("us-east-1a").add("us-east-1b").build());
ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, "default", "queryId-" + System.currentTimeMillis(), new TableName(DEFAULT_SCHEMA, TEST_VIEW), schemaForReadView, split, new Constraints(constraintsMap), // 100GB don't expect this to spill
100_000_000_000L, 100_000_000_000L);
RecordResponse rawResponse = handler.doReadRecords(allocator, request);
ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
logger.info("readRecordsView: rows[{}]", response.getRecordCount());
for (int i = 0; i < response.getRecordCount() && i < 10; i++) {
logger.info("readRecordsView: {}", BlockUtils.rowToString(response.getRecords(), i));
}
logger.info("readRecordsView - exit");
}
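The direct cast to ReadRecordsResponse above works because the 100GB spill threshold guarantees an inline response. A caller that cannot make that assumption would branch on the concrete RecordResponse type first; a minimal sketch, reusing only the handler, allocator, request, and logger names from the test and the accessors already exercised in these examples:

// Sketch only: dispatch on the concrete RecordResponse returned by doReadRecords.
RecordResponse rawResponse = handler.doReadRecords(allocator, request);
if (rawResponse instanceof ReadRecordsResponse) {
    // Records fit inline in the response.
    ReadRecordsResponse inline = (ReadRecordsResponse) rawResponse;
    logger.info("inline rows[{}]", inline.getRecordCount());
}
else if (rawResponse instanceof RemoteReadRecordsResponse) {
    // Records were spilled to S3; the response carries block locations rather than rows.
    RemoteReadRecordsResponse spilled = (RemoteReadRecordsResponse) rawResponse;
    logger.info("spilled blocks[{}]", spilled.getRemoteBlocks().size());
}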
Use of com.amazonaws.athena.connector.lambda.records.RecordResponse in project aws-athena-query-federation by awslabs.
From the class TimestreamRecordHandlerTest, method doReadRecordsSpill.
@Test
public void doReadRecordsSpill() throws Exception {
String expectedQuery = "SELECT measure_name, measure_value::double, az, time, hostname, region FROM \"my_schema\".\"my_table\" WHERE (\"az\" IN ('us-east-1a','us-east-1b'))";
QueryResult mockResult = makeMockQueryResult(schemaForRead, 100_000);
when(mockClient.query(any(QueryRequest.class))).thenAnswer((Answer<QueryResult>) invocationOnMock -> {
QueryRequest request = (QueryRequest) invocationOnMock.getArguments()[0];
assertEquals(expectedQuery, request.getQueryString().replace("\n", ""));
return mockResult;
});
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put("az", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, true).add("us-east-1a").add("us-east-1b").build());
S3SpillLocation splitLoc = S3SpillLocation.newBuilder().withBucket(UUID.randomUUID().toString()).withSplitId(UUID.randomUUID().toString()).withQueryId(UUID.randomUUID().toString()).withIsDirectory(true).build();
Split.Builder splitBuilder = Split.newBuilder(splitLoc, keyFactory.create());
ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG, "queryId-" + System.currentTimeMillis(), new TableName(DEFAULT_SCHEMA, TEST_TABLE), schemaForRead, splitBuilder.build(), new Constraints(constraintsMap), // ~1.5MB so we should see some spill
1_500_000L, 0L);
RecordResponse rawResponse = handler.doReadRecords(allocator, request);
assertTrue(rawResponse instanceof RemoteReadRecordsResponse);
try (RemoteReadRecordsResponse response = (RemoteReadRecordsResponse) rawResponse) {
logger.info("doReadRecordsSpill: remoteBlocks[{}]", response.getRemoteBlocks().size());
assertTrue(response.getNumberBlocks() > 1);
int blockNum = 0;
for (SpillLocation next : response.getRemoteBlocks()) {
S3SpillLocation spillLocation = (S3SpillLocation) next;
try (Block block = spillReader.read(spillLocation, response.getEncryptionKey(), response.getSchema())) {
logger.info("doReadRecordsSpill: blockNum[{}] and recordCount[{}]", blockNum++, block.getRowCount());
// assertTrue(++blockNum < response.getRemoteBlocks().size() && block.getRowCount() > 10_000);
logger.info("doReadRecordsSpill: {}", BlockUtils.rowToString(block, 0));
assertNotNull(BlockUtils.rowToString(block, 0));
}
}
}
}
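To sanity check the spill path end to end, one could also total the rows across every spilled block. A sketch that would sit inside the try-with-resources block above, assuming the same spillReader and response used in the test:

// Sketch only: tally rows across all spilled blocks (not part of the original test).
long totalRows = 0;
for (SpillLocation next : response.getRemoteBlocks()) {
    try (Block block = spillReader.read((S3SpillLocation) next, response.getEncryptionKey(), response.getSchema())) {
        totalRows += block.getRowCount();
    }
}
logger.info("doReadRecordsSpill: totalRows[{}]", totalRows);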
Use of com.amazonaws.athena.connector.lambda.records.RecordResponse in project aws-athena-query-federation by awslabs.
From the class TPCDSRecordHandlerTest, method doReadRecordForTPCDSTIMETypeColumn.
@Test
public void doReadRecordForTPCDSTIMETypeColumn() throws Exception {
    // Locate the dbgen_version base table, whose schema includes the TIME-typed column this test exercises.
    for (Table next : Table.getBaseTables()) {
        if (next.getName().equals("dbgen_version")) {
            table = next;
        }
    }
    SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder();
    for (Column nextCol : table.getColumns()) {
        schemaBuilder.addField(TPCDSUtils.convertColumn(nextCol));
    }
    ReadRecordsRequest request = new ReadRecordsRequest(identity, "catalog", "queryId-" + System.currentTimeMillis(),
            new TableName("tpcds1", table.getName()),
            schemaBuilder.build(),
            Split.newBuilder(S3SpillLocation.newBuilder()
                            .withBucket(UUID.randomUUID().toString())
                            .withSplitId(UUID.randomUUID().toString())
                            .withQueryId(UUID.randomUUID().toString())
                            .withIsDirectory(true)
                            .build(), keyFactory.create())
                    .add(SPLIT_NUMBER_FIELD, "0")
                    .add(SPLIT_TOTAL_NUMBER_FIELD, "1000")
                    .add(SPLIT_SCALE_FACTOR_FIELD, "1")
                    .build(),
            new Constraints(ImmutableMap.of()),
            100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("doReadRecordForTPCDSTIMETypeColumn: {}", BlockUtils.rowToString(response.getRecords(), 0));
    // TPCDS for `dbgen_version` always generates 1 record.
    assertEquals(1, response.getRecords().getRowCount());
    logger.info("doReadRecordForTPCDSTIMETypeColumn: exit");
}
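The three split properties added above are how the record handler learns which slice of TPC-DS data to generate. A hedged sketch of reading them back inside a handler, assuming a split variable holding the Split built inline above and a getProperty(String) accessor on Split:

// Sketch only: read back the split properties set in the request (accessor name assumed).
int splitNumber = Integer.parseInt(split.getProperty(SPLIT_NUMBER_FIELD));       // "0"
int totalSplits = Integer.parseInt(split.getProperty(SPLIT_TOTAL_NUMBER_FIELD)); // "1000"
int scaleFactor = Integer.parseInt(split.getProperty(SPLIT_SCALE_FACTOR_FIELD)); // "1"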
Use of com.amazonaws.athena.connector.lambda.records.RecordResponse in project aws-athena-query-federation by awslabs.
From the class ExampleRecordHandlerTest, method doReadRecordsNoSpill.
@Test
public void doReadRecordsNoSpill() throws Exception {
    if (!enableTests) {
        // We do this because until you complete the tutorial these tests will fail. When you attempt to publish
        // using ../tools/publish.sh ... it will set the publishing flag and force these tests. This is how we
        // avoid breaking the build but still have a useful tutorial. We are also duplicating this block
        // on purpose since this is a somewhat odd pattern.
        logger.info("doReadRecordsNoSpill: Tests are disabled, to enable them set the 'publishing' environment variable " + "using maven clean install -Dpublishing=true");
        return;
    }
    for (int i = 0; i < 2; i++) {
        Map<String, ValueSet> constraintsMap = new HashMap<>();
        ReadRecordsRequest request = new ReadRecordsRequest(fakeIdentity(), "catalog", "queryId-" + System.currentTimeMillis(),
                new TableName("schema", "table"), schemaForRead,
                Split.newBuilder(makeSpillLocation(), null).add("year", "2017").add("month", "11").add("day", "1").build(),
                new Constraints(constraintsMap),
                // 100GB don't expect this to spill
                100_000_000_000L, 100_000_000_000L);
        RecordResponse rawResponse = handler.doReadRecords(allocator, request);
        assertTrue(rawResponse instanceof ReadRecordsResponse);
        ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
        logger.info("doReadRecordsNoSpill: rows[{}]", response.getRecordCount());
        assertTrue(response.getRecords().getRowCount() > 0);
        logger.info("doReadRecordsNoSpill: {}", BlockUtils.rowToString(response.getRecords(), 0));
    }
}
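The constraintsMap in this tutorial test is left empty, so no predicate is pushed down. If the example table had a filterable column, a constraint could be added before building the request, mirroring the EquatableValueSet pattern used in the Timestream tests above; the column name below is purely illustrative:

// Sketch only: an illustrative single-value constraint on a hypothetical "year" column.
constraintsMap.put("year", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, false)
        .add("2017")
        .build());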
Use of com.amazonaws.athena.connector.lambda.records.RecordResponse in project aws-athena-query-federation by awslabs.
From the class ElasticsearchRecordHandlerTest, method doReadRecordsSpill.
@Test
public void doReadRecordsSpill() throws Exception {
logger.info("doReadRecordsSpill: enter");
int batchSize = handler.getQueryBatchSize();
SearchHit[] searchHit1 = new SearchHit[batchSize];
for (int i = 0; i < batchSize; ++i) {
searchHit1[i] = new SearchHit(i + 1);
}
SearchHit[] searchHit2 = new SearchHit[2];
searchHit2[0] = new SearchHit(batchSize + 1);
searchHit2[1] = new SearchHit(batchSize + 2);
SearchHits searchHits1 = new SearchHits(searchHit1, new TotalHits(batchSize, TotalHits.Relation.EQUAL_TO), 4);
SearchHits searchHits2 = new SearchHits(searchHit2, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 4);
when(mockResponse.getHits()).thenReturn(searchHits1, searchHits1, searchHits2, searchHits2);
Map<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put("myshort", SortedRangeSet.copyOf(Types.MinorType.SMALLINT.getType(), ImmutableList.of(Range.range(allocator, Types.MinorType.SMALLINT.getType(), (short) 1955, false, (short) 1972, true)), false));
ReadRecordsRequest request = new ReadRecordsRequest(fakeIdentity(), "elasticsearch", "queryId-" + System.currentTimeMillis(), new TableName("movies", "mishmash"), mapping, split, new Constraints(constraintsMap), // 10KB Expect this to spill
10_000L, 0L);
RecordResponse rawResponse = handler.doReadRecords(allocator, request);
assertTrue(rawResponse instanceof RemoteReadRecordsResponse);
try (RemoteReadRecordsResponse response = (RemoteReadRecordsResponse) rawResponse) {
logger.info("doReadRecordsSpill: remoteBlocks[{}]", response.getRemoteBlocks().size());
assertEquals(3, response.getNumberBlocks());
int blockNum = 0;
for (SpillLocation next : response.getRemoteBlocks()) {
S3SpillLocation spillLocation = (S3SpillLocation) next;
try (Block block = spillReader.read(spillLocation, response.getEncryptionKey(), response.getSchema())) {
logger.info("doReadRecordsSpill: blockNum[{}] and recordCount[{}]", blockNum++, block.getRowCount());
logger.info("doReadRecordsSpill: {}", BlockUtils.rowToString(block, 0));
assertNotNull(BlockUtils.rowToString(block, 0));
}
}
}
logger.info("doReadRecordsSpill: exit");
}
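The SortedRangeSet above restricts myshort to the half-open interval (1955, 1972], i.e. an exclusive lower and inclusive upper bound. The same API can also express open-ended predicates; a sketch assuming the SDK provides a Range.greaterThan factory alongside Range.range (the bound here is illustrative):

// Sketch only: constrain myshort to values strictly greater than 1972.
constraintsMap.put("myshort", SortedRangeSet.copyOf(Types.MinorType.SMALLINT.getType(),
        ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.SMALLINT.getType(), (short) 1972)),
        false));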