Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
From the class MySqlMuxJdbcRecordHandlerTest, method readWithConstraint:
@Test
public void readWithConstraint() {
    BlockSpiller blockSpiller = Mockito.mock(BlockSpiller.class);
    ReadRecordsRequest readRecordsRequest = Mockito.mock(ReadRecordsRequest.class);
    Mockito.when(readRecordsRequest.getCatalogName()).thenReturn("mysql");
    this.jdbcRecordHandler.readWithConstraint(blockSpiller, readRecordsRequest, queryStatusChecker);
    // The mux handler must delegate exactly once to the MySQL-specific handler.
    Mockito.verify(this.mySqlRecordHandler, Mockito.times(1))
            .readWithConstraint(Mockito.eq(blockSpiller), Mockito.eq(readRecordsRequest), Mockito.eq(queryStatusChecker));
}
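The pattern under test: a mux handler holds one delegate per catalog and forwards each call based on ReadRecordsRequest.getCatalogName(). A minimal sketch of that dispatch, using stand-in types (CatalogRecordHandler and MuxRecordHandlerSketch are illustrative names, not the connector's real classes):

import java.util.Map;

// Stand-in for the connector's record-handler base type; illustrative only.
interface CatalogRecordHandler {
    void readWithConstraint(String catalogName);
}

// Minimal sketch of the multiplexing dispatch the test above verifies.
class MuxRecordHandlerSketch {
    private final Map<String, CatalogRecordHandler> delegates;

    MuxRecordHandlerSketch(Map<String, CatalogRecordHandler> delegates) {
        this.delegates = delegates;
    }

    void readWithConstraint(String catalogName) {
        // Resolve the catalog-specific handler (e.g. "mysql") and forward the call,
        // which is exactly what the Mockito.verify(...) above asserts.
        CatalogRecordHandler delegate = delegates.get(catalogName);
        if (delegate == null) {
            throw new IllegalStateException("No handler registered for catalog: " + catalogName);
        }
        delegate.readWithConstraint(catalogName);
    }
}

Registering one map entry per engine lets a single Lambda entry point serve several databases; the times(1) verification pins down that the call reaches the right delegate exactly once.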
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
From the class NeptuneRecordHandlerTest, method doReadRecordsSpill:
@Test
public void doReadRecordsSpill() throws Exception {
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    allocator = new BlockAllocatorImpl();
    // Greater-than filter on property1
    HashMap<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("property1", SortedRangeSet.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 9)));
    buildGraphTraversal();
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG, QUERY_ID, TABLE_NAME,
            schemaPGVertexForRead,
            Split.newBuilder(splitLoc, keyFactory.create()).build(),
            new Constraints(constraintsMap),
            1_500_000L, // ~1.5MB block size, so we should see some spill
            0L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof RemoteReadRecordsResponse);
    try (RemoteReadRecordsResponse response = (RemoteReadRecordsResponse) rawResponse) {
        logger.info("doReadRecordsSpill: remoteBlocks[{}]", response.getRemoteBlocks().size());
        assertTrue(response.getNumberBlocks() == 1);
        int blockNum = 0;
        for (SpillLocation next : response.getRemoteBlocks()) {
            S3SpillLocation spillLocation = (S3SpillLocation) next;
            try (Block block = spillReader.read(spillLocation, response.getEncryptionKey(), response.getSchema())) {
                logger.info("doReadRecordsSpill: blockNum[{}] and recordCount[{}]", blockNum++, block.getRowCount());
                logger.info("doReadRecordsSpill: {}", BlockUtils.rowToString(block, 0));
                assertNotNull(BlockUtils.rowToString(block, 0));
            }
        }
    }
}
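The last two constructor arguments are the size thresholds that drive spilling: with a ~1.5MB block size and a 0-byte inline allowance, the handler writes every block to the S3 spill location and answers with a RemoteReadRecordsResponse instead of inline records. A rough sketch of that decision, as implied by the test; the class, field, and method names here are assumptions for clarity, not SDK internals:

// Illustrative sketch of the spill decision implied by the two thresholds above.
class SpillDecisionSketch {
    private final long maxBlockSize;       // upper bound per block, e.g. 1_500_000L above
    private final long maxInlineBlockSize; // max bytes returned inline, e.g. 0L above

    SpillDecisionSketch(long maxBlockSize, long maxInlineBlockSize) {
        this.maxBlockSize = maxBlockSize;
        this.maxInlineBlockSize = maxInlineBlockSize;
    }

    // With maxInlineBlockSize == 0, any non-empty result spills to S3, so the
    // caller receives a RemoteReadRecordsResponse, as the test asserts.
    boolean shouldSpill(long bufferedBytes) {
        return bufferedBytes > maxInlineBlockSize;
    }
}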
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
From the class NeptuneRecordHandlerTest, method invokeAndAssert:
/**
 * Invokes a single test condition and asserts the resulting row count.
 *
 * @param schemaPG Schema used for the read request
 * @param constraintMap Constraint map for the Gremlin query
 * @param expectedRecordCount Expected row count per the Gremlin query response
 */
private void invokeAndAssert(Schema schemaPG, HashMap<String, ValueSet> constraintMap, Integer expectedRecordCount) throws Exception {
    S3SpillLocation spillLoc = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    allocator = new BlockAllocatorImpl();
    buildGraphTraversal();
    // 100GB size limits: large enough that this response never spills.
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG, QUERY_ID, TABLE_NAME, schemaPG,
            Split.newBuilder(spillLoc, null).build(), new Constraints(constraintMap),
            100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    assertTrue(response.getRecords().getRowCount() == expectedRecordCount);
    logger.info("invokeAndAssert: {}", BlockUtils.rowToString(response.getRecords(), 0));
}
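A hypothetical call site for this helper, built from the same pieces as the spill test above; the column name and the expected count of 2 are illustrative values, not taken from the test class:

// Hypothetical usage of invokeAndAssert; constraint and count are for illustration only.
HashMap<String, ValueSet> constraintsMap = new HashMap<>();
constraintsMap.put("property1",
        SortedRangeSet.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 9)));
// Expect the mocked Gremlin traversal to produce exactly 2 matching rows.
invokeAndAssert(schemaPGVertexForRead, constraintsMap, 2);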
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
From the class OracleMuxJdbcRecordHandlerTest, method buildSplitSql:
@Test
public void buildSplitSql() throws SQLException {
    ReadRecordsRequest readRecordsRequest = Mockito.mock(ReadRecordsRequest.class);
    Mockito.when(readRecordsRequest.getCatalogName()).thenReturn("oracle");
    Connection jdbcConnection = Mockito.mock(Connection.class);
    TableName tableName = new TableName("testSchema", "tableName");
    Schema schema = Mockito.mock(Schema.class);
    Constraints constraints = Mockito.mock(Constraints.class);
    Split split = Mockito.mock(Split.class);
    this.jdbcRecordHandler.buildSplitSql(jdbcConnection, "oracle", tableName, schema, constraints, split);
    Mockito.verify(this.oracleRecordHandler, Mockito.times(1))
            .buildSplitSql(Mockito.eq(jdbcConnection), Mockito.eq("oracle"), Mockito.eq(tableName),
                    Mockito.eq(schema), Mockito.eq(constraints), Mockito.eq(split));
}
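What the delegated call ultimately produces is a PreparedStatement scoped to one split. A rough sketch of that shape, under stated assumptions: the class name, quoting style, and bare SELECT below are illustrative, not the Oracle connector's internals.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

// Rough sketch of a buildSplitSql-style method: turn a schema/table pair
// into a PreparedStatement for one split. Illustrative only.
class SplitSqlSketch {
    PreparedStatement buildSplitSql(Connection connection, String schemaName, String tableName)
            throws SQLException {
        // Real connectors also fold column projection, constraint predicates,
        // and split properties (e.g. partition bounds) into this statement.
        String sql = "SELECT * FROM \"" + schemaName + "\".\"" + tableName + "\"";
        return connection.prepareStatement(sql);
    }
}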
Use of com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest in project aws-athena-query-federation by awslabs.
From the class RedisRecordHandlerTest, method doReadRecordsZset:
@Test
public void doReadRecordsZset() throws Exception {
    // Return 4 keys per prefix, paged across two scan cursors.
    when(mockSyncCommands.scan(any(ScanCursor.class), any(ScanArgs.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[0];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setKeys(result);
            return scanCursor;
        }
        else {
            List<String> result = new ArrayList<>();
            result.add(UUID.randomUUID().toString());
            MockKeyScanCursor<String> scanCursor = new MockKeyScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setKeys(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    // Return 4 rows per key, paged the same way.
    when(mockSyncCommands.zscan(anyString(), any(ScanCursor.class))).then((InvocationOnMock invocationOnMock) -> {
        ScanCursor cursor = (ScanCursor) invocationOnMock.getArguments()[1];
        if (cursor == null || cursor.getCursor().equals("0")) {
            List<ScoredValue<String>> result = new ArrayList<>();
            result.add(ScoredValue.just(0.0D, "1"));
            result.add(ScoredValue.just(0.0D, "2"));
            result.add(ScoredValue.just(0.0D, "3"));
            MockScoredValueScanCursor<String> scanCursor = new MockScoredValueScanCursor<>();
            scanCursor.setCursor("1");
            scanCursor.setValues(result);
            return scanCursor;
        }
        else {
            List<ScoredValue<String>> result = new ArrayList<>();
            result.add(ScoredValue.just(0.0D, "4"));
            MockScoredValueScanCursor<String> scanCursor = new MockScoredValueScanCursor<>();
            scanCursor.setCursor("0");
            scanCursor.setValues(result);
            scanCursor.setFinished(true);
            return scanCursor;
        }
    });
    AtomicLong value = new AtomicLong(0);
    when(mockSyncCommands.get(anyString())).thenAnswer((InvocationOnMock invocationOnMock) -> String.valueOf(value.getAndIncrement()));
    S3SpillLocation splitLoc = S3SpillLocation.newBuilder()
            .withBucket(UUID.randomUUID().toString())
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    Split split = Split.newBuilder(splitLoc, keyFactory.create())
            .add(REDIS_ENDPOINT_PROP, endpoint)
            .add(KEY_TYPE, KeyType.PREFIX.getId())
            .add(KEY_PREFIX_TABLE_PROP, "key-*")
            .add(VALUE_TYPE_TABLE_PROP, ValueType.ZSET.getId())
            .build();
    Schema schemaForRead = SchemaBuilder.newBuilder()
            .addField("_key_", Types.MinorType.VARCHAR.getType())
            .addField("intcol", Types.MinorType.INT.getType())
            .build();
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("intcol", SortedRangeSet.copyOf(Types.MinorType.INT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 1)), false));
    ReadRecordsRequest request = new ReadRecordsRequest(IDENTITY, DEFAULT_CATALOG,
            "queryId-" + System.currentTimeMillis(), TABLE_NAME, schemaForRead, split,
            new Constraints(constraintsMap),
            100_000_000_000L, // 100GB limits; don't expect this to spill
            100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;
    logger.info("doReadRecordsZset: rows[{}]", response.getRecordCount());
    logger.info("doReadRecordsZset: {}", BlockUtils.rowToString(response.getRecords(), 0));
    // 4 keys x 4 zset entries = 16 candidate rows; the intcol > 1 constraint leaves 12.
    assertTrue(response.getRecords().getRowCount() == 12);
    FieldReader keyReader = response.getRecords().getFieldReader(KEY_COLUMN_NAME);
    keyReader.setPosition(0);
    assertNotNull(keyReader.readText());
    FieldReader intCol = response.getRecords().getFieldReader("intcol");
    intCol.setPosition(0);
    assertNotNull(intCol.readInteger());
}
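The two mocks above emulate Redis cursor paging: a first page returns cursor "1", and the final page returns cursor "0" with finished set. A minimal sketch of the consuming loop against Lettuce's synchronous API, which the mocked RedisCommands stands in for (the loop body is an assumption about how a handler might drain the cursor, not code from the connector):

import io.lettuce.core.KeyScanCursor;
import io.lettuce.core.ScanCursor;
import io.lettuce.core.api.sync.RedisCommands;

// Minimal sketch of cursor-driven key scanning with Lettuce's sync API.
class ScanLoopSketch {
    void scanAllKeys(RedisCommands<String, String> commands) {
        ScanCursor cursor = ScanCursor.INITIAL;
        do {
            // Each scan call returns one page of keys plus the next cursor.
            KeyScanCursor<String> page = commands.scan(cursor);
            for (String key : page.getKeys()) {
                // A real handler would fetch this key's value or zset entries here.
                System.out.println(key);
            }
            cursor = page;
        } while (!cursor.isFinished());
    }
}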