Use of com.amazonaws.athena.connector.lambda.domain.predicate.Constraints in project aws-athena-query-federation by awslabs.
From the class SelectQueryBuilderTest, method build.
@Test
public void build() {
    logger.info("build: enter");
    String expected = "SELECT col1, col2, col3, col4 FROM \"myDatabase\".\"myTable\" WHERE (\"col4\" IN ('val1','val2')) AND ((\"col2\" < 1)) AND (\"col3\" IN (20000,10000)) AND ((\"col1\" > 1))";
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("col1", SortedRangeSet.copyOf(Types.MinorType.INT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 1)), false));
    constraintsMap.put("col2", SortedRangeSet.copyOf(Types.MinorType.INT.getType(),
            ImmutableList.of(Range.lessThan(allocator, Types.MinorType.INT.getType(), 1)), false));
    constraintsMap.put("col3", EquatableValueSet.newBuilder(allocator, Types.MinorType.INT.getType(), true, true)
            .add(20000L).add(10000L).build());
    constraintsMap.put("col4", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, true)
            .add("val1").add("val2").build());
    Schema schema = SchemaBuilder.newBuilder()
            .addStringField("col1")
            .addIntField("col2")
            .addBigIntField("col3")
            .addStringField("col4")
            .build();
    String actual = queryFactory.createSelectQueryBuilder(VIEW_METADATA_FIELD)
            .withDatabaseName("myDatabase")
            .withTableName("myTable")
            .withProjection(schema)
            .withConjucts(new Constraints(constraintsMap))
            .build()
            .replace("\n", "");
    logger.info("build: actual[{}]", actual);
    assertEquals(expected, actual);
    logger.info("build: exit");
}
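For readers new to this API, here is a minimal, self-contained sketch of assembling a Constraints object on its own, assuming the same SDK version as the test above (single-argument Constraints constructor). The class name and the single col1 predicate are illustrative, not part of the test:

import com.amazonaws.athena.connector.lambda.data.BlockAllocator;
import com.amazonaws.athena.connector.lambda.data.BlockAllocatorImpl;
import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints;
import com.amazonaws.athena.connector.lambda.domain.predicate.Range;
import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet;
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
import com.google.common.collect.ImmutableList;
import org.apache.arrow.vector.types.Types;
import java.util.HashMap;
import java.util.Map;

public class ConstraintsSketch {
    public static void main(String[] args) throws Exception {
        try (BlockAllocator allocator = new BlockAllocatorImpl()) {
            // One ValueSet per column; here: col1 > 1, with nulls not allowed.
            Map<String, ValueSet> constraintsMap = new HashMap<>();
            constraintsMap.put("col1", SortedRangeSet.copyOf(Types.MinorType.INT.getType(),
                    ImmutableList.of(Range.greaterThan(allocator, Types.MinorType.INT.getType(), 1)), false));
            Constraints constraints = new Constraints(constraintsMap);
            // The summary is the per-column predicate map a query builder renders to SQL.
            System.out.println(constraints.getSummary().keySet());  // [col1]
        }
    }
}

A builder such as SelectQueryBuilder walks constraints.getSummary() and renders each ValueSet as a WHERE-clause term, which is exactly what the expected string in the test asserts.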
Use of com.amazonaws.athena.connector.lambda.domain.predicate.Constraints in project aws-athena-query-federation by awslabs.
From the class VerticaMetadataHandlerTest, method getPartitions.
@Test
public void getPartitions() throws Exception {
    Schema tableSchema = SchemaBuilder.newBuilder()
            .addIntField("day")
            .addIntField("month")
            .addIntField("year")
            .addStringField("preparedStmt")
            .addStringField("queryId")
            .addStringField("awsRegionSql")
            .build();
    Set<String> partitionCols = new HashSet<>();
    partitionCols.add("preparedStmt");
    partitionCols.add("queryId");
    partitionCols.add("awsRegionSql");
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put("day", SortedRangeSet.copyOf(org.apache.arrow.vector.types.Types.MinorType.INT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, org.apache.arrow.vector.types.Types.MinorType.INT.getType(), 0)), false));
    constraintsMap.put("month", SortedRangeSet.copyOf(org.apache.arrow.vector.types.Types.MinorType.INT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, org.apache.arrow.vector.types.Types.MinorType.INT.getType(), 0)), false));
    constraintsMap.put("year", SortedRangeSet.copyOf(org.apache.arrow.vector.types.Types.MinorType.INT.getType(),
            ImmutableList.of(Range.greaterThan(allocator, org.apache.arrow.vector.types.Types.MinorType.INT.getType(), 2000)), false));
    GetTableLayoutRequest req = null;
    GetTableLayoutResponse res = null;
    String testSql = "Select * from schema1.table1";
    String[] test = new String[] { "Select * from schema1.table1", "Select * from schema1.table1" };
    String[] schema = { "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "TYPE_NAME" };
    Object[][] values = {
            { "testSchema", "testTable1", "day", "int" },
            { "testSchema", "testTable1", "month", "int" },
            { "testSchema", "testTable1", "year", "int" },
            { "testSchema", "testTable1", "preparedStmt", "varchar" },
            { "testSchema", "testTable1", "queryId", "varchar" },
            { "testSchema", "testTable1", "awsRegionSql", "varchar" } };
    int[] types = { Types.INTEGER, Types.INTEGER, Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR };
    List<TableName> expectedTables = new ArrayList<>();
    expectedTables.add(new TableName("testSchema", "testTable1"));
    AtomicInteger rowNumber = new AtomicInteger(-1);
    ResultSet resultSet = mockResultSet(schema, types, values, rowNumber);
    Mockito.when(connection.getMetaData().getColumns(null, "schema1", "table1", null)).thenReturn(resultSet);
    Mockito.when(queryFactory.createVerticaExportQueryBuilder())
            .thenReturn(new VerticaExportQueryBuilder(new ST("templateVerticaExportQuery")));
    Mockito.when(verticaMetadataHandlerMocked.getS3ExportBucket()).thenReturn("testS3Bucket");
    try {
        req = new GetTableLayoutRequest(this.federatedIdentity, "queryId", "default",
                new TableName("schema1", "table1"), new Constraints(constraintsMap), tableSchema, partitionCols);
        res = verticaMetadataHandlerMocked.doGetTableLayout(allocator, req);
        Block partitions = res.getPartitions();
        String actualQueryID = partitions.getFieldReader("queryId").readText().toString();
        String expectedExportSql = "EXPORT TO PARQUET(directory = 's3://testS3Bucket/" + actualQueryID
                + "', Compression='snappy', fileSizeMB=16, rowGroupSizeMB=16) "
                + "AS SELECT day,month,year,preparedStmt,queryId,awsRegionSql "
                + "FROM \"schema1\".\"table1\" "
                + "WHERE ((\"day\" > 0 )) AND ((\"month\" > 0 )) AND ((\"year\" > 2000 ))";
        Assert.assertEquals(expectedExportSql, partitions.getFieldReader("preparedStmt").readText().toString());
        for (int row = 0; row < partitions.getRowCount() && row < 1; row++) {
            logger.info("doGetTableLayout:{} {}", row, BlockUtils.rowToString(partitions, row));
        }
        assertTrue(partitions.getRowCount() > 0);
        logger.info("doGetTableLayout: partitions[{}]", partitions.getRowCount());
    } finally {
        try {
            req.close();
            res.close();
        } catch (Exception ex) {
            logger.error("doGetTableLayout: ", ex);
        }
    }
    logger.info("doGetTableLayout - exit");
}
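The WHERE clause asserted above comes from translating each SortedRangeSet bound into a SQL comparison. A hypothetical helper in the same spirit (toPredicate and its formatting are illustrative, not the connector's actual implementation; it assumes a range-based ValueSet, i.e. a SortedRangeSet):

import com.amazonaws.athena.connector.lambda.domain.predicate.Marker;
import com.amazonaws.athena.connector.lambda.domain.predicate.Range;
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
import java.util.ArrayList;
import java.util.List;

public class RangePredicateSketch {
    // Renders simple lower/upper bounds such as ("year" > 2000); ignores more exotic shapes.
    static String toPredicate(String col, ValueSet valueSet) {
        List<String> terms = new ArrayList<>();
        for (Range range : valueSet.getRanges().getOrderedRanges()) {
            if (!range.getLow().isLowerUnbounded()) {
                // An EXACTLY bound is inclusive (>=); an ABOVE bound is strict (>).
                String op = range.getLow().getBound() == Marker.Bound.EXACTLY ? ">=" : ">";
                terms.add("(\"" + col + "\" " + op + " " + range.getLow().getValue() + ")");
            }
            if (!range.getHigh().isUpperUnbounded()) {
                String op = range.getHigh().getBound() == Marker.Bound.EXACTLY ? "<=" : "<";
                terms.add("(\"" + col + "\" " + op + " " + range.getHigh().getValue() + ")");
            }
        }
        return String.join(" AND ", terms);
    }
}

Applied to the constraintsMap above, toPredicate("year", ...) would yield ("year" > 2000), matching the shape of the EXPORT TO PARQUET statement the test expects.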
Use of com.amazonaws.athena.connector.lambda.domain.predicate.Constraints in project aws-athena-query-federation by awslabs.
From the class AwsCmdbMetadataHandlerTest, method doGetSplits.
@Test
public void doGetSplits() {
    GetSplitsRequest request = new GetSplitsRequest(identity, queryId, catalog,
            new TableName("schema1", "table1"), mockBlock, Collections.emptyList(),
            new Constraints(new HashMap<>()), null);
    GetSplitsResponse response = handler.doGetSplits(blockAllocator, request);
    assertNotNull(response);
}
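Note that new Constraints(new HashMap<>()) is the conventional way to express "no pushed-down predicates". A quick sketch of what that means (the class and variable names are illustrative):

import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints;
import java.util.HashMap;

public class EmptyConstraintsSketch {
    public static void main(String[] args) {
        // An empty summary map means no predicates were pushed down,
        // so split generation cannot prune anything on constraint values.
        Constraints unconstrained = new Constraints(new HashMap<>());
        System.out.println(unconstrained.getSummary().isEmpty());  // true
    }
}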
Use of com.amazonaws.athena.connector.lambda.domain.predicate.Constraints in project aws-athena-query-federation by awslabs.
From the class AwsCmdbRecordHandlerTest, method readWithConstraint.
@Test
public void readWithConstraint() {
    ReadRecordsRequest request = new ReadRecordsRequest(identity, "catalog", "queryId",
            new TableName("schema", "table"), SchemaBuilder.newBuilder().build(),
            Split.newBuilder(S3SpillLocation.newBuilder()
                    .withBucket(bucket)
                    .withSplitId(UUID.randomUUID().toString())
                    .withQueryId(UUID.randomUUID().toString())
                    .withIsDirectory(true)
                    .build(), keyFactory.create()).build(),
            new Constraints(Collections.emptyMap()), 100_000, 100_000);
    handler.readWithConstraint(mockBlockSpiller, request, queryStatusChecker);
    verify(mockTableProvider, times(1)).readWithConstraint(any(BlockSpiller.class), eq(request), eq(queryStatusChecker));
}
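The ReadRecordsRequest above inlines the spill plumbing; pulled apart, it looks like the sketch below (the bucket name and the null encryption key are illustrative; the test uses keyFactory.create() instead):

import com.amazonaws.athena.connector.lambda.domain.Split;
import com.amazonaws.athena.connector.lambda.domain.spill.S3SpillLocation;
import java.util.UUID;

public class SplitSketch {
    public static void main(String[] args) {
        // Where spilled result blocks for this split land in S3.
        S3SpillLocation spill = S3SpillLocation.newBuilder()
                .withBucket("my-spill-bucket")
                .withPrefix("athena-spill")
                .withSplitId(UUID.randomUUID().toString())
                .withQueryId(UUID.randomUUID().toString())
                .withIsDirectory(true)
                .build();
        // A null EncryptionKey means spilled data is written unencrypted.
        Split split = Split.newBuilder(spill, null).build();
        System.out.println(split.getSpillLocation());
    }
}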
Use of com.amazonaws.athena.connector.lambda.domain.predicate.Constraints in project aws-athena-query-federation by awslabs.
From the class AbstractTableProviderTest, method readTableTest.
@Test
public void readTableTest() {
    GetTableRequest request = new GetTableRequest(identity, expectedQuery, expectedCatalog, expectedTableName);
    GetTableResponse response = provider.getTable(allocator, request);
    assertTrue(response.getSchema().getFields().size() > 1);
    Map<String, ValueSet> constraintsMap = new HashMap<>();
    constraintsMap.put(idField, EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, false)
            .add(idValue).build());
    Constraints constraints = new Constraints(constraintsMap);
    ConstraintEvaluator evaluator = new ConstraintEvaluator(allocator, response.getSchema(), constraints);
    S3SpillLocation spillLocation = S3SpillLocation.newBuilder()
            .withBucket("bucket")
            .withPrefix("prefix")
            .withSplitId(UUID.randomUUID().toString())
            .withQueryId(UUID.randomUUID().toString())
            .withIsDirectory(true)
            .build();
    ReadRecordsRequest readRequest = new ReadRecordsRequest(identity, expectedCatalog, "queryId", expectedTableName,
            response.getSchema(), Split.newBuilder(spillLocation, keyFactory.create()).build(),
            constraints, 100_000_000, 100_000_000);
    SpillConfig spillConfig = SpillConfig.newBuilder()
            .withSpillLocation(spillLocation)
            .withMaxBlockBytes(3_000_000)
            .withMaxInlineBlockBytes(0)
            .withRequestId("queryid")
            .withEncryptionKey(keyFactory.create())
            .build();
    setUpRead();
    BlockSpiller spiller = new S3BlockSpiller(amazonS3, spillConfig, allocator, response.getSchema(), evaluator);
    provider.readWithConstraint(spiller, readRequest, queryStatusChecker);
    validateRead(response.getSchema(), blockSpillReader, spiller.getSpillLocations(), spillConfig.getEncryptionKey());
}
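Finally, a minimal sketch of the ConstraintEvaluator used above, isolated from the provider machinery (the field name "id" and the values are illustrative):

import com.amazonaws.athena.connector.lambda.data.BlockAllocator;
import com.amazonaws.athena.connector.lambda.data.BlockAllocatorImpl;
import com.amazonaws.athena.connector.lambda.data.SchemaBuilder;
import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintEvaluator;
import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints;
import com.amazonaws.athena.connector.lambda.domain.predicate.EquatableValueSet;
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
import java.util.HashMap;
import java.util.Map;

public class EvaluatorSketch {
    public static void main(String[] args) throws Exception {
        try (BlockAllocator allocator = new BlockAllocatorImpl()) {
            Schema schema = SchemaBuilder.newBuilder().addStringField("id").build();
            Map<String, ValueSet> constraintsMap = new HashMap<>();
            // Whitelist a single value, nulls not allowed: id = 'match-me'.
            constraintsMap.put("id", EquatableValueSet.newBuilder(allocator, Types.MinorType.VARCHAR.getType(), true, false)
                    .add("match-me").build());
            ConstraintEvaluator evaluator = new ConstraintEvaluator(allocator, schema, new Constraints(constraintsMap));
            System.out.println(evaluator.apply("id", "match-me"));  // true
            System.out.println(evaluator.apply("id", "other"));     // false
        }
    }
}

In readTableTest, that evaluator is handed to S3BlockSpiller, so rows failing the pushed-down predicate can be filtered out as the provider writes them.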