Use of com.amazonaws.athena.connector.lambda.metadata.GetTableRequest in project aws-athena-query-federation by awslabs.
Example from the class DynamoDBRecordHandlerTest, method testDateTimeSupportFromGlueTable:
@Test
public void testDateTimeSupportFromGlueTable() throws Exception {
    // Pin the JVM default time zone so the date/time assertions are deterministic.
    TimeZone.setDefault(TimeZone.getTimeZone("UTC"));

    // Glue table schema: a string key plus timestamp, date, and timestamptz columns.
    List<Column> columns = new ArrayList<>();
    columns.add(new Column().withName("col0").withType("string"));
    columns.add(new Column().withName("col1").withType("timestamp"));
    columns.add(new Column().withName("col2").withType("timestamp"));
    columns.add(new Column().withName("col3").withType("date"));
    columns.add(new Column().withName("col4").withType("date"));
    columns.add(new Column().withName("col5").withType("timestamptz"));
    columns.add(new Column().withName("col6").withType("timestamptz"));
    columns.add(new Column().withName("col7").withType("timestamptz"));

    // Table parameters map Glue column names to DynamoDB attribute names and
    // declare custom datetime parse formats for col1 and col3.
    Map<String, String> param = ImmutableMap.of(
            SOURCE_TABLE_PROPERTY, TEST_TABLE3,
            COLUMN_NAME_MAPPING_PROPERTY, "col1=Col1 , col2=Col2 ,col3=Col3, col4=Col4,col5=Col5,col6=Col6,col7=Col7",
            DATETIME_FORMAT_MAPPING_PROPERTY, "col1=yyyyMMdd'S'HHmmss,col3=dd/MM/yyyy ");
    Table table = new Table().withParameters(param).withPartitionKeys().withStorageDescriptor(new StorageDescriptor().withColumns(columns));
    GetTableResult mockResult = new GetTableResult().withTable(table);
    when(glueClient.getTable(any())).thenReturn(mockResult);

    // Resolve the table schema through the metadata handler via GetTableRequest.
    TableName tableName = new TableName(DEFAULT_SCHEMA, TEST_TABLE3);
    GetTableRequest getTableRequest = new GetTableRequest(TEST_IDENTITY, TEST_QUERY_ID, TEST_CATALOG_NAME, tableName);
    GetTableResponse getTableResponse = metadataHandler.doGetTable(allocator, getTableRequest);
    logger.info("testDateTimeSupportFromGlueTable: GetTableResponse[{}]", getTableResponse);
    logger.info("testDateTimeSupportFromGlueTable: GetTableResponse Schema[{}]", getTableResponse.getSchema());
    Schema schema3 = getTableResponse.getSchema();

    // Build a single-segment split and read the records using the resolved schema.
    Split split = Split.newBuilder(SPILL_LOCATION, keyFactory.create())
            .add(TABLE_METADATA, TEST_TABLE3)
            .add(SEGMENT_ID_PROPERTY, "0")
            .add(SEGMENT_COUNT_METADATA, "1")
            .build();
    ReadRecordsRequest request = new ReadRecordsRequest(TEST_IDENTITY, TEST_CATALOG_NAME, TEST_QUERY_ID, TEST_TABLE_3_NAME, schema3, split,
            new Constraints(ImmutableMap.of()),
            // too big to spill
            100_000_000_000L, 100_000_000_000L);
    RecordResponse rawResponse = handler.doReadRecords(allocator, request);
    assertTrue(rawResponse instanceof ReadRecordsResponse);
    ReadRecordsResponse response = (ReadRecordsResponse) rawResponse;

    // The custom formats and zoned timestamps should decode to these expected values.
    LocalDate expectedDate = LocalDate.of(2020, 2, 27);
    LocalDateTime expectedDateTime = LocalDateTime.of(2020, 2, 27, 9, 12, 27);
    assertEquals(1, response.getRecords().getRowCount());
    assertEquals(expectedDateTime, response.getRecords().getFieldReader("Col1").readLocalDateTime());
    assertEquals(expectedDateTime, response.getRecords().getFieldReader("Col2").readLocalDateTime());
    assertEquals(expectedDate, LocalDate.ofEpochDay(response.getRecords().getFieldReader("Col3").readInteger()));
    assertEquals(expectedDate, LocalDate.ofEpochDay(response.getRecords().getFieldReader("Col4").readInteger()));
    assertEquals(getPackedDateTimeWithZone("2015-12-21T17:42:34-05:00"), response.getRecords().getFieldReader("Col5").readLong().longValue());
    assertEquals(getPackedDateTimeWithZone("2015-12-21T17:42:34Z"), response.getRecords().getFieldReader("Col6").readLong().longValue());
    assertEquals(getPackedDateTimeWithZone("2015-12-21T17:42:34Z"), response.getRecords().getFieldReader("Col7").readLong().longValue());
}
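
As a side note, the custom patterns declared via DATETIME_FORMAT_MAPPING_PROPERTY can be exercised on their own with java.time. The standalone sketch below shows how "yyyyMMdd'S'HHmmss" and "dd/MM/yyyy" parse into the values the test expects; the raw input strings "20200227S091227" and "27/02/2020" are assumed here for illustration and are not taken verbatim from the TEST_TABLE3 fixture.

import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

public class DateTimeFormatMappingSketch {
    public static void main(String[] args) {
        // Parse an assumed raw string with the custom timestamp pattern from the test.
        DateTimeFormatter tsFormat = DateTimeFormatter.ofPattern("yyyyMMdd'S'HHmmss");
        LocalDateTime ts = LocalDateTime.parse("20200227S091227", tsFormat);
        System.out.println(ts); // 2020-02-27T09:12:27

        // Parse an assumed raw string with the custom date pattern from the test.
        DateTimeFormatter dateFormat = DateTimeFormatter.ofPattern("dd/MM/yyyy");
        LocalDate date = LocalDate.parse("27/02/2020", dateFormat);
        System.out.println(date); // 2020-02-27
    }
}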