Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestHiveGlueMetastore, method doGetPartitionsFilterTest.
/**
 * @param columnMetadata column definitions for the table, including the partition columns
 * @param partitionColumnNames names of the partition columns
 * @param partitionValues partition values to create before querying
 * @param filterList filters to apply; must be the same size as expectedValuesList
 * @param expectedValuesList partition values expected back for each corresponding filter
 * @throws Exception if the table cannot be created or the metastore cannot be queried
 */
private void doGetPartitionsFilterTest(
        List<ColumnMetadata> columnMetadata,
        List<String> partitionColumnNames,
        List<PartitionValues> partitionValues,
        List<TupleDomain<String>> filterList,
        List<List<PartitionValues>> expectedValuesList)
        throws Exception
{
    try (CloseableSchamaTableName closeableTableName = new CloseableSchamaTableName(temporaryTable("get_partitions"))) {
        SchemaTableName tableName = closeableTableName.getSchemaTableName();
        createDummyPartitionedTable(tableName, columnMetadata, partitionColumnNames, partitionValues);
        HiveMetastore metastoreClient = getMetastoreClient();
        for (int i = 0; i < filterList.size(); i++) {
            TupleDomain<String> filter = filterList.get(i);
            List<PartitionValues> expectedValues = expectedValuesList.get(i);
            List<String> expectedResults = expectedValues.stream()
                    .map(expectedPartitionValues -> makePartName(partitionColumnNames, expectedPartitionValues.getValues()))
                    .collect(toImmutableList());
            Optional<List<String>> partitionNames = metastoreClient.getPartitionNamesByFilter(
                    tableName.getSchemaName(),
                    tableName.getTableName(),
                    partitionColumnNames,
                    filter);
            assertTrue(partitionNames.isPresent());
            assertEquals(
                    partitionNames.get(),
                    expectedResults,
                    format("lists \nactual: %s\nexpected: %s\nmismatch for filter %s (input index %d)\n", partitionNames.get(), expectedResults, filter, i));
        }
    }
}
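For context, a caller of this helper pairs each TupleDomain filter with the partition values it expects back. The following is a minimal illustrative sketch, not code from TestHiveGlueMetastore: the column names, the PartitionValues.make factory, and the assumed imports (io.trino.spi.predicate.Domain, io.trino.spi.type.VarcharType, io.airlift.slice.Slices.utf8Slice, Guava's ImmutableList/ImmutableMap) are placeholders or assumptions.

// Hypothetical invocation: one varchar partition column, an equality filter and a match-all filter.
List<ColumnMetadata> columns = ImmutableList.of(
        new ColumnMetadata("id", BigintType.BIGINT),     // regular column (placeholder)
        new ColumnMetadata("ds", VarcharType.VARCHAR));  // partition column (placeholder)
List<String> partitionColumnNames = ImmutableList.of("ds");
List<PartitionValues> partitions = ImmutableList.of(
        PartitionValues.make("2020-01-01"),              // factory name assumed
        PartitionValues.make("2020-01-02"));
doGetPartitionsFilterTest(
        columns,
        partitionColumnNames,
        partitions,
        ImmutableList.of(
                // equality filter on the partition column
                TupleDomain.withColumnDomains(ImmutableMap.of(
                        "ds", Domain.singleValue(VarcharType.VARCHAR, utf8Slice("2020-01-01")))),
                // no filter at all
                TupleDomain.all()),
        ImmutableList.of(
                ImmutableList.of(PartitionValues.make("2020-01-01")),  // only the matching partition
                partitions));                                          // all partitions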
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestHiveGlueMetastore, method testGetPartitions.
@Override
public void testGetPartitions()
        throws Exception
{
    try {
        SchemaTableName tableName = temporaryTable("get_partitions");
        createDummyPartitionedTable(tableName, CREATE_TABLE_COLUMNS_PARTITIONED);
        HiveMetastore metastoreClient = getMetastoreClient();
        Optional<List<String>> partitionNames = metastoreClient.getPartitionNamesByFilter(
                tableName.getSchemaName(),
                tableName.getTableName(),
                ImmutableList.of("ds"),
                TupleDomain.all());
        assertTrue(partitionNames.isPresent());
        assertEquals(partitionNames.get(), ImmutableList.of("ds=2016-01-01", "ds=2016-01-02"));
    }
    finally {
        dropTable(tablePartitionFormat);
    }
}
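Since this page tracks uses of SchemaTableName, a short standalone sketch of how the class itself is constructed and read may help; the schema and table names below are placeholders.

// Placeholder names, just to show the accessors used throughout these tests.
// Assumes import io.trino.spi.connector.SchemaTableName.
SchemaTableName tableName = new SchemaTableName("test_schema", "get_partitions_123");
String schema = tableName.getSchemaName();  // "test_schema"
String table = tableName.getTableName();    // "get_partitions_123"
// toString() renders the familiar "schema.table" form.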
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestHiveGlueMetastore, method testUpdatePartitionedStatisticsOnCreate.
@Test
public void testUpdatePartitionedStatisticsOnCreate()
{
    SchemaTableName tableName = temporaryTable("update_partitioned_statistics_create");
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        List<ColumnMetadata> columns = ImmutableList.of(
                new ColumnMetadata("a_column", BigintType.BIGINT),
                new ColumnMetadata("part_column", BigintType.BIGINT));
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(TEXTFILE, ImmutableList.of("part_column")));
        ConnectorOutputTableHandle createTableHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);

        // write data
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, createTableHandle);
        MaterializedResult data = MaterializedResult.resultBuilder(session, BigintType.BIGINT, BigintType.BIGINT)
                .row(1L, 1L)
                .row(2L, 1L)
                .row(3L, 1L)
                .row(4L, 2L)
                .row(5L, 2L)
                .build();
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // prepare statistics
        ComputedStatistics statistics1 = ComputedStatistics.builder(ImmutableList.of("part_column"), ImmutableList.of(singleValueBlock(1)))
                .addTableStatistic(TableStatisticType.ROW_COUNT, singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MIN_VALUE), singleValueBlock(1))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MAX_VALUE), singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_DISTINCT_VALUES), singleValueBlock(3))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_NON_NULL_VALUES), singleValueBlock(3))
                .build();
        ComputedStatistics statistics2 = ComputedStatistics.builder(ImmutableList.of("part_column"), ImmutableList.of(singleValueBlock(2)))
                .addTableStatistic(TableStatisticType.ROW_COUNT, singleValueBlock(2))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MIN_VALUE), singleValueBlock(4))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", MAX_VALUE), singleValueBlock(5))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_DISTINCT_VALUES), singleValueBlock(2))
                .addColumnStatistic(new ColumnStatisticMetadata("a_column", NUMBER_OF_NON_NULL_VALUES), singleValueBlock(2))
                .build();

        // finish CTAS
        metadata.finishCreateTable(session, createTableHandle, fragments, ImmutableList.of(statistics1, statistics2));
        transaction.commit();
    }
    finally {
        dropTable(tableName);
    }
}
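The singleValueBlock helper used above is defined elsewhere in the test hierarchy. A minimal sketch of what such a helper might look like for BIGINT values follows; this is an assumed implementation, not copied from Trino.

// Sketch only: builds a single-position BIGINT block, the shape ComputedStatistics expects
// for the group values and statistic values above.
// Assumes imports of io.trino.spi.block.Block, io.trino.spi.block.BlockBuilder,
// and the static io.trino.spi.type.BigintType.BIGINT.
private static Block singleValueBlock(long value)
{
    BlockBuilder blockBuilder = BIGINT.createBlockBuilder(null, 1);
    BIGINT.writeLong(blockBuilder, value);
    return blockBuilder.build();
}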
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestHiveGlueMetastore, method createDummyPartitionedTable.
private void createDummyPartitionedTable(SchemaTableName tableName, List<ColumnMetadata> columns, List<String> partitionColumnNames, List<PartitionValues> partitionValues)
        throws Exception
{
    doCreateEmptyTable(tableName, ORC, columns, partitionColumnNames);
    HiveMetastoreClosure metastoreClient = new HiveMetastoreClosure(getMetastoreClient());
    Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    List<PartitionWithStatistics> partitions = new ArrayList<>();
    List<String> partitionNames = new ArrayList<>();
    partitionValues.stream()
            .map(partitionValue -> makePartName(partitionColumnNames, partitionValue.values))
            .forEach(partitionName -> {
                partitions.add(new PartitionWithStatistics(createDummyPartition(table, partitionName), partitionName, PartitionStatistics.empty()));
                partitionNames.add(partitionName);
            });
    metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), partitions);
    partitionNames.forEach(partitionName ->
            metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName, currentStatistics -> EMPTY_TABLE_STATISTICS));
}
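makePartName turns parallel lists of partition column names and values into a Hive-style partition name, which is what the metastore filter tests above compare against. A small illustrative call (the makePartName overload comes from the Hive utility classes these tests already import; the values here are placeholders):

// Illustrative only: two partition columns and their values.
String partitionName = makePartName(
        ImmutableList.of("ds", "hour"),
        ImmutableList.of("2016-01-01", "10"));
// Expected Hive-style result: "ds=2016-01-01/hour=10"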
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
From class TestHiveGlueMetastore, method testInvalidColumnStatisticsMetadata.
@Test
public void testInvalidColumnStatisticsMetadata()
        throws Exception
{
    SchemaTableName tableName = temporaryTable("test_statistics_invalid_column_metadata");
    try {
        List<ColumnMetadata> columns = List.of(new ColumnMetadata("column1", BIGINT));
        Map<String, HiveColumnStatistics> columnStatistics = Map.of("column1", INTEGER_COLUMN_STATISTICS);
        PartitionStatistics partitionStatistics = PartitionStatistics.builder()
                .setBasicStatistics(HIVE_BASIC_STATISTICS)
                .setColumnStatistics(columnStatistics)
                .build();
        doCreateEmptyTable(tableName, ORC, columns);

        // set table statistics for column1
        metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, actualStatistics -> {
            assertThat(actualStatistics).isEqualTo(EMPTY_TABLE_STATISTICS);
            return partitionStatistics;
        });

        Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).get();
        TableInput tableInput = GlueInputConverter.convertTable(table);
        tableInput.setParameters(ImmutableMap.<String, String>builder()
                .putAll(tableInput.getParameters())
                .put("column_stats_bad_data", "bad data")
                .buildOrThrow());
        getGlueClient().updateTable(new UpdateTableRequest()
                .withDatabaseName(tableName.getSchemaName())
                .withTableInput(tableInput));

        assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())).isEqualTo(partitionStatistics);
    }
    finally {
        dropTable(tableName);
    }
}
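To double-check that the corrupt statistics parameter really reached Glue, one could read the table back through the Glue client. This is an illustrative extra step, not part of the original test; it assumes an import of com.amazonaws.services.glue.model.GetTableRequest and an AssertJ map assertion.

// Illustrative only: fetch the table back from Glue and inspect the injected parameter.
com.amazonaws.services.glue.model.Table glueTable = getGlueClient()
        .getTable(new GetTableRequest()
                .withDatabaseName(tableName.getSchemaName())
                .withName(tableName.getTableName()))
        .getTable();
assertThat(glueTable.getParameters()).containsEntry("column_stats_bad_data", "bad data");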