Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
The class AbstractTestHive, method testGetDatabaseNames.
@Test
public void testGetDatabaseNames()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        List<String> databases = metadata.listSchemaNames(newSession());
        assertTrue(databases.contains(database));
    }
}
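For context, listSchemaNames is the SPI method this test exercises. Below is a minimal sketch of how a connector-side implementation could look; the metastore field and its getAllDatabases call mirror the Hive connector's approach but are assumptions here, not code from this test class.

@Override
public List<String> listSchemaNames(ConnectorSession session)
{
    // Sketch only: expose the connector's namespaces (Hive databases) as schema names.
    // "metastore" is an assumed field on the metadata class.
    return ImmutableList.copyOf(metastore.getAllDatabases());
}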
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
The class AbstractTestHive, method testGetRecordsInvalidColumn.
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = ".*" + INVALID_COLUMN + ".*")
public void testGetRecordsInvalidColumn()
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata connectorMetadata = transaction.getMetadata();
        ConnectorTableHandle table = getTableHandle(connectorMetadata, tableUnpartitioned);
        ConnectorSession session = newSession();
        connectorMetadata.beginQuery(session);
        readTable(transaction, table, ImmutableList.of(invalidColumnHandle), session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
    }
}
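The invalidColumnHandle fixture is defined elsewhere in AbstractTestHive. A hedged sketch of how such a handle could be constructed (the createBaseColumn factory and its exact arguments vary across Trino versions, so treat this as an assumption):

// Hypothetical fixture: a handle naming a column that does not exist in the
// table, so reading it fails with a message containing INVALID_COLUMN.
ColumnHandle invalidColumnHandle = createBaseColumn(INVALID_COLUMN, 0, HIVE_STRING, VARCHAR, REGULAR, Optional.empty());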
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
The class AbstractTestHive, method testGetPartitionSplitsBatch.
@Test
public void testGetPartitionSplitsBatch()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tablePartitionFormat);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
        assertEquals(getSplitCount(splitSource), tablePartitionFormatPartitions.size());
    }
}
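The getSplits helper used here wraps the split manager; a sketch of the call it likely makes (the ConnectorSplitManager.getSplits signature has changed across Trino SPI versions, so the exact arguments are an assumption):

protected static ConnectorSplitSource getSplits(ConnectorSplitManager splitManager, Transaction transaction, ConnectorSession session, ConnectorTableHandle tableHandle)
{
    // Sketch: request splits with no dynamic filtering and no pushed-down constraint.
    return splitManager.getSplits(transaction.getTransactionHandle(), session, tableHandle, DynamicFilter.EMPTY, Constraint.alwaysTrue());
}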
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
The class AbstractTestHive, method testGetPartitionSplitsBatchUnpartitioned.
@Test
public void testGetPartitionSplitsBatchUnpartitioned()
{
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableUnpartitioned);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
        assertEquals(getSplitCount(splitSource), 1);
    }
}
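Both split tests count splits by draining the source. A sketch of getSplitCount under the assumption that it uses the standard batch-draining pattern (getNextBatch's signature also differs between SPI versions):

protected static int getSplitCount(ConnectorSplitSource splitSource)
{
    int splitCount = 0;
    // Drain batches until the source reports it is finished.
    while (!splitSource.isFinished()) {
        splitCount += getFutureValue(splitSource.getNextBatch(1000)).getSplits().size();
    }
    return splitCount;
}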
Use of io.trino.spi.connector.ConnectorMetadata in project trino by trinodb.
The class AbstractTestHive, method doInsert.
private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName)
        throws Exception
{
    // create the empty table
    doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS);

    // insert the same data three times and verify after each round
    MaterializedResult.Builder resultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_DATA.getTypes());
    for (int i = 0; i < 3; i++) {
        insertData(tableName, CREATE_TABLE_DATA);
        try (Transaction transaction = newTransaction()) {
            ConnectorSession session = newSession();
            ConnectorMetadata metadata = transaction.getMetadata();
            metadata.beginQuery(session);

            // load the new table
            ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
            List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

            // verify the metadata
            ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
            assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), CREATE_TABLE_COLUMNS);

            // verify the data
            resultBuilder.rows(CREATE_TABLE_DATA.getMaterializedRows());
            MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
            assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());

            // verify the statistics
            HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
            assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1));
            assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L);
            assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
            assertGreaterThan(tableStatistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
        }
    }

    // test rollback
    Set<String> existingFiles;
    try (Transaction transaction = newTransaction()) {
        existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
        assertFalse(existingFiles.isEmpty());
    }

    Path stagingPathRoot;
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

        // "stage" the insert data
        ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        sink.appendPage(CREATE_TABLE_DATA.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());

        // the staged statistics are visible from within the transaction
        HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 5L);

        try (Transaction otherTransaction = newTransaction()) {
            // but not from outside the transaction
            HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName);
            assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        }

        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);

        // verify all temp files start with the unique prefix
        stagingPathRoot = getStagingPathRoot(insertTableHandle);
        HdfsContext context = new HdfsContext(session);
        Set<String> tempFiles = listAllDataFiles(context, stagingPathRoot);
        assertFalse(tempFiles.isEmpty());
        for (String filePath : tempFiles) {
            assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
        }

        // roll back the insert
        transaction.rollback();
    }

    // verify the temp directory is empty
    HdfsContext context = new HdfsContext(newSession());
    assertTrue(listAllDataFiles(context, stagingPathRoot).isEmpty());

    // verify the data is unchanged
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        metadata.beginQuery(session);
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
        MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
        assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());

        // verify we did not modify the table directory
        assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
    }

    // verify the statistics are unchanged
    try (Transaction transaction = newTransaction()) {
        HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName);
        assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L);
        assertEquals(statistics.getFileCount().getAsLong(), 3L);
    }
}
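doInsert depends on an insertData helper to append one copy of CREATE_TABLE_DATA per loop iteration. A hedged sketch of what it might do, assembled from the same staged-insert calls the rollback section above already uses (anything not shown in doInsert itself is an assumption):

private void insertData(SchemaTableName tableName, MaterializedResult data)
        throws Exception
{
    try (Transaction transaction = newTransaction()) {
        ConnectorSession session = newSession();
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);

        // Stage the data through a page sink, then finish and commit the insert.
        ConnectorInsertTableHandle insertHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());
        metadata.finishInsert(session, insertHandle, fragments, ImmutableList.of());
        transaction.commit();
    }
}

A typical call site would be doInsert(storageFormat, temporaryTable("insert")) once per storage format under test; temporaryTable is assumed here to generate a unique SchemaTableName so runs do not collide.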