Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class TestRaptorMetadata, method testSortOrderProperty.
@Test
public void testSortOrderProperty() {
assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));
ConnectorTableMetadata ordersTable = getOrdersTable(ImmutableMap.of(ORDERING_PROPERTY, ImmutableList.of("orderdate", "custkey")));
metadata.createTable(SESSION, ordersTable, false);
ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
assertInstanceOf(tableHandle, RaptorTableHandle.class);
RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
assertEquals(raptorTableHandle.getTableId(), 1);
long tableId = raptorTableHandle.getTableId();
MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);
// verify sort columns
List<TableColumn> sortColumns = metadataDao.listSortColumns(tableId);
assertTableColumnsEqual(sortColumns, ImmutableList.of(
        new TableColumn(DEFAULT_TEST_ORDERS, "orderdate", DATE, 4, 3, OptionalInt.empty(), OptionalInt.of(0), false),
        new TableColumn(DEFAULT_TEST_ORDERS, "custkey", BIGINT, 2, 1, OptionalInt.empty(), OptionalInt.of(1), false)));
// verify temporal column is not set
assertNull(metadataDao.getTemporalColumnId(tableId));
metadata.dropTable(SESSION, tableHandle);
}
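The getOrdersTable(...) fixture helper is not shown on this page. For orientation, here is a hypothetical sketch of the shape such a helper could take, with the column layout inferred from the ordinals asserted above (custkey at position 2, orderdate at position 4); the actual helper in TestRaptorMetadata may differ.

// Hypothetical fixture sketch; ConnectorTableMetadata and ColumnMetadata are
// real trino-spi types, but this body is an inference, not the actual test code.
private static ConnectorTableMetadata getOrdersTable(Map<String, Object> properties) {
    return new ConnectorTableMetadata(
            DEFAULT_TEST_ORDERS,
            ImmutableList.of(
                    new ColumnMetadata("orderkey", BIGINT),
                    new ColumnMetadata("custkey", BIGINT),
                    new ColumnMetadata("totalprice", DOUBLE),
                    new ColumnMetadata("orderdate", DATE)),
            properties);
}

The no-argument getOrdersTable() used in the next example would then presumably delegate with an empty property map.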
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class TestRaptorMetadata, method testTransactionDelete.
@Test
public void testTransactionDelete() {
// creating a table allocates a transaction
long transactionId = 1;
metadata.createTable(SESSION, getOrdersTable(), false);
assertTrue(transactionSuccessful(transactionId));
// start delete
transactionId++;
ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
tableHandle = metadata.beginDelete(SESSION, tableHandle);
// verify transaction is assigned for deletion handle
assertInstanceOf(tableHandle, RaptorTableHandle.class);
RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
assertEquals(raptorTableHandle.getTableId(), 1);
assertEquals(raptorTableHandle.getTransactionId(), OptionalLong.of(transactionId));
// transaction is in progress
assertTrue(transactionExists(transactionId));
assertNull(transactionSuccessful(transactionId));
// rollback delete
metadata.rollback();
assertTrue(transactionExists(transactionId));
assertFalse(transactionSuccessful(transactionId));
// start another delete
transactionId++;
tableHandle = metadata.beginDelete(SESSION, tableHandle);
// transaction is in progress
assertTrue(transactionExists(transactionId));
assertNull(transactionSuccessful(transactionId));
// commit delete
metadata.finishDelete(SESSION, tableHandle, ImmutableList.of());
assertTrue(transactionExists(transactionId));
assertTrue(transactionSuccessful(transactionId));
}
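The transactionExists(...) and transactionSuccessful(...) helpers are private to the test class and query the metadata database directly (presumably through the same dbi instance used in the previous example). A minimal JDBC sketch of the second helper, assuming a transactions table with a transaction_id column and a nullable successful column that stays NULL while the transaction is in progress:

// Hypothetical sketch, not the actual test helper. Requires java.sql.Connection,
// PreparedStatement, ResultSet, and SQLException.
private static Boolean transactionSuccessful(Connection connection, long transactionId) throws SQLException {
    try (PreparedStatement statement = connection.prepareStatement(
            "SELECT successful FROM transactions WHERE transaction_id = ?")) {
        statement.setLong(1, transactionId);
        try (ResultSet resultSet = statement.executeQuery()) {
            if (!resultSet.next()) {
                return null; // no row: transaction unknown (transactionExists covers this case)
            }
            boolean successful = resultSet.getBoolean("successful");
            return resultSet.wasNull() ? null : successful; // null while still in progress
        }
    }
}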
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class TestThriftProjectionPushdown, method testProjectionPushdown.
@Test
public void testProjectionPushdown() {
PushProjectionIntoTableScan pushProjectionIntoTableScan = new PushProjectionIntoTableScan(
        tester().getPlannerContext(),
        tester().getTypeAnalyzer(),
        new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer()));
TableHandle inputTableHandle = NATION_TABLE;
String columnName = "orderstatus";
ColumnHandle columnHandle = new ThriftColumnHandle(columnName, VARCHAR, "", false);
ConnectorTableHandle projectedThriftHandle = new ThriftTableHandle(
        TINY_SCHEMA,
        "nation",
        TupleDomain.all(),
        Optional.of(ImmutableSet.of(columnHandle)));
tester().assertThat(pushProjectionIntoTableScan)
        .on(p -> {
            Symbol orderStatusSymbol = p.symbol(columnName, VARCHAR);
            return p.project(
                    Assignments.of(p.symbol("expr_2", VARCHAR), orderStatusSymbol.toSymbolReference()),
                    p.tableScan(inputTableHandle, ImmutableList.of(orderStatusSymbol), ImmutableMap.of(orderStatusSymbol, columnHandle)));
        })
        .withSession(SESSION)
        .matches(project(
                ImmutableMap.of("expr_2", expression(new SymbolReference(columnName))),
                tableScan(
                        projectedThriftHandle::equals,
                        TupleDomain.all(),
                        ImmutableMap.of(columnName, columnHandle::equals))));
}
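The matcher expects the optimizer rule to replace the scan's table handle with projectedThriftHandle, that is, a ThriftTableHandle whose desired columns have been narrowed to the single projected column. Handles like that are produced by the connector's ConnectorMetadata.applyProjection hook. The SPI method below is real; the body is an illustrative simplification rather than the actual Thrift connector implementation, and the ThriftTableHandle/ThriftColumnHandle accessors are assumed from the constructor arguments shown above.

// Illustrative sketch of a connector-side applyProjection implementation.
@Override
public Optional<ProjectionApplicationResult<ConnectorTableHandle>> applyProjection(
        ConnectorSession session,
        ConnectorTableHandle handle,
        List<ConnectorExpression> projections,
        Map<String, ColumnHandle> assignments) {
    ThriftTableHandle table = (ThriftTableHandle) handle;
    if (table.getDesiredColumns().isPresent()) {
        return Optional.empty(); // already narrowed; nothing further to push down
    }
    ImmutableList.Builder<Assignment> assignmentList = ImmutableList.builder();
    ImmutableSet.Builder<ColumnHandle> desiredColumns = ImmutableSet.builder();
    assignments.forEach((name, column) -> {
        desiredColumns.add(column);
        // assumes a getColumnType() accessor on ThriftColumnHandle
        assignmentList.add(new Assignment(name, column, ((ThriftColumnHandle) column).getColumnType()));
    });
    ThriftTableHandle newHandle = new ThriftTableHandle(
            table.getSchemaName(),
            table.getTableName(),
            table.getConstraint(),
            Optional.of(desiredColumns.build()));
    return Optional.of(new ProjectionApplicationResult<>(newHandle, projections, assignmentList.build(), false));
}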
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, method testPartitionSchemaMismatch.
@Test(expectedExceptions = TrinoException.class, expectedExceptionsMessageRegExp = ".*The column 't_data' in table '.*\\.trino_test_partition_schema_change' is declared as type 'double', but partition 'ds=2012-12-29' declared column 't_data' as type 'string'.")
public void testPartitionSchemaMismatch() throws Exception {
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorTableHandle table = getTableHandle(metadata, tablePartitionSchemaChange);
ConnectorSession session = newSession();
metadata.beginQuery(session);
readTable(transaction, table, ImmutableList.of(dsColumn), session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
}
}
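getTableHandle(metadata, tablePartitionSchemaChange) is a small convenience wrapper in AbstractTestHive. A sketch of its likely shape, assuming it only resolves the name through ConnectorMetadata and fails fast when the table is missing (checkArgument is Guava's Preconditions.checkArgument):

// Sketch of the getTableHandle(...) wrapper used throughout these tests.
protected ConnectorTableHandle getTableHandle(ConnectorMetadata metadata, SchemaTableName tableName) {
    ConnectorTableHandle handle = metadata.getTableHandle(newSession(), tableName);
    checkArgument(handle != null, "table not found: %s", tableName);
    return handle;
}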
Use of io.trino.spi.connector.ConnectorTableHandle in project trino by trinodb.
From the class AbstractTestHive, method doInsertIntoExistingPartition.
private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception {
// creating the table
doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED);
MaterializedResult.Builder resultBuilder = MaterializedResult.resultBuilder(SESSION, CREATE_TABLE_PARTITIONED_DATA.getTypes());
for (int i = 0; i < 3; i++) {
// insert the data
insertData(tableName, CREATE_TABLE_PARTITIONED_DATA);
try (Transaction transaction = newTransaction()) {
ConnectorSession session = newSession();
ConnectorMetadata metadata = transaction.getMetadata();
metadata.beginQuery(session);
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
// verify partitions were created
List<String> partitionNames = transaction.getMetastore()
        .getPartitionNames(tableName.getSchemaName(), tableName.getTableName())
        .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream()
        .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1))
        .collect(toImmutableList()));
// load the new table
List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
// verify the data
resultBuilder.rows(CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows());
MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.of(storageFormat));
assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
// test statistics
for (String partitionName : partitionNames) {
HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName);
assertEquals(statistics.getRowCount().getAsLong(), i + 1L);
assertEquals(statistics.getFileCount().getAsLong(), i + 1L);
assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L);
assertGreaterThan(statistics.getOnDiskDataSizeInBytes().getAsLong(), 0L);
}
}
}
// test rollback
Set<String> existingFiles;
Path stagingPathRoot;
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorSession session = newSession();
existingFiles = listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName());
assertFalse(existingFiles.isEmpty());
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
// "stage" insert data
ConnectorInsertTableHandle insertTableHandle = metadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
stagingPathRoot = getStagingPathRoot(insertTableHandle);
ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, insertTableHandle);
sink.appendPage(CREATE_TABLE_PARTITIONED_DATA.toPage());
sink.appendPage(CREATE_TABLE_PARTITIONED_DATA.toPage());
Collection<Slice> fragments = getFutureValue(sink.finish());
metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of());
// verify all temp files start with the unique prefix
HdfsContext context = new HdfsContext(session);
Set<String> tempFiles = listAllDataFiles(context, getStagingPathRoot(insertTableHandle));
assertFalse(tempFiles.isEmpty());
for (String filePath : tempFiles) {
assertThat(new Path(filePath).getName()).startsWith(session.getQueryId());
}
// verify statistics are visible from within the current transaction
List<String> partitionNames = transaction.getMetastore()
        .getPartitionNames(tableName.getSchemaName(), tableName.getTableName())
        .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
for (String partitionName : partitionNames) {
HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName);
assertEquals(partitionStatistics.getRowCount().getAsLong(), 5L);
}
// rollback insert
transaction.rollback();
}
try (Transaction transaction = newTransaction()) {
ConnectorMetadata metadata = transaction.getMetadata();
ConnectorSession session = newSession();
metadata.beginQuery(session);
ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());
// verify the data is unchanged
MaterializedResult result = readTable(transaction, tableHandle, columnHandles, session, TupleDomain.all(), OptionalInt.empty(), Optional.empty());
assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows());
// verify we did not modify the table directory
assertEquals(listAllDataFiles(transaction, tableName.getSchemaName(), tableName.getTableName()), existingFiles);
// verify temp directory is empty
HdfsContext hdfsContext = new HdfsContext(session);
assertTrue(listAllDataFiles(hdfsContext, stagingPathRoot).isEmpty());
// verify statistics have been rolled back
List<String> partitionNames = transaction.getMetastore()
        .getPartitionNames(tableName.getSchemaName(), tableName.getTableName())
        .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName));
for (String partitionName : partitionNames) {
HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName);
assertEquals(partitionStatistics.getRowCount().getAsLong(), 3L);
}
}
}
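The rollback checks above lean on listAllDataFiles(...) to snapshot the table and staging directories before and after the aborted insert. A hedged sketch of the HdfsContext overload, assuming Hadoop's FileSystem API and the hdfsEnvironment field that the Hive test harness carries; the actual helper may differ in details:

// Sketch: recursively collect the paths of all regular files under root.
private Set<String> listAllDataFiles(HdfsContext context, Path root) throws IOException {
    FileSystem fileSystem = hdfsEnvironment.getFileSystem(context, root);
    ImmutableSet.Builder<String> files = ImmutableSet.builder();
    if (fileSystem.exists(root)) {
        RemoteIterator<LocatedFileStatus> iterator = fileSystem.listFiles(root, true);
        while (iterator.hasNext()) {
            LocatedFileStatus status = iterator.next();
            if (status.isFile()) {
                files.add(status.getPath().toString());
            }
        }
    }
    return files.build();
}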