Use of io.trino.plugin.hive.AbstractTestHive.Transaction in project trino by trinodb.
From the class AbstractTestHiveFileSystemAbfs, the method ensureTableExists:
private void ensureTableExists(SchemaTableName table, String tableDirectoryName, Map<String, Object> tableProperties) {
    try (Transaction transaction = newTransaction()) {
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(table, ImmutableList.of(new ColumnMetadata("t_bigint", BIGINT)), ImmutableMap.<String, Object>builder()
                .putAll(tableProperties)
                .put(STORAGE_FORMAT_PROPERTY, HiveStorageFormat.TEXTFILE)
                .put(EXTERNAL_LOCATION_PROPERTY, getBasePath().toString() + "/" + tableDirectoryName)
                .put(BUCKET_COUNT_PROPERTY, 0)
                .put(BUCKETED_BY_PROPERTY, ImmutableList.of())
                .put(SORTED_BY_PROPERTY, ImmutableList.of())
                .buildOrThrow());
        if (!transaction.getMetadata().listTables(newSession(), Optional.of(table.getSchemaName())).contains(table)) {
            transaction.getMetadata().createTable(newSession(), tableMetadata, false);
        }
        transaction.commit();
    }
}
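For context, a minimal sketch of how a concrete ABFS test class might call this helper during setup, inside the same test class; the schema name, table directory name, and the empty extra-properties map are illustrative assumptions, not taken from the Trino source.

// Hypothetical setup call (names are illustrative): ensure an external TEXTFILE
// table rooted under the ABFS base path exists before the file-system tests run.
ensureTableExists(
        new SchemaTableName("tpch", "trino_test_external"),
        "trino_test_external",
        ImmutableMap.of());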
Use of io.trino.plugin.hive.AbstractTestHive.Transaction in project trino by trinodb.
From the class AbstractTestHiveFileSystem, the method dropTable:
private void dropTable(SchemaTableName table) {
    try (Transaction transaction = newTransaction()) {
        transaction.getMetastore().dropTable(newSession(), table.getSchemaName(), table.getTableName());
        transaction.commit();
    }
}
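A teardown step in a test might invoke this helper as sketched below; the schema and table names are assumptions for illustration.

// Hypothetical cleanup call removing a table created earlier in the test.
dropTable(new SchemaTableName("tpch", "tmp_trino_test_create"));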
Use of io.trino.plugin.hive.AbstractTestHive.Transaction in project trino by trinodb.
From the class AbstractTestHiveFileSystem, the method createTable:
private void createTable(SchemaTableName tableName, HiveStorageFormat storageFormat) throws Exception {
    List<ColumnMetadata> columns = ImmutableList.<ColumnMetadata>builder()
            .add(new ColumnMetadata("id", BIGINT))
            .build();
    MaterializedResult data = MaterializedResult.resultBuilder(newSession(), BIGINT)
            .row(1L)
            .row(3L)
            .row(2L)
            .build();

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // begin creating the table
        ConnectorTableMetadata tableMetadata = new ConnectorTableMetadata(tableName, columns, createTableProperties(storageFormat));
        ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(session, tableMetadata, Optional.empty(), NO_RETRIES);

        // write the records
        ConnectorPageSink sink = pageSinkProvider.createPageSink(transaction.getTransactionHandle(), session, outputHandle);
        sink.appendPage(data.toPage());
        Collection<Slice> fragments = getFutureValue(sink.finish());

        // commit the table
        metadata.finishCreateTable(session, outputHandle, fragments, ImmutableList.of());
        transaction.commit();

        // Hack to work around the metastore not being configured for S3 or other file systems.
        // The metastore tries to validate the location when creating the table, which fails
        // without explicit configuration for the file system. We work around that by using a
        // dummy location when creating the table and updating it here to the correct location.
        metastoreClient.updateTableLocation(
                database,
                tableName.getTableName(),
                locationService.getTableWriteInfo(((HiveOutputTableHandle) outputHandle).getLocationHandle(), false).getTargetPath().toString());
    }

    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        // load the new table
        ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values());

        // verify the metadata
        ConnectorTableMetadata tableMetadata = metadata.getTableMetadata(session, getTableHandle(metadata, tableName));
        assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), columns);

        // verify the data
        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, tableHandle);
        ConnectorSplit split = getOnlyElement(getAllSplits(splitSource));
        try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, tableHandle, columnHandles, DynamicFilter.EMPTY)) {
            MaterializedResult result = materializeSourceDataStream(session, pageSource, getTypes(columnHandles));
            assertEqualsIgnoreOrder(result.getMaterializedRows(), data.getMaterializedRows());
        }
        metadata.cleanupQuery(session);
    }
}
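The helper runs two transactions: the first writes the rows and commits the table (then patches the table location directly in the metastore), and the second reads the table back to verify both the column metadata and the data. A test method might drive it roughly as follows; this is a minimal sketch, and the test name, table name, and the dropTable cleanup are assumptions rather than code from the Trino source.

// Hypothetical test exercising createTable for one storage format.
@Test
public void testTableCreation() throws Exception {
    SchemaTableName table = new SchemaTableName("tpch", "tmp_trino_test_create");
    try {
        createTable(table, HiveStorageFormat.TEXTFILE);
    }
    finally {
        dropTable(table);
    }
}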
Use of io.trino.plugin.hive.AbstractTestHive.Transaction in project trino by trinodb.
From the class AbstractTestHiveFileSystem, the method readTable:
protected MaterializedResult readTable(SchemaTableName tableName) throws IOException {
    try (Transaction transaction = newTransaction()) {
        ConnectorMetadata metadata = transaction.getMetadata();
        ConnectorSession session = newSession();

        ConnectorTableHandle table = getTableHandle(metadata, tableName);
        List<ColumnHandle> columnHandles = ImmutableList.copyOf(metadata.getColumnHandles(session, table).values());

        metadata.beginQuery(session);
        ConnectorSplitSource splitSource = getSplits(splitManager, transaction, session, table);

        List<Type> allTypes = getTypes(columnHandles);
        List<Type> dataTypes = getTypes(columnHandles.stream()
                .filter(columnHandle -> !((HiveColumnHandle) columnHandle).isHidden())
                .collect(toImmutableList()));
        MaterializedResult.Builder result = MaterializedResult.resultBuilder(session, dataTypes);

        List<ConnectorSplit> splits = getAllSplits(splitSource);
        for (ConnectorSplit split : splits) {
            try (ConnectorPageSource pageSource = pageSourceProvider.createPageSource(transaction.getTransactionHandle(), session, split, table, columnHandles, DynamicFilter.EMPTY)) {
                MaterializedResult pageSourceResult = materializeSourceDataStream(session, pageSource, allTypes);
                for (MaterializedRow row : pageSourceResult.getMaterializedRows()) {
                    Object[] dataValues = IntStream.range(0, row.getFieldCount())
                            .filter(channel -> !((HiveColumnHandle) columnHandles.get(channel)).isHidden())
                            .mapToObj(row::getField)
                            .toArray();
                    result.row(dataValues);
                }
            }
        }
        metadata.cleanupQuery(session);
        return result.build();
    }
}
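Because readTable filters out hidden columns and returns a MaterializedResult, callers can compare it directly against expected rows. A minimal sketch of such a check inside the test class; the table name and expected values are illustrative assumptions.

// Hypothetical verification comparing the table contents against expected rows.
MaterializedResult expected = MaterializedResult.resultBuilder(newSession(), BIGINT)
        .row(1L)
        .row(2L)
        .row(3L)
        .build();
MaterializedResult actual = readTable(new SchemaTableName("tpch", "trino_test_external"));
assertEqualsIgnoreOrder(actual.getMaterializedRows(), expected.getMaterializedRows());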