Use of io.trino.spi.connector.ConnectorOutputTableHandle in project trino by trinodb.
From the class HiveMetadata, method finishCreateTable:
@Override
public Optional<ConnectorOutputMetadata> finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection<Slice> fragments, Collection<ComputedStatistics> computedStatistics)
{
    HiveOutputTableHandle handle = (HiveOutputTableHandle) tableHandle;
    List<PartitionUpdate> partitionUpdates = fragments.stream()
            .map(Slice::getBytes)
            .map(partitionUpdateCodec::fromJson)
            .collect(toImmutableList());
    WriteInfo writeInfo = locationService.getQueryWriteInfo(handle.getLocationHandle());
    Table table = buildTableObject(
            session.getQueryId(),
            handle.getSchemaName(),
            handle.getTableName(),
            handle.getTableOwner(),
            handle.getInputColumns(),
            handle.getTableStorageFormat(),
            handle.getPartitionedBy(),
            handle.getBucketProperty(),
            handle.getAdditionalTableParameters(),
            Optional.of(writeInfo.getTargetPath()),
            handle.isExternal(),
            prestoVersion,
            accessControlMetadata.isUsingSystemSecurity());
    PrincipalPrivileges principalPrivileges = accessControlMetadata.isUsingSystemSecurity() ? NO_PRIVILEGES : buildInitialPrivilegeSet(handle.getTableOwner());
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);
    if (handle.getBucketProperty().isPresent() && isCreateEmptyBucketFiles(session)) {
        List<PartitionUpdate> partitionUpdatesForMissingBuckets = computePartitionUpdatesForMissingBuckets(session, handle, table, true, partitionUpdates);
        // replace partitionUpdates before creating the empty files so that those files will be cleaned up if we end up rolling back
        partitionUpdates = PartitionUpdate.mergePartitionUpdates(concat(partitionUpdates, partitionUpdatesForMissingBuckets));
        for (PartitionUpdate partitionUpdate : partitionUpdatesForMissingBuckets) {
            Optional<Partition> partition = table.getPartitionColumns().isEmpty() ? Optional.empty() : Optional.of(buildPartitionObject(session, table, partitionUpdate));
            createEmptyFiles(session, partitionUpdate.getWritePath(), table, partition, partitionUpdate.getFileNames());
        }
        if (handle.isTransactional()) {
            AcidTransaction transaction = handle.getTransaction();
            List<String> partitionNames = partitionUpdates.stream().map(PartitionUpdate::getName).collect(toImmutableList());
            metastore.addDynamicPartitions(handle.getSchemaName(), handle.getTableName(), partitionNames, transaction.getAcidTransactionId(), transaction.getWriteId(), AcidOperation.CREATE_TABLE);
        }
    }
    Map<String, Type> columnTypes = handle.getInputColumns().stream()
            .collect(toImmutableMap(HiveColumnHandle::getName, column -> column.getHiveType().getType(typeManager)));
    Map<List<String>, ComputedStatistics> partitionComputedStatistics = createComputedStatisticsToPartitionMap(computedStatistics, handle.getPartitionedBy(), columnTypes);
    PartitionStatistics tableStatistics;
    if (table.getPartitionColumns().isEmpty()) {
        HiveBasicStatistics basicStatistics = partitionUpdates.stream()
                .map(PartitionUpdate::getStatistics)
                .reduce((first, second) -> reduce(first, second, ADD))
                .orElse(createZeroStatistics());
        tableStatistics = createPartitionStatistics(basicStatistics, columnTypes, getColumnStatistics(partitionComputedStatistics, ImmutableList.of()));
    }
    else {
        tableStatistics = new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of());
    }
    if (handle.getPartitionedBy().isEmpty()) {
        List<String> fileNames;
        if (partitionUpdates.isEmpty()) {
            // creating an empty table via CTAS ... WITH NO DATA
            fileNames = ImmutableList.of();
        }
        else {
            fileNames = getOnlyElement(partitionUpdates).getFileNames();
        }
        metastore.createTable(session, table, principalPrivileges, Optional.of(writeInfo.getWritePath()), Optional.of(fileNames), false, tableStatistics, handle.isRetriesEnabled());
    }
    else {
        metastore.createTable(session, table, principalPrivileges, Optional.of(writeInfo.getWritePath()), Optional.empty(), false, tableStatistics, false);
    }
    if (!handle.getPartitionedBy().isEmpty()) {
        if (isRespectTableFormat(session)) {
            verify(handle.getPartitionStorageFormat() == handle.getTableStorageFormat());
        }
        for (PartitionUpdate update : partitionUpdates) {
            Partition partition = buildPartitionObject(session, table, update);
            PartitionStatistics partitionStatistics = createPartitionStatistics(update.getStatistics(), columnTypes, getColumnStatistics(partitionComputedStatistics, partition.getValues()));
            metastore.addPartition(session, handle.getSchemaName(), handle.getTableName(), partition, update.getWritePath(), Optional.of(update.getFileNames()), partitionStatistics, handle.isRetriesEnabled());
        }
    }
    return Optional.of(new HiveWrittenPartitions(partitionUpdates.stream().map(PartitionUpdate::getName).collect(toImmutableList())));
}
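The method is the commit half of a CTAS: workers serialize their per-writer state into the Slice fragments when ConnectorPageSink.finish() completes, and finishCreateTable decodes them back into PartitionUpdates before touching the metastore. A minimal sketch of the same contract in a hypothetical connector follows; SimpleOutputTableHandle, WriterState, writerStateCodec, and catalog are illustrative names, not Trino SPI members.

// Minimal sketch of the finishCreateTable contract, under the assumptions above.
@Override
public Optional<ConnectorOutputMetadata> finishCreateTable(
        ConnectorSession session,
        ConnectorOutputTableHandle tableHandle,
        Collection<Slice> fragments,
        Collection<ComputedStatistics> computedStatistics)
{
    SimpleOutputTableHandle handle = (SimpleOutputTableHandle) tableHandle;
    // Each fragment is the serialized state one writer returned from
    // ConnectorPageSink.finish() on a worker.
    List<WriterState> writers = fragments.stream()
            .map(Slice::getBytes)
            .map(writerStateCodec::fromJson)
            .collect(toImmutableList());
    // Publish the table only after every fragment has arrived, so a failed
    // query never leaves a visible half-written table behind.
    catalog.commitTable(handle.getSchemaName(), handle.getTableName(), writers);
    return Optional.empty();
}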
Use of io.trino.spi.connector.ConnectorOutputTableHandle in project trino by trinodb.
From the class TestRaptorMetadata, method testTransactionTableWrite:
@Test
public void testTransactionTableWrite()
{
    // start table creation
    long transactionId = 1;
    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(SESSION, getOrdersTable(), Optional.empty());

    // transaction is in progress
    assertTrue(transactionExists(transactionId));
    assertNull(transactionSuccessful(transactionId));

    // commit table creation
    metadata.finishCreateTable(SESSION, outputHandle, ImmutableList.of(), ImmutableList.of());
    assertTrue(transactionExists(transactionId));
    assertTrue(transactionSuccessful(transactionId));
}
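The assertions imply a tri-state flag: null while the transaction is open, TRUE after commit. By implication the third state is FALSE after a rollback; a hypothetical follow-on assertion for that path (not part of this test, and assuming Raptor marks rather than deletes failed transactions) would read:

// Hypothetical rollback counterpart: had the query aborted instead of calling
// finishCreateTable, the transaction row would remain but be marked failed.
assertTrue(transactionExists(transactionId));
assertEquals(transactionSuccessful(transactionId), Boolean.FALSE);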
Use of io.trino.spi.connector.ConnectorOutputTableHandle in project trino by trinodb.
From the class TestRaptorMetadata, method testCreateBucketedTableAsSelect:
@Test
public void testCreateBucketedTableAsSelect()
{
    assertNull(metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS));

    ConnectorTableMetadata ordersTable = getOrdersTable(ImmutableMap.of(
            BUCKET_COUNT_PROPERTY, 32,
            BUCKETED_ON_PROPERTY, ImmutableList.of("orderkey", "custkey")));

    ConnectorTableLayout layout = metadata.getNewTableLayout(SESSION, ordersTable).get();
    assertEquals(layout.getPartitionColumns(), ImmutableList.of("orderkey", "custkey"));
    assertTrue(layout.getPartitioning().isPresent());
    assertInstanceOf(layout.getPartitioning().get(), RaptorPartitioningHandle.class);
    RaptorPartitioningHandle partitioning = (RaptorPartitioningHandle) layout.getPartitioning().get();
    assertEquals(partitioning.getDistributionId(), 1);

    ConnectorOutputTableHandle outputHandle = metadata.beginCreateTable(SESSION, ordersTable, Optional.of(layout));
    metadata.finishCreateTable(SESSION, outputHandle, ImmutableList.of(), ImmutableList.of());

    ConnectorTableHandle tableHandle = metadata.getTableHandle(SESSION, DEFAULT_TEST_ORDERS);
    assertInstanceOf(tableHandle, RaptorTableHandle.class);
    RaptorTableHandle raptorTableHandle = (RaptorTableHandle) tableHandle;
    assertEquals(raptorTableHandle.getTableId(), 1);

    long tableId = raptorTableHandle.getTableId();
    MetadataDao metadataDao = dbi.onDemand(MetadataDao.class);
    assertTableColumnsEqual(metadataDao.listBucketColumns(tableId), ImmutableList.of(
            new TableColumn(DEFAULT_TEST_ORDERS, "orderkey", BIGINT, 1, 0, OptionalInt.of(0), OptionalInt.empty(), false),
            new TableColumn(DEFAULT_TEST_ORDERS, "custkey", BIGINT, 2, 1, OptionalInt.of(1), OptionalInt.empty(), false)));
    assertEquals(raptorTableHandle.getBucketCount(), OptionalInt.of(32));
    assertEquals(getTableDistributionId(tableId), Long.valueOf(1));

    metadata.dropTable(SESSION, tableHandle);
}
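The two TableColumn assertions carry the bucketing metadata; annotated positionally, the arguments read as follows (the parameter names are inferred from the values used here, not quoted from Raptor's source):

// Positional reading of the assertion above; names are inferred, not authoritative.
new TableColumn(
        DEFAULT_TEST_ORDERS,  // table
        "orderkey",           // column name
        BIGINT,               // column type
        1,                    // column ID
        0,                    // ordinal position
        OptionalInt.of(0),    // bucket ordinal: first bucketing column
        OptionalInt.empty(),  // sort ordinal: not a sort column
        false);               // temporal flag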
Use of io.trino.spi.connector.ConnectorOutputTableHandle in project trino by trinodb.
From the class TestMemoryMetadata, method testReadTableBeforeCreationCompleted:
@Test
public void testReadTableBeforeCreationCompleted()
{
    assertNoTables();

    SchemaTableName tableName = new SchemaTableName("default", "temp_table");
    ConnectorOutputTableHandle table = metadata.beginCreateTable(SESSION, new ConnectorTableMetadata(tableName, ImmutableList.of(), ImmutableMap.of()), Optional.empty(), NO_RETRIES);

    List<SchemaTableName> tableNames = metadata.listTables(SESSION, Optional.empty());
    assertEquals(tableNames.size(), 1, "Expected exactly one table");

    metadata.finishCreateTable(SESSION, table, ImmutableList.of(), ImmutableList.of());
}
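The test pins down an ordering guarantee: the table name becomes listable at beginCreateTable, before any data is written. A sketch of how a connector can provide that behavior; the field names and the MemoryOutputTableHandle constructor shape below are illustrative assumptions, not MemoryMetadata's actual internals.

// Illustrative sketch: register the name eagerly so listTables() sees the
// table before finishCreateTable() runs. Names below are assumptions.
@Override
public synchronized ConnectorOutputTableHandle beginCreateTable(
        ConnectorSession session,
        ConnectorTableMetadata tableMetadata,
        Optional<ConnectorTableLayout> layout,
        RetryMode retryMode)
{
    long tableId = nextTableId.getAndIncrement();     // hypothetical id counter
    tableIds.put(tableMetadata.getTable(), tableId);  // immediately visible to listTables()
    return new MemoryOutputTableHandle(tableId);      // hypothetical handle shape
}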