Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb: class RaptorPageSinkProvider, method createPageSink.
@Override
public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorOutputTableHandle tableHandle, PageSinkContext pageSinkContext) {
    checkArgument(!pageSinkContext.isCommitRequired(), "Raptor connector does not support page sink commit");
    RaptorOutputTableHandle handle = (RaptorOutputTableHandle) tableHandle;
    return new RaptorPageSink(
            new HdfsContext(session, handle.getSchemaName(), handle.getTableName()),
            pageSorter, storageManager, temporalFunction, handle.getTransactionId(),
            toColumnIds(handle.getColumnHandles()), handle.getColumnTypes(),
            toColumnIds(handle.getSortColumnHandles()), handle.getSortOrders(),
            handle.getBucketCount(), toColumnIds(handle.getBucketColumnHandles()),
            handle.getTemporalColumnHandle(), getWriterMaxBufferSize(session), maxAllowedFilesPerWriter);
}
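Across the snippets on this page, HdfsContext is constructed in three shapes: from the session alone, from the session plus schema and table names (as in the Raptor and Iceberg page sinks), and from the session plus schema, table, table path, and a new-table flag (as in addPartition further down). The following is a minimal illustrative sketch rather than Presto code; the helper class and method names are made up, and only the HdfsContext constructor calls mirror what the examples on this page show.

import com.facebook.presto.hive.HdfsContext;
import com.facebook.presto.spi.ConnectorSession;

// Hypothetical helper; only the HdfsContext constructor calls are taken from the examples on this page.
final class HdfsContextExamples {
    private HdfsContextExamples() {}

    // Session-only context, as used by RaptorPageSourceProvider below.
    static HdfsContext forSession(ConnectorSession session) {
        return new HdfsContext(session);
    }

    // Table-scoped context, as used by RaptorPageSink above and IcebergPageSink below.
    static HdfsContext forTable(ConnectorSession session, String schemaName, String tableName) {
        return new HdfsContext(session, schemaName, tableName);
    }
}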
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb: class RaptorPageSourceProvider, method createPageSource.
@Override
public ConnectorPageSource createPageSource(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorSplit split, List<ColumnHandle> columns, SplitContext splitContext) {
    RaptorSplit raptorSplit = (RaptorSplit) split;
    OptionalInt bucketNumber = raptorSplit.getBucketNumber();
    TupleDomain<RaptorColumnHandle> predicate = raptorSplit.getEffectivePredicate();
    ReaderAttributes attributes = ReaderAttributes.from(session);
    OptionalLong transactionId = raptorSplit.getTransactionId();
    Optional<Map<String, Type>> columnTypes = raptorSplit.getColumnTypes();
    boolean tableSupportsDeltaDelete = raptorSplit.isTableSupportsDeltaDelete();
    HdfsContext context = new HdfsContext(session);
    Map<UUID, UUID> shardDeltaMap = raptorSplit.getShardDeltaMap();
    if (raptorSplit.getShardUuids().size() == 1) {
        UUID shardUuid = raptorSplit.getShardUuids().iterator().next();
        return createPageSource(context, DEFAULT_HIVE_FILE_CONTEXT, shardUuid, Optional.ofNullable(shardDeltaMap.get(shardUuid)),
                tableSupportsDeltaDelete, bucketNumber, columns, predicate, attributes, transactionId, columnTypes);
    }
    Iterator<ConnectorPageSource> iterator = raptorSplit.getShardUuids().stream()
            .map(shardUuid -> createPageSource(context, DEFAULT_HIVE_FILE_CONTEXT, shardUuid, Optional.ofNullable(shardDeltaMap.get(shardUuid)),
                    tableSupportsDeltaDelete, bucketNumber, columns, predicate, attributes, transactionId, columnTypes))
            .iterator();
    return new ConcatPageSource(iterator);
}
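The page source provider opens a single shard directly and wraps multiple shards in a ConcatPageSource. Below is a minimal sketch of that dispatch in isolation; ShardPageSources and openShard are hypothetical, and the ConcatPageSource import path is assumed, since only the class name appears in the snippet above.

import com.facebook.presto.raptor.util.ConcatPageSource; // package assumed
import com.facebook.presto.spi.ConnectorPageSource;

import java.util.Iterator;
import java.util.Set;
import java.util.UUID;
import java.util.function.Function;

// Hypothetical helper mirroring the single-shard vs. multi-shard branch above.
final class ShardPageSources {
    private ShardPageSources() {}

    static ConnectorPageSource forShards(Set<UUID> shardUuids, Function<UUID, ConnectorPageSource> openShard) {
        if (shardUuids.size() == 1) {
            // A single shard needs no concatenation.
            return openShard.apply(shardUuids.iterator().next());
        }
        // Open each shard lazily and concatenate the resulting page sources.
        Iterator<ConnectorPageSource> sources = shardUuids.stream().map(openShard).iterator();
        return new ConcatPageSource(sources);
    }
}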
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb: class SemiTransactionalHiveMetastore, method addPartition.
public synchronized void addPartition(ConnectorSession session, String databaseName, String tableName, String tablePath, boolean isNewTable, Partition partition, Path currentLocation, PartitionStatistics statistics) {
    setShared();
    checkArgument(getPrestoQueryId(partition).isPresent());
    Map<List<String>, Action<PartitionAndMore>> partitionActionsOfTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>());
    Action<PartitionAndMore> oldPartitionAction = partitionActionsOfTable.get(partition.getValues());
    HdfsContext context = new HdfsContext(session, databaseName, tableName, tablePath, isNewTable);
    if (oldPartitionAction == null) {
        partitionActionsOfTable.put(partition.getValues(), new Action<>(ActionType.ADD, new PartitionAndMore(partition, currentLocation, Optional.empty(), statistics, statistics), context));
        return;
    }
    switch (oldPartitionAction.getType()) {
        case DROP: {
            if (!oldPartitionAction.getContext().getIdentity().getUser().equals(session.getUser())) {
                throw new PrestoException(TRANSACTION_CONFLICT, "Operation on the same partition with different user in the same transaction is not supported");
            }
            partitionActionsOfTable.put(partition.getValues(), new Action<>(ActionType.ALTER, new PartitionAndMore(partition, currentLocation, Optional.empty(), statistics, statistics), context));
            break;
        }
        case ADD:
        case ALTER:
        case INSERT_EXISTING:
            throw new PrestoException(ALREADY_EXISTS, format("Partition already exists for table '%s.%s': %s", databaseName, tableName, partition.getValues()));
        default:
            throw new IllegalStateException("Unknown action type");
    }
}
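The method above effectively merges a new ADD action into whatever action is already recorded for the partition: no prior action stores a plain ADD, a prior DROP by the same user collapses into an ALTER, and any other prior action is a conflict. A stand-alone sketch of that merge rule follows; the enum and class here are hypothetical simplifications, not Presto's ActionType or its exceptions.

// Hypothetical, simplified reconstruction of the merge rule applied by addPartition above.
enum PartitionActionType { ADD, ALTER, DROP, INSERT_EXISTING }

final class PartitionActionMerge {
    private PartitionActionMerge() {}

    static PartitionActionType mergeAdd(PartitionActionType previous, boolean sameUser) {
        if (previous == null) {
            // No earlier action on this partition: record a plain ADD.
            return PartitionActionType.ADD;
        }
        switch (previous) {
            case DROP:
                if (!sameUser) {
                    throw new IllegalStateException("Operation on the same partition with different user in the same transaction is not supported");
                }
                // DROP followed by ADD within one transaction becomes an ALTER.
                return PartitionActionType.ALTER;
            case ADD:
            case ALTER:
            case INSERT_EXISTING:
                throw new IllegalStateException("Partition already exists");
            default:
                throw new IllegalStateException("Unknown action type");
        }
    }
}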
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb: class SemiTransactionalHiveMetastore, method commitShared.
@GuardedBy("this")
private void commitShared() {
    checkHoldsLock();
    Committer committer = new Committer();
    try {
        for (Map.Entry<SchemaTableName, Action<TableAndMore>> entry : tableActions.entrySet()) {
            SchemaTableName schemaTableName = entry.getKey();
            Action<TableAndMore> action = entry.getValue();
            HdfsContext hdfsContext = action.getContext();
            MetastoreContext metastoreContext = new MetastoreContext(
                    hdfsContext.getIdentity(),
                    hdfsContext.getQueryId().orElse(""),
                    hdfsContext.getClientInfo(),
                    hdfsContext.getSource(),
                    hdfsContext.getSession().flatMap(MetastoreUtil::getMetastoreHeaders),
                    hdfsContext.getSession().map(MetastoreUtil::isUserDefinedTypeEncodingEnabled).orElse(false),
                    columnConverterProvider);
            switch (action.getType()) {
                case DROP:
                    committer.prepareDropTable(metastoreContext, schemaTableName);
                    break;
                case ALTER:
                    committer.prepareAlterTable();
                    break;
                case ADD:
                    committer.prepareAddTable(metastoreContext, hdfsContext, action.getData());
                    break;
                case INSERT_EXISTING:
                    committer.prepareInsertExistingTable(metastoreContext, hdfsContext, action.getData());
                    break;
                default:
                    throw new IllegalStateException("Unknown action type");
            }
        }
        for (Map.Entry<SchemaTableName, Map<List<String>, Action<PartitionAndMore>>> tableEntry : partitionActions.entrySet()) {
            SchemaTableName schemaTableName = tableEntry.getKey();
            for (Map.Entry<List<String>, Action<PartitionAndMore>> partitionEntry : tableEntry.getValue().entrySet()) {
                List<String> partitionValues = partitionEntry.getKey();
                Action<PartitionAndMore> action = partitionEntry.getValue();
                HdfsContext hdfsContext = action.getContext();
                MetastoreContext metastoreContext = new MetastoreContext(
                        hdfsContext.getIdentity(),
                        hdfsContext.getQueryId().orElse(""),
                        hdfsContext.getClientInfo(),
                        hdfsContext.getSource(),
                        hdfsContext.getSession().flatMap(MetastoreUtil::getMetastoreHeaders),
                        hdfsContext.getSession().map(MetastoreUtil::isUserDefinedTypeEncodingEnabled).orElse(false),
                        columnConverterProvider);
                switch (action.getType()) {
                    case DROP:
                        committer.prepareDropPartition(metastoreContext, schemaTableName, partitionValues);
                        break;
                    case ALTER:
                        committer.prepareAlterPartition(metastoreContext, hdfsContext, action.getData());
                        break;
                    case ADD:
                        committer.prepareAddPartition(metastoreContext, hdfsContext, action.getData());
                        break;
                    case INSERT_EXISTING:
                        committer.prepareInsertExistingPartition(metastoreContext, hdfsContext, action.getData());
                        break;
                    default:
                        throw new IllegalStateException("Unknown action type");
                }
            }
        }
        // Wait for all renames submitted for "INSERT_EXISTING" action to finish
        ListenableFuture<?> listenableFutureAggregate = whenAllSucceed(committer.getFileRenameFutures()).call(() -> null, directExecutor());
        try {
            getFutureValue(listenableFutureAggregate, PrestoException.class);
        } catch (RuntimeException e) {
            listenableFutureAggregate.cancel(true);
            throw e;
        }
        // At this point, all file system operations, whether asynchronously issued or not, have completed successfully.
        // We are moving on to metastore operations now.
        committer.executeAddTableOperations();
        committer.executeAlterPartitionOperations();
        committer.executeAddPartitionOperations();
        committer.executeUpdateStatisticsOperations();
    } catch (Throwable t) {
        committer.cancelUnstartedAsyncRenames();
        committer.undoUpdateStatisticsOperations();
        committer.undoAddPartitionOperations();
        committer.undoAddTableOperations();
        committer.waitForAsyncRenamesSuppressThrowables();
        // fileRenameFutures must all come back before any file system cleanups are carried out.
        // Otherwise, files that should be deleted may be created after cleanup is done.
        committer.executeCleanupTasksForAbort(declaredIntentionsToWrite);
        committer.executeRenameTasksForAbort();
        // Partition directory must be put back before relevant metastore operation can be undone
        committer.undoAlterPartitionOperations();
        rollbackShared();
        throw t;
    }
    try {
        if (!committer.metastoreDeleteOperations.isEmpty()) {
            committer.executeMetastoreDeleteOperations();
        }
        // If control flow reached this point, this commit is considered successful no matter
        // what happens later. The only kind of operations that haven't been carried out yet
        // are cleanups.
        // The program control flow will go to finally next. And cleanup will run because
        // moveForwardInFinally has been set to false.
    } finally {
        // In this method, all operations are best-effort clean up operations.
        // If any operation fails, the error will be logged and ignored.
        // Additionally, other clean up operations should still be attempted.
        // Execute deletion tasks
        committer.executeDeletionTasksForFinish();
        // Clean up temporary tables
        deleteTemporaryTableDirectories(declaredIntentionsToWrite, hdfsEnvironment);
        // Clean up empty staging directories (that may recursively contain empty directories)
        committer.deleteEmptyStagingDirectories(declaredIntentionsToWrite);
        // Clean up root temp directories
        deleteTempPathRootDirectory(declaredIntentionsToWrite, hdfsEnvironment);
    }
}
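Before touching the metastore, commitShared blocks on every pending file rename and cancels the whole batch if any of them fails. Below is a minimal sketch of that barrier using Guava futures; the class and method names are hypothetical, and the real code maps failures to PrestoException through getFutureValue rather than rethrowing a RuntimeException.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;

import static com.google.common.util.concurrent.MoreExecutors.directExecutor;

// Hypothetical helper: wait for all rename futures, cancelling the aggregate on failure.
final class RenameBarrier {
    private RenameBarrier() {}

    static void awaitAll(List<ListenableFuture<?>> fileRenameFutures) {
        ListenableFuture<?> aggregate = Futures.whenAllSucceed(fileRenameFutures).call(() -> null, directExecutor());
        try {
            aggregate.get();
        }
        catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            aggregate.cancel(true);
            throw new RuntimeException(e);
        }
        catch (Exception e) {
            // Cancel renames that have not started yet, then propagate the failure.
            aggregate.cancel(true);
            throw new RuntimeException(e);
        }
    }
}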
Use of com.facebook.presto.hive.HdfsContext in project presto by prestodb: class IcebergPageSinkProvider, method createPageSink.
private ConnectorPageSink createPageSink(ConnectorSession session, IcebergWritableTableHandle tableHandle) {
    HdfsContext hdfsContext = new HdfsContext(session, tableHandle.getSchemaName(), tableHandle.getTableName());
    Schema schema = SchemaParser.fromJson(tableHandle.getSchemaAsJson());
    PartitionSpec partitionSpec = PartitionSpecParser.fromJson(schema, tableHandle.getPartitionSpecAsJson());
    LocationProvider locationProvider = getLocationProvider(
            new SchemaTableName(tableHandle.getSchemaName(), tableHandle.getTableName()),
            tableHandle.getOutputPath(), tableHandle.getStorageProperties());
    return new IcebergPageSink(schema, partitionSpec, locationProvider, fileWriterFactory, pageIndexerFactory,
            hdfsEnvironment, hdfsContext, tableHandle.getInputColumns(), jsonCodec, session, tableHandle.getFileFormat());
}
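The LocationProvider resolved above decides where new data files land. A short usage sketch follows, assuming Iceberg's org.apache.iceberg.io.LocationProvider interface; the helper class and the file name are purely illustrative and not part of the Presto code shown above.

import org.apache.iceberg.io.LocationProvider;

// Hypothetical helper: asks the table's LocationProvider where a new data file should be written.
final class DataFileLocations {
    private DataFileLocations() {}

    static String newDataFilePath(LocationProvider locationProvider, String fileName) {
        // The single-argument overload is enough for an unpartitioned write.
        return locationProvider.newDataLocation(fileName);
    }
}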