Use of io.trino.plugin.hive.LocationService.WriteInfo in project trino by trinodb.
In class HiveMetadata, method beginOptimize:
private BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginOptimize(
        ConnectorSession session,
        ConnectorTableExecuteHandle tableExecuteHandle,
        ConnectorTableHandle sourceTableHandle)
{
    HiveTableExecuteHandle hiveExecuteHandle = (HiveTableExecuteHandle) tableExecuteHandle;
    HiveTableHandle hiveSourceTableHandle = (HiveTableHandle) sourceTableHandle;

    WriteInfo writeInfo = locationService.getQueryWriteInfo(hiveExecuteHandle.getLocationHandle());
    String writeDeclarationId = metastore.declareIntentionToWrite(
            session,
            writeInfo.getWriteMode(),
            writeInfo.getWritePath(),
            hiveExecuteHandle.getSchemaTableName());

    return new BeginTableExecuteResult<>(
            hiveExecuteHandle.withWriteDeclarationId(writeDeclarationId),
            hiveSourceTableHandle
                    .withMaxScannedFileSize(hiveExecuteHandle.getMaxScannedFileSize())
                    .withRecordScannedFiles(true));
}
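Every call site in this listing follows the same pattern: ask locationService.getQueryWriteInfo for a WriteInfo, then hand its write mode and write path to metastore.declareIntentionToWrite. Only getWriteMode() and getWritePath() appear in these snippets, so the following is a minimal, hypothetical value holder of that shape, meant only to make the data flow concrete; it is not the actual io.trino.plugin.hive.LocationService.WriteInfo class, and the write-mode constants other than DIRECT_TO_TARGET_EXISTING_DIRECTORY are assumptions.

import java.util.Objects;

// Hypothetical, simplified stand-in for LocationService.WriteInfo as used above:
// an immutable holder pairing a write path with the mode that governs how the
// written files reach the target directory. Accessor names mirror the snippets;
// the real Trino class may carry additional fields (e.g. a separate target path).
final class SimpleWriteInfo
{
    enum WriteMode
    {
        STAGE_AND_MOVE_TO_TARGET_DIRECTORY,   // write to a staging dir, move on commit
        DIRECT_TO_TARGET_NEW_DIRECTORY,       // write straight into a new target dir
        DIRECT_TO_TARGET_EXISTING_DIRECTORY   // write straight into an existing target dir
    }

    private final String writePath;
    private final WriteMode writeMode;

    SimpleWriteInfo(String writePath, WriteMode writeMode)
    {
        this.writePath = Objects.requireNonNull(writePath, "writePath is null");
        this.writeMode = Objects.requireNonNull(writeMode, "writeMode is null");
    }

    String getWritePath()
    {
        return writePath;
    }

    WriteMode getWriteMode()
    {
        return writeMode;
    }
}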
Use of io.trino.plugin.hive.LocationService.WriteInfo in project trino by trinodb.
In class HiveMetadata, method beginDelete:
@Override
public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle, RetryMode retryMode)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);

    // DELETE is only supported under restrictive conditions: no query retries,
    // autocommit mode, and not a Spark-bucketed table
    if (retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive tables is not supported with query retries enabled");
    }
    if (!autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Spark bucketed tables is not supported");
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    AcidTransaction transaction = metastore.beginDelete(session, table);
    // Register the intended write so commit/rollback can finalize or clean up the write path
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), handle.getSchemaTableName());
    return handle.withTransaction(transaction);
}
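beginDelete, finishDelete, beginInsert, and finishUpdate all reach WriteInfo the same way: locationService.forExistingTable resolves a LocationHandle for the table, and locationService.getQueryWriteInfo turns it into the per-query WriteInfo. Below is a self-contained sketch of that two-step lookup with hypothetical names and a deliberately simplified choice between a staging path and the target directory; it illustrates the shape of the lookup, not Trino's LocationService logic.

// Illustrative sketch (hypothetical names, simplified types) of the lookup shared
// by the methods in this listing: resolve a location handle for an existing table,
// then derive the per-query write info from it.
final class LocationLookupSketch
{
    record LocationHandleSketch(String targetPath, String stagingPath) {}

    record WriteInfoSketch(String writePath, String writeMode) {}

    // Pick where this query should write: the staging path if one was assigned,
    // otherwise directly into the existing target directory.
    static WriteInfoSketch queryWriteInfo(LocationHandleSketch handle)
    {
        if (handle.stagingPath() != null) {
            return new WriteInfoSketch(handle.stagingPath(), "STAGE_AND_MOVE_TO_TARGET_DIRECTORY");
        }
        return new WriteInfoSketch(handle.targetPath(), "DIRECT_TO_TARGET_EXISTING_DIRECTORY");
    }
}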
Use of io.trino.plugin.hive.LocationService.WriteInfo in project trino by trinodb.
In class HiveMetadata, method finishDelete:
@Override
public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHandle, Collection<Slice> fragments)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    checkArgument(handle.isAcidDelete(), "handle should be a delete handle, but is %s", handle);
    requireNonNull(fragments, "fragments is null");

    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);

    List<PartitionAndStatementId> partitionAndStatementIds = fragments.stream()
            .map(Slice::getBytes)
            .map(PartitionAndStatementId.CODEC::fromJson)
            .collect(toImmutableList());

    HdfsContext context = new HdfsContext(session);
    for (PartitionAndStatementId ps : partitionAndStatementIds) {
        createOrcAcidVersionFile(context, new Path(ps.getDeleteDeltaDirectory()));
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.finishRowLevelDelete(session, table.getDatabaseName(), table.getTableName(), writeInfo.getWritePath(), partitionAndStatementIds);
}
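The fragments collection in finishDelete carries the workers' results back to the coordinator: each Slice holds JSON that PartitionAndStatementId.CODEC decodes into an object exposing, among other things, getDeleteDeltaDirectory(). Below is a rough sketch of that encode/decode round trip using Jackson; the PartitionWriteResult class, its field names, and the sample paths are hypothetical stand-ins, and Trino uses its own codec rather than a bare ObjectMapper.

import com.fasterxml.jackson.databind.ObjectMapper;

public final class FragmentRoundTripSketch
{
    // Hypothetical stand-in for the per-partition metadata carried in a fragment.
    public static class PartitionWriteResult
    {
        public String partitionName;
        public String deleteDeltaDirectory;

        public PartitionWriteResult() {}

        public PartitionWriteResult(String partitionName, String deleteDeltaDirectory)
        {
            this.partitionName = partitionName;
            this.deleteDeltaDirectory = deleteDeltaDirectory;
        }
    }

    public static void main(String[] args) throws Exception
    {
        ObjectMapper mapper = new ObjectMapper();

        // Worker side: serialize the result of writing a delete delta.
        byte[] fragment = mapper.writeValueAsBytes(
                new PartitionWriteResult("ds=2024-01-01", "/warehouse/t/ds=2024-01-01/delete_delta_0000017_0000017"));

        // Coordinator side (finishDelete): decode each fragment back into an object.
        PartitionWriteResult decoded = mapper.readValue(fragment, PartitionWriteResult.class);
        System.out.println(decoded.deleteDeltaDirectory());
    }
}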
Use of io.trino.plugin.hive.LocationService.WriteInfo in project trino by trinodb.
In class HiveMetadata, method beginInsert:
@Override
public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle, List<ColumnHandle> columns, RetryMode retryMode)
{
    SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    checkTableIsWritable(table, writesToNonManagedTablesEnabled);

    for (Column column : table.getDataColumns()) {
        if (!isWritableType(column.getType())) {
            throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table %s with column type %s not supported", tableName, column.getType()));
        }
    }

    boolean isTransactional = isTransactionalTable(table.getParameters());
    if (isTransactional && retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported with query retries enabled");
    }
    if (isTransactional && !autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Inserting into Spark bucketed tables is not supported");
    }

    List<HiveColumnHandle> handles = hiveColumnHandles(table, typeManager, getTimestampPrecision(session)).stream()
            .filter(columnHandle -> !columnHandle.isHidden())
            .collect(toImmutableList());

    HiveStorageFormat tableStorageFormat = extractHiveStorageFormat(table);

    // Tables that declare header or footer rows in their parameters are only partially writable
    Optional.ofNullable(table.getParameters().get(SKIP_HEADER_COUNT_KEY))
            .map(Integer::parseInt)
            .ifPresent(headerSkipCount -> {
                if (headerSkipCount > 1) {
                    throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with value of %s property greater than 1 is not supported", SKIP_HEADER_COUNT_KEY));
                }
            });
    if (table.getParameters().containsKey(SKIP_FOOTER_COUNT_KEY)) {
        throw new TrinoException(NOT_SUPPORTED, format("Inserting into Hive table with %s property not supported", SKIP_FOOTER_COUNT_KEY));
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    AcidTransaction transaction = isTransactional ? metastore.beginInsert(session, table) : NO_ACID_TRANSACTION;

    HiveInsertTableHandle result = new HiveInsertTableHandle(
            tableName.getSchemaName(),
            tableName.getTableName(),
            handles,
            metastore.generatePageSinkMetadata(tableName),
            locationHandle,
            table.getStorage().getBucketProperty(),
            tableStorageFormat,
            isRespectTableFormat(session) ? tableStorageFormat : getHiveStorageFormat(session),
            transaction,
            retryMode != NO_RETRIES);

    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    if (getInsertExistingPartitionsBehavior(session) == InsertExistingPartitionsBehavior.OVERWRITE
            && writeInfo.getWriteMode() == DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
        if (isTransactional) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in transactional tables doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
        // Partition overwrite operation is nonatomic thus can't and shouldn't be used in non autocommit context.
        if (!autoCommit) {
            throw new TrinoException(NOT_SUPPORTED, "Overwriting existing partition in non auto commit context doesn't support DIRECT_TO_TARGET_EXISTING_DIRECTORY write mode");
        }
    }
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), tableName);
    return result;
}
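The final guard in beginInsert is where WriteInfo actually drives a decision: if the session asks to overwrite existing partitions and the write mode is DIRECT_TO_TARGET_EXISTING_DIRECTORY, the overwrite happens in place and cannot be rolled back, so it is refused for transactional tables and outside autocommit. Here is a compact sketch of that decision with hypothetical enum and method names mirroring the logic above; it is not Trino's code.

// Hypothetical, simplified version of the overwrite guard in beginInsert.
final class OverwriteGuardSketch
{
    enum PartitionBehavior { APPEND, OVERWRITE, ERROR }

    enum WriteMode { STAGE_AND_MOVE_TO_TARGET_DIRECTORY, DIRECT_TO_TARGET_NEW_DIRECTORY, DIRECT_TO_TARGET_EXISTING_DIRECTORY }

    static void check(PartitionBehavior behavior, WriteMode writeMode, boolean transactional, boolean autoCommit)
    {
        if (behavior != PartitionBehavior.OVERWRITE || writeMode != WriteMode.DIRECT_TO_TARGET_EXISTING_DIRECTORY) {
            return; // staged writes and plain appends stay atomic, nothing to refuse
        }
        if (transactional) {
            throw new UnsupportedOperationException("overwrite with direct-to-target writes is not supported for transactional tables");
        }
        if (!autoCommit) {
            // Overwriting an existing directory in place cannot be rolled back,
            // so it is only allowed when the statement commits on its own.
            throw new UnsupportedOperationException("overwrite with direct-to-target writes requires autocommit");
        }
    }
}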
Use of io.trino.plugin.hive.LocationService.WriteInfo in project trino by trinodb.
In class HiveMetadata, method finishUpdate:
@Override
public void finishUpdate(ConnectorSession session, ConnectorTableHandle tableHandle, Collection<Slice> fragments)
{
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    checkArgument(handle.isAcidUpdate(), "handle should be a update handle, but is %s", handle);
    requireNonNull(fragments, "fragments is null");

    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));

    List<PartitionAndStatementId> partitionAndStatementIds = fragments.stream()
            .map(Slice::getBytes)
            .map(PartitionAndStatementId.CODEC::fromJson)
            .collect(toImmutableList());

    HdfsContext context = new HdfsContext(session);
    for (PartitionAndStatementId ps : partitionAndStatementIds) {
        createOrcAcidVersionFile(context, new Path(ps.getDeleteDeltaDirectory()));
    }

    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.finishUpdate(session, table.getDatabaseName(), table.getTableName(), writeInfo.getWritePath(), partitionAndStatementIds);
}
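Before finishDelete and finishUpdate hand the write path to the metastore, they call createOrcAcidVersionFile for each decoded delete-delta directory. Hive's ACID readers expect a small marker file in each delta directory recording the ACID file-format version; the sketch below writes such a marker with the Hadoop FileSystem API. The file name "_orc_acid_version" and the contents "2" follow Hive's convention and are assumptions here, not taken from Trino's createOrcAcidVersionFile implementation.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class AcidVersionMarkerSketch
{
    // Assumed per Hive convention: a marker file named "_orc_acid_version" containing "2".
    private static final String ACID_VERSION_FILE = "_orc_acid_version";
    private static final String ACID_VERSION = "2";

    static void writeMarker(Configuration conf, Path deltaDirectory) throws Exception
    {
        FileSystem fs = deltaDirectory.getFileSystem(conf);
        Path marker = new Path(deltaDirectory, ACID_VERSION_FILE);
        try (FSDataOutputStream out = fs.create(marker, true)) {
            out.write(ACID_VERSION.getBytes(StandardCharsets.UTF_8));
        }
    }
}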