Use of io.trino.spi.connector.BeginTableExecuteResult in project trino by trinodb.
Class MetadataManager, method beginTableExecute:
@Override
public BeginTableExecuteResult<TableExecuteHandle, TableHandle> beginTableExecute(Session session, TableExecuteHandle tableExecuteHandle, TableHandle sourceHandle)
{
    CatalogName catalogName = tableExecuteHandle.getCatalogName();
    CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadata(session);
    // Unwrap the engine-level handles and let the connector begin the execute operation
    BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> connectorBeginResult = metadata.beginTableExecute(
            session.toConnectorSession(),
            tableExecuteHandle.getConnectorHandle(),
            sourceHandle.getConnectorHandle());
    // Rewrap the connector's (possibly updated) handles back into engine-level handles
    return new BeginTableExecuteResult<>(
            tableExecuteHandle.withConnectorHandle(connectorBeginResult.getTableExecuteHandle()),
            sourceHandle.withConnectorHandle(connectorBeginResult.getSourceHandle()));
}
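BeginTableExecuteResult itself is only a typed pair of the two handles; the wrapping above relies on nothing more than its constructor and the two getters. As a minimal sketch inside a hypothetical ConnectorMetadata implementation that needs no per-execute state, the SPI method can simply return its inputs unchanged (real connectors, as the Hive, Iceberg, and Delta Lake examples below show, enrich one or both handles first):

    // Sketch: a pass-through beginTableExecute for a hypothetical connector.
    // Only the SPI types and the BeginTableExecuteResult constructor used
    // above are assumed from Trino; the trivial behavior is an assumption.
    @Override
    public BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginTableExecute(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle, ConnectorTableHandle sourceHandle)
    {
        return new BeginTableExecuteResult<>(tableExecuteHandle, sourceHandle);
    }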
Use of io.trino.spi.connector.BeginTableExecuteResult in project trino by trinodb.
Class HiveMetadata, method beginOptimize:
private BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginOptimize(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle, ConnectorTableHandle sourceTableHandle)
{
    HiveTableExecuteHandle hiveExecuteHandle = (HiveTableExecuteHandle) tableExecuteHandle;
    HiveTableHandle hiveSourceTableHandle = (HiveTableHandle) sourceTableHandle;
    // Declare the write to the metastore before any data is moved
    WriteInfo writeInfo = locationService.getQueryWriteInfo(hiveExecuteHandle.getLocationHandle());
    String writeDeclarationId = metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), hiveExecuteHandle.getSchemaTableName());
    // Stamp the declaration id onto the execute handle, and narrow the source scan
    // to files below the size threshold while recording which files were scanned
    return new BeginTableExecuteResult<>(
            hiveExecuteHandle.withWriteDeclarationId(writeDeclarationId),
            hiveSourceTableHandle
                    .withMaxScannedFileSize(hiveExecuteHandle.getMaxScannedFileSize())
                    .withRecordScannedFiles(true));
}
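The withWriteDeclarationId, withMaxScannedFileSize, and withRecordScannedFiles calls follow the copy-on-write idiom Trino handles use: each returns a new handle rather than mutating the receiver, which matters because handles are serialized and passed around the engine. A minimal sketch of the idiom with a hypothetical handle type (the record name and fields are assumptions, not Hive's actual handle):

    import java.util.Optional;

    // Hypothetical handle showing the with* copy-on-write idiom used above.
    public record ExampleExecuteHandle(String procedureName, Optional<String> writeDeclarationId)
    {
        public ExampleExecuteHandle withWriteDeclarationId(String id)
        {
            // Returns a modified copy; the original handle is never mutated.
            return new ExampleExecuteHandle(procedureName, Optional.of(id));
        }
    }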
Use of io.trino.spi.connector.BeginTableExecuteResult in project trino by trinodb.
Class IcebergMetadata, method beginOptimize:
private BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginOptimize(ConnectorSession session, IcebergTableExecuteHandle executeHandle, IcebergTableHandle table)
{
    IcebergOptimizeHandle optimizeHandle = (IcebergOptimizeHandle) executeHandle.getProcedureHandle();
    Table icebergTable = catalog.loadTable(session, table.getSchemaTableName());
    // Open a single Iceberg transaction for the whole OPTIMIZE run;
    // it is committed when the procedure finishes
    verify(transaction == null, "transaction already set");
    transaction = icebergTable.newTransaction();
    // Mark the source handle so the scan only returns files eligible for compaction
    return new BeginTableExecuteResult<>(
            executeHandle,
            table.forOptimize(true, optimizeHandle.getMaxScannedFileSize()));
}
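The verify call is Guava's Verify.verify, which throws VerifyException when the condition is false; here it guards the single-use transaction field so a second begin cannot silently clobber an open transaction. A standalone sketch of that guard pattern, with a hypothetical holder class standing in for IcebergMetadata:

    import static com.google.common.base.Verify.verify;

    // Hypothetical holder demonstrating the verify-then-assign guard above.
    public class TransactionHolder
    {
        private Object transaction; // stands in for Iceberg's Transaction

        public void begin(Object newTransaction)
        {
            // Fails fast with VerifyException instead of overwriting live state
            verify(transaction == null, "transaction already set");
            transaction = newTransaction;
        }
    }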
Use of io.trino.spi.connector.BeginTableExecuteResult in project trino by trinodb.
Class DeltaLakeMetadata, method beginOptimize:
private BeginTableExecuteResult<ConnectorTableExecuteHandle, ConnectorTableHandle> beginOptimize(ConnectorSession session, DeltaLakeTableExecuteHandle executeHandle, DeltaLakeTableHandle table)
{
    DeltaTableOptimizeHandle optimizeHandle = (DeltaTableOptimizeHandle) executeHandle.getProcedureHandle();
    if (!allowWrite(session, table)) {
        // Reject OPTIMIZE on filesystems where writes are not enabled
        String fileSystem = new Path(table.getLocation()).toUri().getScheme();
        throw new TrinoException(NOT_SUPPORTED, format("Optimize is not supported on the %s filesystem", fileSystem));
    }
    checkSupportedWriterVersion(session, table.getSchemaTableName());
    // Pin the table version read at the start so the later commit can detect conflicts
    return new BeginTableExecuteResult<>(
            executeHandle.withProcedureHandle(optimizeHandle.withCurrentVersion(table.getReadVersion())),
            table.forOptimize(true, optimizeHandle.getMaxScannedFileSize()));
}
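The error message interpolates the filesystem scheme taken from the table location's URI through Hadoop's Path. A minimal runnable sketch of the same extraction using plain java.net.URI, with a hypothetical table location:

    import java.net.URI;

    public class SchemeExample
    {
        public static void main(String[] args)
        {
            // Hypothetical table location; Delta resolves it through Hadoop's
            // Path, which yields the same URI scheme.
            String location = "s3://example-bucket/warehouse/orders";
            String fileSystem = URI.create(location).getScheme(); // "s3"
            System.out.printf("Optimize is not supported on the %s filesystem%n", fileSystem);
        }
    }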