Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
The class SystemSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    SystemTableHandle table = (SystemTableHandle) tableHandle;
    TupleDomain<ColumnHandle> constraint = table.getConstraint();
    SystemTable systemTable = tables.getSystemTable(session, table.getSchemaTableName())
            .orElseThrow(() -> new TableNotFoundException(table.getSchemaTableName()));
    Distribution tableDistributionMode = systemTable.getDistribution();
    if (tableDistributionMode == SINGLE_COORDINATOR) {
        HostAddress address = nodeManager.getCurrentNode().getHostAndPort();
        ConnectorSplit split = new SystemSplit(address, constraint);
        return new FixedSplitSource(ImmutableList.of(split));
    }
    ImmutableList.Builder<ConnectorSplit> splits = ImmutableList.builder();
    ImmutableSet.Builder<InternalNode> nodes = ImmutableSet.builder();
    if (tableDistributionMode == ALL_COORDINATORS) {
        nodes.addAll(nodeManager.getCoordinators());
    } else if (tableDistributionMode == ALL_NODES) {
        nodes.addAll(nodeManager.getNodes(ACTIVE));
    }
    Set<InternalNode> nodeSet = nodes.build();
    for (InternalNode node : nodeSet) {
        splits.add(new SystemSplit(node.getHostAndPort(), constraint));
    }
    return new FixedSplitSource(splits.build());
}
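The split count here is driven entirely by the system table's Distribution: SINGLE_COORDINATOR produces a single split addressed to the current node, while ALL_COORDINATORS and ALL_NODES fan out one split per matching node. The node-selection rule can be summarized by the sketch below; it is illustrative only (a hypothetical helper, not part of SystemSplitManager) and assumes the same nodeManager field used above.

// Illustrative helper (not in the Trino source): maps a system table's
// Distribution to the set of nodes that getSplits above would create one
// SystemSplit for.
private Set<InternalNode> nodesFor(Distribution distribution)
{
    switch (distribution) {
        case SINGLE_COORDINATOR:
            return ImmutableSet.of(nodeManager.getCurrentNode());
        case ALL_COORDINATORS:
            return ImmutableSet.copyOf(nodeManager.getCoordinators());
        case ALL_NODES:
            return ImmutableSet.copyOf(nodeManager.getNodes(ACTIVE));
    }
    throw new IllegalArgumentException("Unknown distribution: " + distribution);
}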
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
The class ExampleSplitManager, method getSplits.
@Override
public ConnectorSplitSource getSplits(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorTableHandle connectorTableHandle, SplitSchedulingStrategy splitSchedulingStrategy, DynamicFilter dynamicFilter) {
    ExampleTableHandle tableHandle = (ExampleTableHandle) connectorTableHandle;
    ExampleTable table = exampleClient.getTable(tableHandle.getSchemaName(), tableHandle.getTableName());
    // this can happen if the table is removed during a query
    if (table == null) {
        throw new TableNotFoundException(tableHandle.toSchemaTableName());
    }
    List<ConnectorSplit> splits = new ArrayList<>();
    for (URI uri : table.getSources()) {
        splits.add(new ExampleSplit(uri.toString()));
    }
    Collections.shuffle(splits);
    return new FixedSplitSource(splits);
}
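The explicit null check plays the same role as the orElseThrow in the SystemSplitManager example above. Written with Optional it would look like the sketch below, shown only for comparison and not how the example connector is actually written:

// Equivalent to the null check above, expressed with Optional; illustrative only.
ExampleTable table = Optional.ofNullable(exampleClient.getTable(tableHandle.getSchemaName(), tableHandle.getTableName()))
        .orElseThrow(() -> new TableNotFoundException(tableHandle.toSchemaTableName()));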
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
The class HiveMetadata, method finishOptimize.
private void finishOptimize(ConnectorSession session, ConnectorTableExecuteHandle tableExecuteHandle, Collection<Slice> fragments, List<Object> splitSourceInfo) {
    // TODO lots of this is copied from finishInsert; refactoring opportunity
    HiveTableExecuteHandle handle = (HiveTableExecuteHandle) tableExecuteHandle;
    checkArgument(handle.getWriteDeclarationId().isPresent(), "no write declaration id present in tableExecuteHandle");
    List<PartitionUpdate> partitionUpdates = fragments.stream()
            .map(Slice::getBytes)
            .map(partitionUpdateCodec::fromJson)
            .collect(toImmutableList());
    HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat();
    partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates);
    Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName())
            .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName()));
    if (!table.getStorage().getStorageFormat().getInputFormat().equals(tableStorageFormat.getInputFormat()) && isRespectTableFormat(session)) {
        throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during optimize");
    }
    // Support for bucketed tables is disabled, mostly so we do not need to think about grouped execution in an initial version. Possibly no change apart from testing required.
    verify(handle.getBucketProperty().isEmpty(), "bucketed table not supported");
    for (PartitionUpdate partitionUpdate : partitionUpdates) {
        // sanity check
        verify(partitionUpdate.getUpdateMode() == APPEND, "Expected partitionUpdate mode to be APPEND but got %s", partitionUpdate.getUpdateMode());
        if (partitionUpdate.getName().isEmpty()) {
            // operating on an unpartitioned table
            if (!table.getStorage().getStorageFormat().getInputFormat().equals(handle.getPartitionStorageFormat().getInputFormat()) && isRespectTableFormat(session)) {
                throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during optimize");
            }
            metastore.finishInsertIntoExistingTable(session, handle.getSchemaName(), handle.getTableName(), partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), PartitionStatistics.empty(), handle.isRetriesEnabled());
        } else {
            // operating on a partition
            List<String> partitionValues = toPartitionValues(partitionUpdate.getName());
            metastore.finishInsertIntoExistingPartition(session, handle.getSchemaName(), handle.getTableName(), partitionValues, partitionUpdate.getWritePath(), partitionUpdate.getFileNames(), PartitionStatistics.empty(), handle.isRetriesEnabled());
        }
    }
    // get filesystem
    FileSystem fs;
    try {
        fs = hdfsEnvironment.getFileSystem(new HdfsContext(session), new Path(table.getStorage().getLocation()));
    } catch (IOException e) {
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, e);
    }
    // paths to be deleted
    Set<Path> scannedPaths = splitSourceInfo.stream()
            .map(file -> new Path((String) file))
            .collect(toImmutableSet());
    // track remaining files to be deleted for error reporting
    Set<Path> remainingFilesToDelete = new HashSet<>(scannedPaths);
    // delete loop
    boolean someDeleted = false;
    Optional<Path> firstScannedPath = Optional.empty();
    try {
        for (Path scannedPath : scannedPaths) {
            if (firstScannedPath.isEmpty()) {
                firstScannedPath = Optional.of(scannedPath);
            }
            retry().run("delete " + scannedPath, () -> fs.delete(scannedPath, false));
            someDeleted = true;
            remainingFilesToDelete.remove(scannedPath);
        }
    } catch (Exception e) {
        if (!someDeleted && (firstScannedPath.isEmpty() || exists(fs, firstScannedPath.get()))) {
            // fs.delete above could throw an exception even though the file was actually deleted.
            throw new TrinoException(HIVE_FILESYSTEM_ERROR, "Error while deleting original files", e);
        }
        // If we already deleted some original files we disable the rollback routine so written files are not deleted.
        // The reported exception message and log entry list the files which need to be cleaned up by the user manually.
        // Until the table is cleaned up there will be duplicate rows present.
        metastore.dropDeclaredIntentionToWrite(handle.getWriteDeclarationId().get());
        String errorMessage = "Error while deleting data files in FINISH phase of OPTIMIZE for table " + table.getTableName() + "; remaining files need to be deleted manually: " + remainingFilesToDelete;
        log.error(e, "%s", errorMessage);
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, errorMessage, e);
    }
}
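The exists(fs, path) call in the catch block refers to a helper that is not shown in this snippet; a minimal sketch of what it presumably does, assuming it only wraps FileSystem.exists and converts the checked IOException into a TrinoException:

// Hedged sketch of the exists helper referenced above; the real HiveMetadata
// implementation may differ.
private static boolean exists(FileSystem fs, Path path)
{
    try {
        return fs.exists(path);
    } catch (IOException e) {
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, "Failed checking path: " + path, e);
    }
}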
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
The class HiveMetadata, method beginDelete.
@Override
public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTableHandle tableHandle, RetryMode retryMode) {
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);
    if (retryMode != NO_RETRIES) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive tables is not supported with query retries enabled");
    }
    if (!autoCommit) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Hive transactional tables is not supported in explicit transactions (use autocommit mode)");
    }
    if (isSparkBucketedTable(table)) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting from Spark bucketed tables is not supported");
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    AcidTransaction transaction = metastore.beginDelete(session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.declareIntentionToWrite(session, writeInfo.getWriteMode(), writeInfo.getWritePath(), handle.getSchemaTableName());
    return handle.withTransaction(transaction);
}
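ensureTableSupportsDelete is called here and in finishDelete below but is not shown. Row-level delete in Hive requires a full-ACID (transactional) table, so a minimal sketch of such a check could look like the following; the exact conditions and error message are assumptions, not taken from this snippet:

// Hypothetical sketch: reject deletes on tables that are not full-ACID transactional tables.
private static void ensureTableSupportsDelete(Table table)
{
    if (!"true".equalsIgnoreCase(table.getParameters().get("transactional"))) {
        throw new TrinoException(NOT_SUPPORTED, "Deleting rows requires a transactional (ACID) Hive table");
    }
}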
Use of io.trino.spi.connector.TableNotFoundException in project trino by trinodb.
The class HiveMetadata, method finishDelete.
@Override
public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHandle, Collection<Slice> fragments) {
    HiveTableHandle handle = (HiveTableHandle) tableHandle;
    checkArgument(handle.isAcidDelete(), "handle should be a delete handle, but is %s", handle);
    requireNonNull(fragments, "fragments is null");
    SchemaTableName tableName = handle.getSchemaTableName();
    Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(tableName));
    ensureTableSupportsDelete(table);
    List<PartitionAndStatementId> partitionAndStatementIds = fragments.stream()
            .map(Slice::getBytes)
            .map(PartitionAndStatementId.CODEC::fromJson)
            .collect(toImmutableList());
    HdfsContext context = new HdfsContext(session);
    for (PartitionAndStatementId ps : partitionAndStatementIds) {
        createOrcAcidVersionFile(context, new Path(ps.getDeleteDeltaDirectory()));
    }
    LocationHandle locationHandle = locationService.forExistingTable(metastore, session, table);
    WriteInfo writeInfo = locationService.getQueryWriteInfo(locationHandle);
    metastore.finishRowLevelDelete(session, table.getDatabaseName(), table.getTableName(), writeInfo.getWritePath(), partitionAndStatementIds);
}
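createOrcAcidVersionFile writes a marker file into each delete-delta directory before the metastore commit. A hedged sketch is shown below; the file name "_orc_acid_version" and its contents are assumptions based on the Hive ACID directory layout, not taken from this snippet:

// Hypothetical sketch of the marker-file writer; the real implementation in Trino may differ.
private void createOrcAcidVersionFile(HdfsContext context, Path deleteDeltaDirectory)
{
    try {
        FileSystem fs = hdfsEnvironment.getFileSystem(context, deleteDeltaDirectory);
        try (FSDataOutputStream out = fs.create(new Path(deleteDeltaDirectory, "_orc_acid_version"), true)) {
            out.write("2".getBytes(StandardCharsets.UTF_8));
        }
    } catch (IOException e) {
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, "Failed to write ORC ACID version file in " + deleteDeltaDirectory, e);
    }
}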