Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class CreateEmptyPartitionProcedure, method doCreateEmptyPartition:
private void doCreateEmptyPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List<String> partitionColumnNames, List<String> partitionValues)
{
    TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(session.getIdentity(), true);
    HiveTableHandle tableHandle = (HiveTableHandle) hiveMetadata.getTableHandle(session, new SchemaTableName(schemaName, tableName));
    if (tableHandle == null) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", new SchemaTableName(schemaName, tableName)));
    }
    accessControl.checkCanInsertIntoTable(null, new SchemaTableName(schemaName, tableName));

    // Validate that the caller-supplied partition columns match the table's actual partition columns
    List<String> actualPartitionColumnNames = tableHandle.getPartitionColumns().stream()
            .map(HiveColumnHandle::getName)
            .collect(toImmutableList());
    if (!Objects.equals(partitionColumnNames, actualPartitionColumnNames)) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Provided partition column names do not match actual partition column names: " + actualPartitionColumnNames);
    }

    HiveMetastoreClosure metastore = hiveMetadata.getMetastore().unsafeGetRawHiveMetastoreClosure();
    if (metastore.getPartition(schemaName, tableName, partitionValues).isPresent()) {
        throw new TrinoException(ALREADY_EXISTS, "Partition already exists");
    }

    // Create the empty partition by running an insert that writes zero rows
    HiveInsertTableHandle hiveInsertTableHandle = (HiveInsertTableHandle) hiveMetadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES);
    String partitionName = FileUtils.makePartName(actualPartitionColumnNames, partitionValues);
    WriteInfo writeInfo = locationService.getPartitionWriteInfo(hiveInsertTableHandle.getLocationHandle(), Optional.empty(), partitionName);
    Slice serializedPartitionUpdate = Slices.wrappedBuffer(
            partitionUpdateJsonCodec.toJsonBytes(
                    new PartitionUpdate(partitionName, UpdateMode.NEW, writeInfo.getWritePath(), writeInfo.getTargetPath(), ImmutableList.of(), 0, 0, 0)));
    hiveMetadata.finishInsert(session, hiveInsertTableHandle, ImmutableList.of(serializedPartitionUpdate), ImmutableList.of());
    hiveMetadata.commit();
}
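For reference, FileUtils.makePartName (from org.apache.hadoop.hive.common) builds the Hive-style col=value partition path passed to getPartitionWriteInfo above. A minimal standalone sketch, with made-up column and value literals:

import java.util.List;

import org.apache.hadoop.hive.common.FileUtils;

public class MakePartNameExample
{
    public static void main(String[] args)
    {
        // Hypothetical partition columns and values; makePartName joins them as
        // "col=value" segments separated by '/', escaping special characters
        List<String> columns = List.of("ds", "country");
        List<String> values = List.of("2024-01-01", "US");
        System.out.println(FileUtils.makePartName(columns, values));
        // prints: ds=2024-01-01/country=US
    }
}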
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class ThriftHiveMetastore, method acquireSharedLock:
private void acquireSharedLock(HiveIdentity identity, AcidTransactionOwner transactionOwner, String queryId, long transactionId, List<SchemaTableName> fullTables, List<HivePartition> partitions, DataOperationType operation, boolean isDynamicPartitionWrite)
{
    requireNonNull(operation, "operation is null");
    requireNonNull(transactionOwner, "transactionOwner is null");
    requireNonNull(queryId, "queryId is null");

    if (fullTables.isEmpty() && partitions.isEmpty()) {
        return;
    }

    LockRequestBuilder request = new LockRequestBuilder(queryId)
            .setTransactionId(transactionId)
            .setUser(transactionOwner.toString());

    // Add one lock component per whole table, then one per individual partition
    for (SchemaTableName table : fullTables) {
        request.addLockComponent(createLockComponentForOperation(table, operation, isDynamicPartitionWrite, Optional.empty()));
    }
    for (HivePartition partition : partitions) {
        request.addLockComponent(createLockComponentForOperation(partition.getTableName(), operation, isDynamicPartitionWrite, Optional.of(partition.getPartitionId())));
    }
    acquireLock(identity, format("hive transaction %s for query %s", transactionId, queryId), request.build());
}
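The createLockComponentForOperation helper is not part of this excerpt. A plausible sketch of its shape, assuming the Hive metastore's LockComponentBuilder API; the exact flags Trino sets (for example transactional-table markers) may differ:

import java.util.Optional;

import org.apache.hadoop.hive.metastore.LockComponentBuilder;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;

import io.trino.spi.connector.SchemaTableName;

// Sketch only: Trino's actual helper may set additional flags
static LockComponent createLockComponentForOperation(SchemaTableName table, DataOperationType operation, boolean isDynamicPartitionWrite, Optional<String> partitionName)
{
    LockComponentBuilder builder = new LockComponentBuilder();
    builder.setShared(); // shared (read) lock rather than exclusive
    builder.setOperationType(operation);
    builder.setDbName(table.getSchemaName());
    builder.setTableName(table.getTableName());
    builder.setIsDynamicPartitionWrite(isDynamicPartitionWrite);
    partitionName.ifPresent(builder::setPartitionName); // partition-level lock when present
    return builder.build();
}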
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class SyncPartitionMetadataProcedure, method doSyncPartitionMetadata:
private void doSyncPartitionMetadata(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, String mode, boolean caseSensitive)
{
    SyncMode syncMode = toSyncMode(mode);
    HdfsContext hdfsContext = new HdfsContext(session);
    SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);

    Table table = metastore.getTable(schemaName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    if (table.getPartitionColumns().isEmpty()) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Table is not partitioned: " + schemaTableName);
    }

    // ADD registers new partitions, DROP removes stale ones, FULL does both
    if (syncMode == SyncMode.ADD || syncMode == SyncMode.FULL) {
        accessControl.checkCanInsertIntoTable(null, new SchemaTableName(schemaName, tableName));
    }
    if (syncMode == SyncMode.DROP || syncMode == SyncMode.FULL) {
        accessControl.checkCanDeleteFromTable(null, new SchemaTableName(schemaName, tableName));
    }

    Path tableLocation = new Path(table.getStorage().getLocation());

    Set<String> partitionsToAdd;
    Set<String> partitionsToDrop;
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, tableLocation);
        List<String> partitionsInMetastore = metastore.getPartitionNames(schemaName, tableName)
                .orElseThrow(() -> new TableNotFoundException(schemaTableName));
        List<String> partitionsInFileSystem = listDirectory(fileSystem, fileSystem.getFileStatus(tableLocation), table.getPartitionColumns(), table.getPartitionColumns().size(), caseSensitive).stream()
                .map(fileStatus -> fileStatus.getPath().toUri())
                .map(uri -> tableLocation.toUri().relativize(uri).getPath())
                .collect(toImmutableList());

        // partitions in file system but not in metastore
        partitionsToAdd = difference(partitionsInFileSystem, partitionsInMetastore);
        // partitions in metastore but not in file system
        partitionsToDrop = difference(partitionsInMetastore, partitionsInFileSystem);
    }
    catch (IOException e) {
        throw new TrinoException(HIVE_FILESYSTEM_ERROR, e);
    }

    syncPartitions(partitionsToAdd, partitionsToDrop, syncMode, metastore, session, table);
}
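The difference helper called above is not shown in this excerpt. A minimal sketch of what it plausibly does, using Guava's Sets.difference (the helper name matches the call sites, but the body here is an assumption):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import com.google.common.collect.Sets;

// Assumed implementation: elements of a that are not present in b
private static Set<String> difference(List<String> a, List<String> b)
{
    return Sets.difference(new HashSet<>(a), new HashSet<>(b));
}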
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class UnregisterPartitionProcedure, method doUnregisterPartition:
private void doUnregisterPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List<String> partitionColumn, List<String> partitionValues)
{
    SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName);
    SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();
    Table table = metastore.getTable(schemaName, tableName)
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));

    accessControl.checkCanDeleteFromTable(null, schemaTableName);
    checkIsPartitionedTable(table);
    checkPartitionColumns(table, partitionColumn);

    String partitionName = FileUtils.makePartName(partitionColumn, partitionValues);
    Partition partition = metastore.unsafeGetRawHiveMetastoreClosure()
            .getPartition(schemaName, tableName, partitionValues)
            .orElseThrow(() -> new TrinoException(NOT_FOUND, format("Partition '%s' does not exist", partitionName)));

    // Drop only the metastore entry; deleteData=false leaves the partition's files in place
    metastore.dropPartition(session, table.getDatabaseName(), table.getTableName(), partition.getValues(), false);
    metastore.commit();
}
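checkIsPartitionedTable and checkPartitionColumns are validation helpers defined elsewhere in the class. A plausible sketch of the latter, assuming it mirrors the column-name check seen in doCreateEmptyPartition above:

import static com.google.common.collect.ImmutableList.toImmutableList;
import static io.trino.spi.StandardErrorCode.INVALID_PROCEDURE_ARGUMENT;

import java.util.List;
import java.util.Objects;

import io.trino.plugin.hive.metastore.Column;
import io.trino.plugin.hive.metastore.Table;
import io.trino.spi.TrinoException;

// Assumed shape: compare caller-supplied names against the table's partition columns
private static void checkPartitionColumns(Table table, List<String> partitionColumns)
{
    List<String> actualPartitionColumnNames = table.getPartitionColumns().stream()
            .map(Column::getName)
            .collect(toImmutableList());
    if (!Objects.equals(partitionColumns, actualPartitionColumnNames)) {
        throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Provided partition column names do not match actual partition column names: " + actualPartitionColumnNames);
    }
}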
Use of io.trino.spi.connector.SchemaTableName in project trino by trinodb.
The class AbstractTestHive, method setup:
protected final void setup(String databaseName, HiveConfig hiveConfig, HiveMetastore hiveMetastore, HdfsEnvironment hdfsConfiguration)
{
    setupHive(databaseName);

    metastoreClient = hiveMetastore;
    hdfsEnvironment = hdfsConfiguration;
    HivePartitionManager partitionManager = new HivePartitionManager(hiveConfig);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"),
            HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment,
            partitionManager,
            10,
            10,
            10,
            false,
            false,
            false,
            true,
            true,
            false,
            1000,
            Optional.empty(),
            true,
            TESTING_TYPE_MANAGER,
            NOOP_METADATA_PROVIDER,
            locationService,
            partitionUpdateCodec,
            newFixedThreadPool(2),
            heartbeatService,
            TEST_SERVER_VERSION,
            (session, tableHandle) -> {
                if (!tableHandle.getTableName().contains("apply_redirection_tester")) {
                    return Optional.empty();
                }
                return Optional.of(new TableScanRedirectApplicationResult(
                        new CatalogSchemaTableName("hive", databaseName, "mock_redirection_target"),
                        ImmutableMap.of(),
                        TupleDomain.all()));
            },
            ImmutableSet.of(
                    new PartitionsSystemTableProvider(partitionManager, TESTING_TYPE_MANAGER),
                    new PropertiesSystemTableProvider()),
            metastore -> new NoneHiveMaterializedViewMetadata()
            {
                @Override
                public Optional<ConnectorMaterializedViewDefinition> getMaterializedView(ConnectorSession session, SchemaTableName viewName)
                {
                    if (!viewName.getTableName().contains("materialized_view_tester")) {
                        return Optional.empty();
                    }
                    return Optional.of(new ConnectorMaterializedViewDefinition(
                            "dummy_view_sql",
                            Optional.empty(),
                            Optional.empty(),
                            Optional.empty(),
                            ImmutableList.of(new ConnectorMaterializedViewDefinition.Column("abc", TypeId.of("type"))),
                            Optional.empty(),
                            Optional.of("alice"),
                            ImmutableMap.of()));
                }
            },
            SqlStandardAccessControlMetadata::new,
            NO_REDIRECTIONS,
            TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager,
            partitionManager,
            new NamenodeStats(),
            hdfsEnvironment,
            new CachingDirectoryLister(hiveConfig),
            directExecutor(),
            new CounterStat(),
            100,
            hiveConfig.getMaxOutstandingSplitsSize(),
            hiveConfig.getMinPartitionBatchSize(),
            hiveConfig.getMaxPartitionBatchSize(),
            hiveConfig.getMaxInitialSplits(),
            hiveConfig.getSplitLoaderConcurrency(),
            hiveConfig.getMaxSplitsPerSecond(),
            false,
            TESTING_TYPE_MANAGER);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(hiveConfig, hdfsEnvironment),
            hdfsEnvironment,
            PAGE_SORTER,
            HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(JOIN_COMPILER, BLOCK_TYPE_OPERATORS),
            TESTING_TYPE_MANAGER,
            getHiveConfig(),
            locationService,
            partitionUpdateCodec,
            new TestingNodeManager("fake-environment"),
            new HiveEventClient(),
            getHiveSessionProperties(hiveConfig),
            new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER,
            hdfsEnvironment,
            hiveConfig,
            getDefaultHivePageSourceFactories(hdfsEnvironment, hiveConfig),
            getDefaultHiveRecordCursorProviders(hiveConfig, hdfsEnvironment),
            new GenericHiveRecordCursorProvider(hdfsEnvironment, hiveConfig),
            Optional.empty());
    nodePartitioningProvider = new HiveNodePartitioningProvider(new TestingNodeManager("fake-environment"), TESTING_TYPE_MANAGER);
}
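The mock getMaterializedView above matches on viewName.getTableName(). One detail worth keeping in mind when writing such checks: the SchemaTableName constructor normalizes both of its parts to lower case, so matching against a lower-case substring works regardless of how the caller capitalized the name. A small standalone sketch of that behavior:

import io.trino.spi.connector.SchemaTableName;

public class SchemaTableNameExample
{
    public static void main(String[] args)
    {
        // SchemaTableName lower-cases both the schema and the table name
        SchemaTableName name = new SchemaTableName("TPCH", "Materialized_View_Tester");
        System.out.println(name.getSchemaName()); // tpch
        System.out.println(name.getTableName());  // materialized_view_tester
        System.out.println(name);                 // tpch.materialized_view_tester
    }
}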