Use of io.trino.plugin.hive.authentication.HiveIdentity in project trino by trinodb.
From the class HiveMetastoreTableOperations, method commitToExistingTable:
@Override
protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
{
    String newMetadataLocation = writeNewMetadata(metadata, version + 1);
    HiveIdentity identity = new HiveIdentity(session.getIdentity());
    long lockId = thriftMetastore.acquireTableExclusiveLock(
            identity,
            new AcidTransactionOwner(session.getUser()),
            session.getQueryId(),
            database,
            tableName);
    try {
        Table table;
        try {
            Table currentTable = fromMetastoreApiTable(
                    thriftMetastore.getTable(identity, database, tableName)
                            .orElseThrow(() -> new TableNotFoundException(getSchemaTableName())));
            checkState(currentMetadataLocation != null, "No current metadata location for existing table");
            String metadataLocation = currentTable.getParameters().get(METADATA_LOCATION_PROP);
            if (!currentMetadataLocation.equals(metadataLocation)) {
                throw new CommitFailedException("Metadata location [%s] is not same as table metadata location [%s] for %s",
                        currentMetadataLocation, metadataLocation, getSchemaTableName());
            }
            table = Table.builder(currentTable)
                    .setDataColumns(toHiveColumns(metadata.schema().columns()))
                    .withStorage(storage -> storage.setLocation(metadata.location()))
                    .setParameter(METADATA_LOCATION_PROP, newMetadataLocation)
                    .setParameter(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation)
                    .build();
        }
        catch (RuntimeException e) {
            try {
                io().deleteFile(newMetadataLocation);
            }
            catch (RuntimeException ex) {
                e.addSuppressed(ex);
            }
            throw new TrinoException(ICEBERG_COMMIT_ERROR, format("Failed to commit to table %s.%s", database, tableName), e);
        }
        // TODO privileges should not be replaced for an alter
        PrincipalPrivileges privileges = table.getOwner().map(MetastoreUtil::buildInitialPrivilegeSet).orElse(NO_PRIVILEGES);
        metastore.replaceTable(database, tableName, table, privileges);
    }
    finally {
        thriftMetastore.releaseTableLock(identity, lockId);
    }
    shouldRefresh = true;
}
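The method above is essentially a compare-and-swap commit guarded by a metastore lock: the HiveIdentity built from the session identity is passed to every ThriftMetastore call, so the lock acquisition, the table read, and the lock release all run as the same user. Below is a minimal sketch of that locking discipline, assuming thriftMetastore, session, database, and tableName are in scope as in the snippet; the method names mirror the calls above, but exact signatures can differ between Trino versions.

// Simplified sketch of the lock-guarded commit pattern shown above.
HiveIdentity identity = new HiveIdentity(session.getIdentity());
long lockId = thriftMetastore.acquireTableExclusiveLock(
        identity,
        new AcidTransactionOwner(session.getUser()),
        session.getQueryId(),
        database,
        tableName);
try {
    // Re-read the table under the lock, verify the expected metadata location,
    // then write the table back pointing at the new metadata file.
}
finally {
    // Always release the lock, even if the commit fails.
    thriftMetastore.releaseTableLock(identity, lockId);
}

Releasing the lock in a finally block matters here: a failed commit that left the exclusive lock held would block every subsequent writer to the table.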
A second example, from the class AbstractTestHiveFileSystem, method setup:
protected void setup(String host, int port, String databaseName, boolean s3SelectPushdownEnabled, HdfsConfiguration hdfsConfiguration)
{
    database = databaseName;
    table = new SchemaTableName(database, "trino_test_external_fs");
    tableWithHeader = new SchemaTableName(database, "trino_test_external_fs_with_header");
    tableWithHeaderAndFooter = new SchemaTableName(database, "trino_test_external_fs_with_header_and_footer");
    String random = randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_trino_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    Optional<HostAndPort> proxy = Optional.ofNullable(System.getProperty("hive.metastore.thrift.client.socks-proxy")).map(HostAndPort::fromString);
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication());
    MetastoreConfig metastoreConfig = new MetastoreConfig();
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(
                    new ThriftHiveMetastore(metastoreLocator, new HiveConfig(), metastoreConfig, new ThriftMetastoreConfig(), hdfsEnvironment, false),
                    new HiveIdentity(getHiveSession(config).getIdentity())),
            getBasePath(),
            hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            new CatalogName("hive"), config, metastoreConfig, HiveMetastoreFactory.ofInstance(metastoreClient),
            hdfsEnvironment, hivePartitionManager, newDirectExecutorService(), heartbeatService,
            TESTING_TYPE_MANAGER, NOOP_METADATA_PROVIDER, locationService, partitionUpdateCodec,
            new NodeVersion("test_version"), new NoneHiveRedirectionsProvider(),
            ImmutableSet.of(new PartitionsSystemTableProvider(hivePartitionManager, TESTING_TYPE_MANAGER), new PropertiesSystemTableProvider()),
            new DefaultHiveMaterializedViewMetadataFactory(), SqlStandardAccessControlMetadata::new, NO_REDIRECTIONS, TableInvalidationCallback.NOOP);
    transactionManager = new HiveTransactionManager(metadataFactory);
    splitManager = new HiveSplitManager(
            transactionManager, hivePartitionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()), new BoundedExecutor(executor, config.getMaxSplitIteratorThreads()),
            new CounterStat(), config.getMaxOutstandingSplits(), config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(), config.getMaxPartitionBatchSize(), config.getMaxInitialSplits(),
            config.getSplitLoaderConcurrency(), config.getMaxSplitsPerSecond(), config.getRecursiveDirWalkerEnabled(), TESTING_TYPE_MANAGER);
    TypeOperators typeOperators = new TypeOperators();
    BlockTypeOperators blockTypeOperators = new BlockTypeOperators(typeOperators);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config, hdfsEnvironment), hdfsEnvironment, PAGE_SORTER, HiveMetastoreFactory.ofInstance(metastoreClient),
            new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators), TESTING_TYPE_MANAGER, config, locationService,
            partitionUpdateCodec, new TestingNodeManager("fake-environment"), new HiveEventClient(), getHiveSessionProperties(config), new HiveWriterStats());
    pageSourceProvider = new HivePageSourceProvider(
            TESTING_TYPE_MANAGER, hdfsEnvironment, config, getDefaultHivePageSourceFactories(hdfsEnvironment, config),
            getDefaultHiveRecordCursorProviders(config, hdfsEnvironment), new GenericHiveRecordCursorProvider(hdfsEnvironment, config), Optional.empty());
    onSetupComplete();
}
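Only a small part of this setup concerns HiveIdentity: the identity is derived from a test ConnectorSession and handed to the BridgingHiveMetastore, so every Thrift call issued through the test metastore runs under that identity. A reduced sketch of just that wiring follows, assuming metastoreLocator, hdfsEnvironment, config, getHiveSession, and getBasePath are available as in the snippet above; constructor argument lists may vary between Trino versions.

// Reduced wiring sketch: only the HiveIdentity-related portion of setup().
HiveIdentity identity = new HiveIdentity(getHiveSession(config).getIdentity());
ThriftHiveMetastore thriftMetastore = new ThriftHiveMetastore(
        metastoreLocator, new HiveConfig(), new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, false);
// Bridge the Thrift client to the HiveMetastore interface under the test identity.
HiveMetastore bridged = new BridgingHiveMetastore(thriftMetastore, identity);
metastoreClient = new TestingHiveMetastore(bridged, getBasePath(), hdfsEnvironment);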