Use of io.prestosql.plugin.hive.metastore.Database in project hetu-core by openlookeng.
From the class GlueHiveMetastore, method createDatabase:
@Override
public void createDatabase(HiveIdentity identity, Database inputDatabase) {
    Database database = inputDatabase;
    // If the caller supplied no location, derive one under the configured default directory
    if (!database.getLocation().isPresent() && defaultDir.isPresent()) {
        String databaseLocation = new Path(defaultDir.get(), database.getDatabaseName()).toString();
        database = Database.builder(database).setLocation(Optional.of(databaseLocation)).build();
    }
    try {
        DatabaseInput databaseInput = GlueInputConverter.convertDatabase(database);
        glueClient.createDatabase(new CreateDatabaseRequest().withCatalogId(catalogId).withDatabaseInput(databaseInput));
    } catch (AlreadyExistsException e) {
        throw new SchemaAlreadyExistsException(database.getDatabaseName());
    } catch (AmazonServiceException e) {
        throw new PrestoException(HiveErrorCode.HIVE_METASTORE_ERROR, e);
    }
    // Create the backing directory only after the metastore has accepted the database
    if (database.getLocation().isPresent()) {
        HiveWriteUtils.createDirectory(hdfsContext, hdfsEnvironment, new Path(database.getLocation().get()));
    }
}
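For reference, a minimal caller sketch (not from the project): it builds a Database with no explicit location, so the method above derives one under defaultDir. The metastore and session variables and all literal values are assumptions for illustration.

    // Hypothetical caller; "metastore" is a configured GlueHiveMetastore, "session" a ConnectorSession.
    Database newDatabase = Database.builder()
            .setDatabaseName("sales")           // hypothetical schema name
            .setOwnerName("admin")              // hypothetical owner
            .setOwnerType(PrincipalType.USER)
            .setLocation(Optional.empty())      // let createDatabase fall back to defaultDir/sales
            .build();
    metastore.createDatabase(new HiveIdentity(session), newDatabase);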
Use of io.prestosql.plugin.hive.metastore.Database in project hetu-core by openlookeng.
From the class GlueHiveMetastore, method getAllViews:
@Override
public Optional<List<String>> getAllViews(String databaseName) {
    try {
        List<String> views = new ArrayList<>();
        String nextToken = null;
        // Glue pages its results; loop until no continuation token is returned
        do {
            GetTablesResult result = glueClient.getTables(new GetTablesRequest()
                    .withCatalogId(catalogId)
                    .withDatabaseName(databaseName)
                    .withNextToken(nextToken));
            result.getTableList().stream()
                    .filter(table -> VIRTUAL_VIEW.name().equals(table.getTableType()))
                    .forEach(table -> views.add(table.getName()));
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return Optional.of(views);
    } catch (EntityNotFoundException e) {
        // database does not exist
        return Optional.empty();
    } catch (AmazonServiceException e) {
        throw new PrestoException(HiveErrorCode.HIVE_METASTORE_ERROR, e);
    }
}
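The do/while loop is the standard AWS Glue continuation-token pattern: each GetTablesResult carries a nextToken, and a null token marks the last page. A hedged sketch of the same loop generalized to an arbitrary table filter, assuming the glueClient and catalogId fields from above (getTableNames itself is a hypothetical helper, not code from the project):

    // Hypothetical helper: page through Glue, collecting names of tables that match a filter.
    private List<String> getTableNames(String databaseName, Predicate<com.amazonaws.services.glue.model.Table> filter) {
        List<String> names = new ArrayList<>();
        String nextToken = null;
        do {
            GetTablesResult result = glueClient.getTables(new GetTablesRequest()
                    .withCatalogId(catalogId)
                    .withDatabaseName(databaseName)
                    .withNextToken(nextToken));
            result.getTableList().stream()
                    .filter(filter)
                    .forEach(table -> names.add(table.getName()));
            nextToken = result.getNextToken();
        } while (nextToken != null);
        return names;
    }

With such a helper, getAllViews reduces to the filter VIRTUAL_VIEW.name().equals(table.getTableType()).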
Use of io.prestosql.plugin.hive.metastore.Database in project hetu-core by openlookeng.
From the class AbstractTestHiveFileSystem, method setup:
protected void setup(String host, int port, String databaseName, Function<HiveConfig, HdfsConfiguration> hdfsConfigurationProvider, boolean s3SelectPushdownEnabled) {
    database = databaseName;
    table = new SchemaTableName(database, "presto_test_external_fs");
    String random = UUID.randomUUID().toString().toLowerCase(ENGLISH).replace("-", "");
    temporaryCreateTable = new SchemaTableName(database, "tmp_presto_test_create_" + random);
    config = new HiveConfig().setS3SelectPushdownEnabled(s3SelectPushdownEnabled);
    String proxy = System.getProperty("hive.metastore.thrift.client.socks-proxy");
    if (proxy != null) {
        config.setMetastoreSocksProxy(HostAndPort.fromString(proxy));
    }
    MetastoreLocator metastoreLocator = new TestingMetastoreLocator(config, host, port);
    ExecutorService executors = newCachedThreadPool(daemonThreadsNamed("hive-%s"));
    ExecutorService executorRefresh = newCachedThreadPool(daemonThreadsNamed("hive-refresh-%s"));
    HivePartitionManager hivePartitionManager = new HivePartitionManager(TYPE_MANAGER, config);
    HdfsConfiguration hdfsConfiguration = hdfsConfigurationProvider.apply(config);
    hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, config, new NoHdfsAuthentication());
    metastoreClient = new TestingHiveMetastore(
            new BridgingHiveMetastore(new ThriftHiveMetastore(metastoreLocator, new ThriftHiveMetastoreConfig())),
            executors, executorRefresh, config, getBasePath(), hdfsEnvironment);
    locationService = new HiveLocationService(hdfsEnvironment);
    JsonCodec<PartitionUpdate> partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class);
    metadataFactory = new HiveMetadataFactory(
            config, metastoreClient, hdfsEnvironment, hivePartitionManager,
            newDirectExecutorService(), vacuumExecutorService, heartbeatService, vacuumExecutorService,
            TYPE_MANAGER, locationService, partitionUpdateCodec, new HiveTypeTranslator(),
            new NodeVersion("test_version"), SqlStandardAccessControlMetadata::new);
    transactionManager = new HiveTransactionManager();
    splitManager = new HiveSplitManager(
            transactionHandle -> ((HiveMetadata) transactionManager.get(transactionHandle)).getMetastore(),
            hivePartitionManager, new NamenodeStats(), hdfsEnvironment,
            new CachingDirectoryLister(new HiveConfig()),
            new BoundedExecutor(executors, config.getMaxSplitIteratorThreads()),
            new HiveCoercionPolicy(TYPE_MANAGER), new CounterStat(),
            config.getMaxOutstandingSplits(), config.getMaxOutstandingSplitsSize(),
            config.getMinPartitionBatchSize(), config.getMaxPartitionBatchSize(),
            config.getMaxInitialSplits(), config.getSplitLoaderConcurrency(),
            config.getMaxSplitsPerSecond(), config.getRecursiveDirWalkerEnabled(),
            null, config);
    pageSinkProvider = new HivePageSinkProvider(
            getDefaultHiveFileWriterFactories(config), hdfsEnvironment, PAGE_SORTER, metastoreClient,
            new GroupByHashPageIndexerFactory(new JoinCompiler(createTestMetadataManager())),
            TYPE_MANAGER, config, locationService, partitionUpdateCodec,
            new TestingNodeManager("fake-environment"), new HiveEventClient(),
            new HiveSessionProperties(config, new OrcFileWriterConfig(), new ParquetFileWriterConfig()),
            new HiveWriterStats(), getDefaultOrcFileWriterFactory(config));
    pageSourceProvider = new HivePageSourceProvider(
            config, hdfsEnvironment, getDefaultHiveRecordCursorProvider(config),
            getDefaultHiveDataStreamFactories(config), TYPE_MANAGER, getNoOpIndexCache(),
            getDefaultHiveSelectiveFactories(config));
}
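A hedged sketch of how a concrete test class might drive this method; the class name, host, port, and the HdfsConfiguration wiring inside the lambda are placeholders, and the exact constructors in hetu-core may differ:

    // Hypothetical subclass; values and constructor wiring are assumptions, not project code.
    public class TestHiveFileSystemExample extends AbstractTestHiveFileSystem {
        @BeforeClass
        public void initialize() {
            setup("hadoop-master",   // metastore host (placeholder)
                    9083,            // Thrift metastore port (placeholder)
                    "default",       // database the tests run against
                    config -> new HiveHdfsConfiguration(new HdfsConfigurationInitializer(config)),  // assumed wiring
                    false);          // s3SelectPushdownEnabled
        }
    }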
Use of io.prestosql.plugin.hive.metastore.Database in project hetu-core by openlookeng.
From the class SqlStandardAccessControl, method isDatabaseOwner:
private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName) {
    // all users are "owners" of the default database
    if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(databaseName)) {
        return true;
    }
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply(((HiveTransactionHandle) transaction));
    Optional<Database> databaseMetadata = metastore.getDatabase(databaseName);
    if (!databaseMetadata.isPresent()) {
        return false;
    }
    Database database = databaseMetadata.get();
    // a database can be owned by a user or role
    if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) {
        return true;
    }
    if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) {
        return true;
    }
    return false;
}
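isDatabaseOwner is typically consumed by the SPI check methods of the same access-control class. A representative sketch, assuming the ConnectorAccessControl signatures that match the parameters above (denyDropSchema is the static helper from io.prestosql.spi.security.AccessDeniedException; the call site shown is illustrative, not necessarily verbatim from hetu-core):

    @Override
    public void checkCanDropSchema(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String schemaName) {
        // only the schema's owner (user or enabled role) or an admin may drop it
        if (!isDatabaseOwner(transaction, identity, schemaName)) {
            denyDropSchema(schemaName);
        }
    }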
Use of io.prestosql.plugin.hive.metastore.Database in project boostkit-bigdata by kunpengcompute.
From the class GlueHiveMetastore, method getAllViews: the body is identical, line for line, to the hetu-core version shown above.