Use of io.prestosql.plugin.hive.HiveTransactionHandle in project hetu-core by openlookeng.
In the class SqlStandardAccessControl, the method hasAdminOptionForRoles:
private boolean hasAdminOptionForRoles(ConnectorTransactionHandle transaction, ConnectorIdentity identity, Set<String> roles)
{
    // admins implicitly hold the admin option on every role
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    // collect every role the user holds WITH ADMIN OPTION, including transitively granted ones
    Set<String> rolesWithGrantOption = listApplicableRoles(new HivePrincipal(USER, identity.getUser()), metastore::listRoleGrants)
            .filter(RoleGrant::isGrantable)
            .map(RoleGrant::getRoleName)
            .collect(toSet());
    return rolesWithGrantOption.containsAll(roles);
}
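For context, a plausible call site, modeled on how this helper is used in upstream Presto's SqlStandardAccessControl; the exact override signature in hetu-core may differ, so read this as a sketch rather than the project's verbatim code:

@Override
public void checkCanGrantRoles(ConnectorTransactionHandle transactionHandle, ConnectorIdentity identity,
        Set<String> roles, Set<HivePrincipal> grantees, boolean withAdminOption, Optional<HivePrincipal> grantor, String catalogName)
{
    // granting roles requires the admin option on every role being granted
    if (!hasAdminOptionForRoles(transactionHandle, identity, roles)) {
        denyGrantRoles(roles, grantees);
    }
}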
Use of io.prestosql.plugin.hive.HiveTransactionHandle in project hetu-core by openlookeng.
In the class CarbondataSplitManager, the method getSplitsForCompaction:
/*
 * Converts the splits into batches based on task id and wraps them in a ConnectorSplitSource to send back.
 */
public ConnectorSplitSource getSplitsForCompaction(HiveIdentity identity, ConnectorTransactionHandle transactionHandle,
        ConnectorTableHandle tableHandle, String tablePath, Map<String, Object> queryProperties, String queryId,
        ImmutableList.Builder<ConnectorSplit> allSplitsForComp, Configuration configuration) throws PrestoException
{
    HiveTableHandle hiveTable = (HiveTableHandle) tableHandle;
    SchemaTableName schemaTableName = hiveTable.getSchemaTableName();
    List<List<LoadMetadataDetails>> allGroupedSegList;
    // Step 1: Get table handles and metadata
    SemiTransactionalHiveMetastore metaStore = metastoreProvider.apply((HiveTransactionHandle) transactionHandle);
    Table table = metaStore.getTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName())
            .orElseThrow(() -> new TableNotFoundException(schemaTableName));
    Properties hiveSchema = MetastoreUtil.getHiveSchema(table);
    CarbonLoadModel carbonLoadModel = null;
    try {
        carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(hiveSchema, configuration);
    }
    catch (Exception e) {
        LOGGER.error("Cannot create carbon load model", e);
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "Cannot create carbon load model", e);
    }
    // a "FULL" vacuum maps to MAJOR compaction; anything else is MINOR
    CompactionType compactionType = Boolean.TRUE.equals(queryProperties.get("FULL")) ? CompactionType.MAJOR : CompactionType.MINOR;
    // Step 2: Get the segments to be merged based on the configuration passed
    allGroupedSegList = CarbondataHetuCompactorUtil.identifyAndGroupSegmentsToBeMerged(carbonLoadModel, configuration,
            compactionType, carbondataConfig.getMajorVacuumSegSize(), carbondataConfig.getMinorVacuumSegCount());
    // All the splits are grouped by task id and compaction level into one builder
    ImmutableList.Builder<ConnectorSplit> cSplits = ImmutableList.builder();
    Gson gson = new Gson();
    for (List<LoadMetadataDetails> segmentsToBeMerged : allGroupedSegList) {
        String mergedLoadName = CarbonDataMergerUtil.getMergedLoadName(segmentsToBeMerged);
        // Step 3: Get all the splits for the required segments and divide them based on task ids
        Map<String, List<CarbondataLocalInputSplit>> taskIdToSplitMapping = new HashMap<>();
        for (ConnectorSplit connectorSplit : allSplitsForComp.build()) {
            HiveSplit currSplit = ((HiveSplitWrapper) connectorSplit).getSplits().get(0);
            CarbondataLocalMultiBlockSplit currSplits = gson.fromJson(currSplit.getSchema().getProperty("carbonSplit"),
                    CarbondataLocalMultiBlockSplit.class);
            for (CarbondataLocalInputSplit split : currSplits.getSplitList()) {
                CarbonInputSplit carbonInputSplit = CarbondataLocalInputSplit.convertSplit(split);
                String taskId = carbonInputSplit.taskId;
                String segmentNo = carbonInputSplit.getSegmentId();
                // keep only the splits whose segment was selected for this merge group
                for (LoadMetadataDetails load : segmentsToBeMerged) {
                    if (load.getLoadName().equals(segmentNo)) {
                        taskIdToSplitMapping.computeIfAbsent(taskId, k -> new ArrayList<>()).add(split);
                    }
                }
            }
        }
        // Step 4: Create the ConnectorSplitSource with the splits divided and return
        long index = 0;
        for (Map.Entry<String, List<CarbondataLocalInputSplit>> splitEntry : taskIdToSplitMapping.entrySet()) {
            CarbondataLocalMultiBlockSplit currSplit = new CarbondataLocalMultiBlockSplit(splitEntry.getValue(),
                    splitEntry.getValue().stream().flatMap(f -> Arrays.stream(getLocations(f))).distinct().toArray(String[]::new));
            index++;
            Properties properties = new Properties();
            for (Map.Entry<String, String> entry : table.getStorage().getSerdeParameters().entrySet()) {
                properties.setProperty(entry.getKey(), entry.getValue());
            }
            // TODO: Use the existing CarbondataLocalInputSplit list to convert
            properties.setProperty("tablePath", tablePath);
            properties.setProperty("carbonSplit", currSplit.getJsonString());
            properties.setProperty("queryId", queryId);
            properties.setProperty("index", String.valueOf(index));
            properties.setProperty("mergeLoadName", mergedLoadName);
            properties.setProperty("compactionType", compactionType.toString());
            properties.setProperty("taskNo", splitEntry.getKey());
            cSplits.add(HiveSplitWrapper.wrap(new HiveSplit(schemaTableName.getSchemaName(), schemaTableName.getTableName(),
                    schemaTableName.getTableName(), tablePath, 0L, 0L, 0L, 0L, properties, new ArrayList<>(),
                    getHostAddresses(currSplit.getLocations()), OptionalInt.empty(), false, new HashMap<>(),
                    Optional.empty(), false, Optional.empty(), Optional.empty(), false, ImmutableMap.of())));
        }
    }
    LOGGER.info("Splits for compaction built and ready");
    return new FixedSplitSource(cSplits.build());
}
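A minimal sketch of how a vacuum path might drive this method; the surrounding driver, the collectTableSplits(...) helper, and the variable wiring are hypothetical illustrations, not hetu-core API:

// Hypothetical caller: gather the table's regular splits, then request compaction batches.
ImmutableList.Builder<ConnectorSplit> allSplits = ImmutableList.builder();
collectTableSplits(tableHandle, allSplits); // hypothetical helper that fills the builder

Map<String, Object> queryProperties = new HashMap<>();
queryProperties.put("FULL", Boolean.TRUE); // "FULL" selects CompactionType.MAJOR in getSplitsForCompaction

ConnectorSplitSource splitSource = splitManager.getSplitsForCompaction(identity, transactionHandle,
        tableHandle, tablePath, queryProperties, queryId, allSplits, configuration);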
Use of io.prestosql.plugin.hive.HiveTransactionHandle in project hetu-core by openlookeng.
In the class SqlStandardAccessControl, the method isDatabaseOwner:
private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName)
{
    // all users are "owners" of the default database
    if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(databaseName)) {
        return true;
    }
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Optional<Database> databaseMetadata = metastore.getDatabase(databaseName);
    if (!databaseMetadata.isPresent()) {
        return false;
    }
    Database database = databaseMetadata.get();
    // a database can be owned by a user or role
    if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) {
        return true;
    }
    if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) {
        return true;
    }
    return false;
}
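Ownership checks like this typically gate schema-level DDL. A plausible caller, again modeled on upstream Presto; hetu-core's actual override may differ:

@Override
public void checkCanDropSchema(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String schemaName)
{
    // only the database owner (or an admin) may drop the schema
    if (!isDatabaseOwner(transaction, identity, schemaName)) {
        denyDropSchema(schemaName);
    }
}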
Use of io.prestosql.plugin.hive.HiveTransactionHandle in project boostkit-bigdata by kunpengcompute.
In the class SqlStandardAccessControl, the method hasAdminOptionForRoles:
private boolean hasAdminOptionForRoles(ConnectorTransactionHandle transaction, ConnectorIdentity identity, Set<String> roles)
{
    // admins implicitly hold the admin option on every role
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    // collect every role the user holds WITH ADMIN OPTION, including transitively granted ones
    Set<String> rolesWithGrantOption = listApplicableRoles(new HivePrincipal(USER, identity.getUser()), metastore::listRoleGrants)
            .filter(RoleGrant::isGrantable)
            .map(RoleGrant::getRoleName)
            .collect(toSet());
    return rolesWithGrantOption.containsAll(roles);
}
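The check hinges on listApplicableRoles expanding role grants transitively. As a conceptual model only (not the metastore utility's actual code), a breadth-first traversal with the same Function<HivePrincipal, Set<RoleGrant>> shape could look like this:

// Conceptual sketch: walk the grant graph from the principal, visiting each RoleGrant once.
static Stream<RoleGrant> listApplicableRoles(HivePrincipal principal, Function<HivePrincipal, Set<RoleGrant>> listRoleGrants)
{
    Set<RoleGrant> seen = new LinkedHashSet<>();
    Queue<HivePrincipal> queue = new ArrayDeque<>();
    queue.add(principal);
    while (!queue.isEmpty()) {
        for (RoleGrant grant : listRoleGrants.apply(queue.poll())) {
            if (seen.add(grant)) {
                // a granted role may itself hold further roles, so keep walking
                queue.add(new HivePrincipal(ROLE, grant.getRoleName()));
            }
        }
    }
    return seen.stream();
}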
Use of io.prestosql.plugin.hive.HiveTransactionHandle in project boostkit-bigdata by kunpengcompute.
In the class SqlStandardAccessControl, the method isDatabaseOwner:
private boolean isDatabaseOwner(ConnectorTransactionHandle transaction, ConnectorIdentity identity, String databaseName)
{
    // all users are "owners" of the default database
    if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(databaseName)) {
        return true;
    }
    if (isAdmin(transaction, identity)) {
        return true;
    }
    SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction);
    Optional<Database> databaseMetadata = metastore.getDatabase(databaseName);
    if (!databaseMetadata.isPresent()) {
        return false;
    }
    Database database = databaseMetadata.get();
    // a database can be owned by a user or role
    if (database.getOwnerType() == USER && identity.getUser().equals(database.getOwnerName())) {
        return true;
    }
    if (database.getOwnerType() == ROLE && isRoleEnabled(identity, metastore::listRoleGrants, database.getOwnerName())) {
        return true;
    }
    return false;
}
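The role-ownership branch relies on isRoleEnabled, which asks whether the owner role is currently in effect for the identity. A simplified sketch building on the traversal shown above; the real utility also honors the identity's selected role and the built-in "admin"/"public" roles:

// Simplified: a role is enabled if it is "public" or reachable from the user's principal.
static boolean isRoleEnabled(ConnectorIdentity identity, Function<HivePrincipal, Set<RoleGrant>> listRoleGrants, String role)
{
    if ("public".equals(role)) {
        return true;
    }
    return listApplicableRoles(new HivePrincipal(USER, identity.getUser()), listRoleGrants)
            .anyMatch(grant -> grant.getRoleName().equals(role));
}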