use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.
the class DrillRoot method getClusterInfoJSON.
@GET
@Path("/cluster.json")
@Produces(MediaType.APPLICATION_JSON)
public ClusterInfo getClusterInfoJSON() {
final Collection<DrillbitInfo> drillbits = Sets.newTreeSet();
final Collection<String> mismatchedVersions = Sets.newTreeSet();
final DrillbitContext dbContext = work.getContext();
final DrillbitEndpoint currentDrillbit = dbContext.getEndpoint();
final String currentVersion = currentDrillbit.getVersion();
final DrillConfig config = dbContext.getConfig();
final boolean userEncryptionEnabled = config.getBoolean(ExecConstants.USER_ENCRYPTION_SASL_ENABLED) || config.getBoolean(ExecConstants.USER_SSL_ENABLED);
final boolean bitEncryptionEnabled = config.getBoolean(ExecConstants.BIT_ENCRYPTION_SASL_ENABLED);
OptionManager optionManager = work.getContext().getOptionManager();
final boolean isUserLoggedIn = AuthDynamicFeature.isUserLoggedIn(sc);
final boolean shouldShowAdminInfo = isUserLoggedIn && ((DrillUserPrincipal) sc.getUserPrincipal()).isAdminUser();
for (DrillbitEndpoint endpoint : work.getContext().getAvailableBits()) {
final DrillbitInfo drillbit = new DrillbitInfo(endpoint, isDrillbitsTheSame(currentDrillbit, endpoint), currentVersion.equals(endpoint.getVersion()));
if (!drillbit.isVersionMatch()) {
mismatchedVersions.add(drillbit.getVersion());
}
drillbits.add(drillbit);
}
// For all other cases the user info need not or should not be displayed
if (shouldShowAdminInfo) {
final String processUser = ImpersonationUtil.getProcessUserName();
final String processUserGroups = Joiner.on(", ").join(ImpersonationUtil.getProcessUserGroupNames());
String adminUsers = ExecConstants.ADMIN_USERS_VALIDATOR.getAdminUsers(optionManager);
String adminUserGroups = ExecConstants.ADMIN_USER_GROUPS_VALIDATOR.getAdminUserGroups(optionManager);
logger.debug("Admin info: user: " + adminUsers + " user group: " + adminUserGroups + " userLoggedIn " + isUserLoggedIn + " shouldShowAdminInfo: " + shouldShowAdminInfo);
return new ClusterInfo(drillbits, currentVersion, mismatchedVersions, userEncryptionEnabled, bitEncryptionEnabled, shouldShowAdminInfo, QueueInfo.build(dbContext.getResourceManager()), processUser, processUserGroups, adminUsers, adminUserGroups, authEnabled.get());
}
return new ClusterInfo(drillbits, currentVersion, mismatchedVersions, userEncryptionEnabled, bitEncryptionEnabled, shouldShowAdminInfo, QueueInfo.build(dbContext.getResourceManager()), authEnabled.get());
}
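For reference, the admin ACL strings read above come from the system-level OptionManager held by the Drillbit. A minimal sketch of the same lookup outside the REST resource, assuming a DrillbitContext named dbContext is in scope (the helper method name is illustrative):
private static String describeAdminAcls(DrillbitContext dbContext) {
  // System-scoped OptionManager, the same one getClusterInfoJSON obtains via work.getContext().getOptionManager()
  OptionManager systemOptions = dbContext.getOptionManager();
  // Resolve the admin ACLs through the validators, exactly as the method above does
  String adminUsers = ExecConstants.ADMIN_USERS_VALIDATOR.getAdminUsers(systemOptions);
  String adminUserGroups = ExecConstants.ADMIN_USER_GROUPS_VALIDATOR.getAdminUserGroups(systemOptions);
  return "admin users: " + adminUsers + "; admin user groups: " + adminUserGroups;
}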
use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.
the class DefaultSqlHandler method convertToPrel.
/**
* Applies physical rules and certain transformations to convert a Drill logical relational node into a physical one.
*
* @param drel Drill logical relational node
* @param validatedRowType final output row type
* @return physical relational node
* @throws RelConversionException
* @throws SqlUnsupportedException
*/
protected Prel convertToPrel(RelNode drel, RelDataType validatedRowType) throws RelConversionException, SqlUnsupportedException {
Preconditions.checkArgument(drel.getConvention() == DrillRel.DRILL_LOGICAL);
final RelTraitSet traits = drel.getTraitSet().plus(Prel.DRILL_PHYSICAL).plus(DrillDistributionTrait.SINGLETON);
Prel phyRelNode;
try {
final Stopwatch watch = Stopwatch.createStarted();
final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits, false);
phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
// log externally as we need to finalize before traversing the tree.
log(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, phyRelNode, logger, watch);
} catch (RelOptPlanner.CannotPlanException ex) {
logger.error(ex.getMessage());
if (JoinUtils.checkCartesianJoin(drel)) {
throw JoinUtils.cartesianJoinPlanningException();
} else {
throw ex;
}
}
OptionManager queryOptions = context.getOptions();
if (context.getPlannerSettings().isMemoryEstimationEnabled() && !MemoryEstimationVisitor.enoughMemory(phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
log("Not enough memory for this plan", phyRelNode, logger, null);
logger.debug("Re-planning without hash operations.");
queryOptions.setLocalOption(PlannerSettings.HASHJOIN.getOptionName(), false);
queryOptions.setLocalOption(PlannerSettings.HASHAGG.getOptionName(), false);
try {
final RelNode relNode = transform(PlannerType.VOLCANO, PlannerPhase.PHYSICAL, drel, traits);
phyRelNode = (Prel) relNode.accept(new PrelFinalizer());
} catch (RelOptPlanner.CannotPlanException ex) {
logger.error(ex.getMessage());
if (JoinUtils.checkCartesianJoin(drel)) {
throw JoinUtils.cartesianJoinPlanningException();
} else {
throw ex;
}
}
}
// Handy way to visualize the plan while debugging
// ExplainHandler.printPlan(phyRelNode, context);
/* The order of the following transformations is important */
/*
* 0.)
* Add a top project before the screen operator or writer to ensure that final output column names are preserved.
*/
phyRelNode = TopProjectVisitor.insertTopProject(phyRelNode, validatedRowType);
/*
* 1.) For a select * from join query, we need to insert a project on top of the scan and a top project just
* under the screen operator. The project on top of the scan will rename * to T1*, while the top project
* will rename T1* back to *, before it outputs the final result. Only the top project will allow
* duplicate columns, since the user could explicitly ask for duplicate columns (select *, col, *).
* The other projects will remove duplicate columns when we generate the POP in JSON format.
*/
phyRelNode = StarColumnConverter.insertRenameProject(phyRelNode);
log("Physical RelNode after Top and Rename Project inserting: ", phyRelNode, logger, null);
/*
* 2.)
* A join might cause naming conflicts between its left and right children.
* In such cases, we have to insert a Project to rename the conflicting names.
* The Unnest operator might need to adjust its correlated field after physical planning.
*/
phyRelNode = AdjustOperatorsSchemaVisitor.adjustSchema(phyRelNode);
/*
* 2.1) Swap left / right for an INNER hash join if the left side's row count is less than (1 + margin) times the right side's.
* We want the smaller dataset on the right side, since the hash table is built on the right side.
*/
if (context.getPlannerSettings().isHashJoinSwapEnabled()) {
phyRelNode = SwapHashJoinVisitor.swapHashJoin(phyRelNode, context.getPlannerSettings().getHashJoinSwapMarginFactor());
}
if (context.getPlannerSettings().isParquetRowGroupFilterPushdownPlanningEnabled()) {
phyRelNode = (Prel) transform(PlannerType.HEP_BOTTOM_UP, PlannerPhase.PHYSICAL_PARTITION_PRUNING, phyRelNode);
}
/*
* 2.2) Break up all expressions with complex outputs into their own project operations
*/
phyRelNode = phyRelNode.accept(new SplitUpComplexExpressions(config.getConverter().getTypeFactory(), context.getPlannerSettings().functionImplementationRegistry, phyRelNode.getCluster().getRexBuilder()), null);
/*
* 2.3) Projections that contain a reference to flatten are rewritten as Flatten operators followed by a Project
*/
phyRelNode = phyRelNode.accept(new RewriteProjectToFlatten(config.getConverter().getTypeFactory(), context.getDrillOperatorTable()), null);
/*
* 3.)
* Since our operators work via names rather than indices, we have to reorder any
* output before we return data to the user as we may have accidentally shuffled things.
* This adds a trivial project to reorder columns prior to output.
*/
phyRelNode = FinalColumnReorderer.addFinalColumnOrdering(phyRelNode);
/*
* 4.)
* If two fragments are both estimated to have a parallelization width of one, remove the exchange
* separating them.
*/
phyRelNode = ExcessiveExchangeIdentifier.removeExcessiveExchanges(phyRelNode, targetSliceSize);
/* 5.) Insert the IMPLICIT_COLUMN in the lateral unnest pipeline */
phyRelNode = LateralUnnestRowIDVisitor.insertRowID(phyRelNode);
/* 6.)
* if the client does not support complex types (Map, Repeated),
* insert a project which converts them to JSON
*/
if (!context.getSession().isSupportComplexTypes()) {
logger.debug("Client does not support complex types, add ComplexToJson operator.");
phyRelNode = ComplexToJsonPrelVisitor.addComplexToJsonPrel(phyRelNode);
}
/* 7.)
* Insert LocalExchange (mux and/or demux) nodes
*/
phyRelNode = InsertLocalExchangeVisitor.insertLocalExchanges(phyRelNode, queryOptions);
/*
* 8.)
* Insert RuntimeFilter over Scan nodes
*/
if (context.isRuntimeFilterEnabled()) {
phyRelNode = RuntimeFilterVisitor.addRuntimeFilter(phyRelNode, context);
}
/* 9.)
* Next, we add any required selection vector removers given the supported encodings of each
* operator. This will ultimately move to a new trait, but we're managing it here for now to avoid
* introducing new issues in planning before the next release.
*/
phyRelNode = SelectionVectorPrelVisitor.addSelectionRemoversWhereNecessary(phyRelNode);
/*
* 10.)
* Insert a project above the screen operator or writer to ensure that final output column names are preserved after all optimizations.
*/
phyRelNode = TopProjectVisitor.insertTopProject(phyRelNode, validatedRowType);
/* 11.)
* Finally, make sure that no rels are repeated.
* This could happen when querying the same table twice, as Optiq may canonicalize these.
*/
phyRelNode = RelUniqifier.uniqifyGraph(phyRelNode);
return phyRelNode;
}
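The OptionManager usage worth calling out in convertToPrel is the memory-pressure fallback: hash operators are switched off at query scope before the physical phase is re-run. A minimal sketch of that step in isolation, assuming a QueryContext named context and a finalized Prel named phyRelNode, as in the method above:
OptionManager queryOptions = context.getOptions();
if (context.getPlannerSettings().isMemoryEstimationEnabled()
    && !MemoryEstimationVisitor.enoughMemory(phyRelNode, queryOptions, context.getActiveEndpoints().size())) {
  // Disable hash join and hash aggregate for this query only; system and session settings are left untouched
  queryOptions.setLocalOption(PlannerSettings.HASHJOIN.getOptionName(), false);
  queryOptions.setLocalOption(PlannerSettings.HASHAGG.getOptionName(), false);
  // ... then re-run the VOLCANO / PHYSICAL transform with the weakened option set, as shown above
}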
use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.
the class ResetOptionHandler method getPlan.
/**
* Handles the {@link DrillSqlResetOption} query.
*/
@Override
public final PhysicalPlan getPlan(SqlNode sqlNode) throws ForemanSetupException {
QueryOptionManager options = context.getOptions();
SqlSetOption statement = unwrap(sqlNode, SqlSetOption.class);
OptionScope optionScope = getScope(statement, context.getOptions());
if (optionScope == OptionValue.OptionScope.SYSTEM) {
checkAdminPrivileges(options);
}
OptionManager optionManager = options.getOptionManager(optionScope);
String optionName = statement.getName().toString();
if ("ALL".equalsIgnoreCase(optionName)) {
optionManager.deleteAllLocalOptions();
} else {
optionManager.deleteLocalOption(optionName);
}
return DirectPlan.createDirectPlan(context, true, String.format("%s updated.", optionName));
}
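A minimal sketch of the scoped deletes this handler issues, assuming a QueryOptionManager named options obtained from context.getOptions(); the option name is only an example:
OptionManager sessionOptions = options.getOptionManager(OptionValue.OptionScope.SESSION);
// RESET of a single option removes just that session-scoped value
sessionOptions.deleteLocalOption(PlannerSettings.HASHJOIN.getOptionName());
// RESET ALL drops every option value held at session scope
sessionOptions.deleteAllLocalOptions();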
use of org.apache.drill.exec.server.options.OptionManager in project drill by apache.
the class ParquetFormatPlugin method getRecordWriter.
public RecordWriter getRecordWriter(FragmentContext context, ParquetWriter writer) throws IOException, OutOfMemoryException {
Map<String, String> writerOpts = new HashMap<>();
OptionManager contextOpts = context.getOptions();
writerOpts.put("location", writer.getLocation());
FragmentHandle handle = context.getHandle();
String fragmentId = String.format("%d_%d", handle.getMajorFragmentId(), handle.getMinorFragmentId());
writerOpts.put("prefix", fragmentId);
// Many options which follow may be set as Drill config options or in the parquet format
// plugin config. If there is a Drill option set at session scope or narrower, it takes precedence.
OptionValue.OptionScope minScope = OptionValue.OptionScope.SESSION;
writerOpts.put(ExecConstants.PARQUET_BLOCK_SIZE, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_BLOCK_SIZE).getValueMinScope(minScope), config.getBlockSize(), contextOpts.getInt(ExecConstants.PARQUET_BLOCK_SIZE)).toString());
writerOpts.put(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK).getValueMinScope(minScope), config.getUseSingleFSBlock(), contextOpts.getBoolean(ExecConstants.PARQUET_WRITER_USE_SINGLE_FS_BLOCK)).toString());
writerOpts.put(ExecConstants.PARQUET_PAGE_SIZE, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_PAGE_SIZE).getValueMinScope(minScope), config.getPageSize(), contextOpts.getInt(ExecConstants.PARQUET_PAGE_SIZE)).toString());
// "internal use" so not settable in format config
writerOpts.put(ExecConstants.PARQUET_DICT_PAGE_SIZE, contextOpts.getOption(ExecConstants.PARQUET_DICT_PAGE_SIZE).num_val.toString());
// "internal use" so not settable in format config
writerOpts.put(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING, contextOpts.getOption(ExecConstants.PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING).bool_val.toString());
writerOpts.put(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE).getValueMinScope(minScope), config.getWriterCompressionType(), contextOpts.getString(ExecConstants.PARQUET_WRITER_COMPRESSION_TYPE)).toString());
writerOpts.put(ExecConstants.PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS).getValueMinScope(minScope), config.getWriterLogicalTypeForDecimals(), contextOpts.getString(ExecConstants.PARQUET_WRITER_LOGICAL_TYPE_FOR_DECIMALS)).toString());
writerOpts.put(ExecConstants.PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS).getValueMinScope(minScope), config.getWriterUsePrimitivesForDecimals(), contextOpts.getBoolean(ExecConstants.PARQUET_WRITER_USE_PRIMITIVE_TYPES_FOR_DECIMALS)).toString());
writerOpts.put(ExecConstants.PARQUET_WRITER_FORMAT_VERSION, ObjectUtils.firstNonNull(contextOpts.getOption(ExecConstants.PARQUET_WRITER_FORMAT_VERSION).getValueMinScope(minScope), config.getWriterFormatVersion(), contextOpts.getString(ExecConstants.PARQUET_WRITER_FORMAT_VERSION)).toString());
RecordWriter recordWriter = new ParquetRecordWriter(context, writer);
recordWriter.init(writerOpts);
return recordWriter;
}
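Each writer option above follows the same precedence rule: a value set on the OptionManager at session scope or narrower wins, then the Parquet format plugin config, then the system or default option value. A minimal sketch of that resolution for a single option (block size), under the same assumptions as the method above:
OptionManager contextOpts = context.getOptions();
OptionValue.OptionScope minScope = OptionValue.OptionScope.SESSION;
Object blockSize = ObjectUtils.firstNonNull(
    contextOpts.getOption(ExecConstants.PARQUET_BLOCK_SIZE).getValueMinScope(minScope), // session scope or narrower
    config.getBlockSize(),                                                               // parquet format plugin config
    contextOpts.getInt(ExecConstants.PARQUET_BLOCK_SIZE));                               // system or default value
writerOpts.put(ExecConstants.PARQUET_BLOCK_SIZE, blockSize.toString());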
Aggregations