use of io.prestosql.spi.metadata.TableHandle in project hetu-core by openlookeng.
the class TestCostCalculator method tableScan.
private TableScanNode tableScan(String id, String... symbols)
{
    List<Symbol> symbolsList = Arrays.stream(symbols)
            .map(Symbol::new)
            .collect(toImmutableList());
    ImmutableMap.Builder<Symbol, ColumnHandle> assignments = ImmutableMap.builder();
    for (Symbol symbol : symbolsList) {
        assignments.put(symbol, new TpchColumnHandle("orderkey", BIGINT));
    }
    TpchTableHandle tableHandle = new TpchTableHandle("orders", 1.0);
    return new TableScanNode(
            new PlanNodeId(id),
            new TableHandle(
                    new CatalogName("tpch"),
                    tableHandle,
                    INSTANCE,
                    Optional.of(new TpchTableLayoutHandle(tableHandle, TupleDomain.all()))),
            symbolsList,
            assignments.build(),
            TupleDomain.all(),
            Optional.empty(),
            ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
            new UUID(0, 0),
            0,
            false);
}
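A hedged usage sketch (the symbol names and the surrounding assertion wiring are assumptions, not taken from TestCostCalculator itself):

// Build a scan over tpch.orders exposing two output symbols. The helper maps
// every symbol to the same "orderkey" column handle, which is enough for cost
// estimation in these tests.
TableScanNode ts = tableScan("ts", "orderkey", "orderkey_0");
// The node carries an all-accepting enforced constraint (TupleDomain.all()),
// so the cost model sees an unfiltered scan of the TPCH orders table.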
use of io.prestosql.spi.metadata.TableHandle in project hetu-core by openlookeng.
the class MetadataManager method applyFilter.
@Override
public Optional<ConstraintApplicationResult<TableHandle>> applyFilter(Session session, TableHandle table, Constraint constraint)
{
    CatalogName catalogName = table.getCatalogName();
    ConnectorMetadata metadata = getMetadata(session, catalogName);
    if (metadata.usesLegacyTableLayouts()) {
        return Optional.empty();
    }
    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    return metadata.applyFilter(connectorSession, table.getConnectorHandle(), constraint)
            .map(result -> new ConstraintApplicationResult<>(
                    new TableHandle(catalogName, result.getHandle(), table.getTransaction(), Optional.empty()),
                    result.getRemainingFilter()));
}
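As a rough illustration of the call path (variable names below are assumptions; only the applyFilter signature comes from the snippet above), an optimizer-side caller hands in a Constraint built from a TupleDomain and receives a new TableHandle plus whatever part of the filter the connector could not enforce:

// Sketch: push a (here trivially all-accepting) column constraint into the connector.
// If the connector still uses legacy table layouts, applyFilter() returns empty and
// the caller falls back to getLayout().
Optional<ConstraintApplicationResult<TableHandle>> pushed =
        metadata.applyFilter(session, tableHandle, new Constraint(TupleDomain.all()));

pushed.ifPresent(result -> {
    TableHandle newHandle = result.getHandle();                        // same transaction, new connector handle
    TupleDomain<ColumnHandle> remaining = result.getRemainingFilter(); // must still be applied above the scan
});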
use of io.prestosql.spi.metadata.TableHandle in project hetu-core by openlookeng.
the class MetadataManager method getLayout.
@Override
public Optional<TableLayoutResult> getLayout(Session session, TableHandle table, Constraint constraint, Optional<Set<ColumnHandle>> desiredColumns)
{
    if (constraint.getSummary().isNone()) {
        return Optional.empty();
    }
    CatalogName catalogName = table.getCatalogName();
    ConnectorTableHandle connectorTable = table.getConnectorHandle();
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName);
    checkState(metadata.usesLegacyTableLayouts(), "getLayout() was called even though connector doesn't support legacy Table Layout");
    ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(catalogName);
    ConnectorSession connectorSession = session.toConnectorSession(catalogName);
    List<ConnectorTableLayoutResult> layouts = metadata.getTableLayouts(connectorSession, connectorTable, constraint, desiredColumns);
    if (layouts.isEmpty()) {
        return Optional.empty();
    }
    if (layouts.size() > 1) {
        throw new PrestoException(NOT_SUPPORTED, format("Connector returned multiple layouts for table %s", table));
    }
    ConnectorTableLayout tableLayout = layouts.get(0).getTableLayout();
    return Optional.of(new TableLayoutResult(
            new TableHandle(catalogName, connectorTable, transaction, Optional.of(tableLayout.getHandle())),
            new TableProperties(catalogName, transaction, new ConnectorTableProperties(tableLayout)),
            layouts.get(0).getUnenforcedConstraint()));
}
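A hedged sketch of this legacy path for comparison (variable names are assumptions; the methods used are the ones visible in this snippet and in the pushdown code further below):

// Sketch: connectors that still use table layouts answer getLayout() instead of
// applyFilter(). The returned TableHandle embeds the chosen layout handle.
Optional<TableLayoutResult> layout = metadata.getLayout(
        session,
        tableHandle,
        new Constraint(TupleDomain.all()),   // no predicate to enforce in this sketch
        Optional.empty());                   // no desired-columns hint

layout.ifPresent(result -> {
    TableHandle layoutBackedHandle = result.getNewTableHandle();             // layout handle now present
    TupleDomain<ColumnHandle> unenforced = result.getUnenforcedConstraint(); // stays in a FilterNode above the scan
});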
use of io.prestosql.spi.metadata.TableHandle in project hetu-core by openlookeng.
the class StatementAnalyzer method validateCreateIndex.
private void validateCreateIndex(Table table, Optional<Scope> scope)
{
    CreateIndex createIndex = (CreateIndex) analysis.getOriginalStatement();
    QualifiedObjectName tableFullName = createQualifiedObjectName(session, createIndex, createIndex.getTableName());
    accessControl.checkCanCreateIndex(session.getRequiredTransactionId(), session.getIdentity(), tableFullName);
    String tableName = tableFullName.toString();
    // check whether the catalog supports CREATE INDEX
    if (!metadata.isHeuristicIndexSupported(session, tableFullName)) {
        throw new SemanticException(NOT_SUPPORTED, createIndex, "CREATE INDEX is not supported in catalog '%s'", tableFullName.getCatalogName());
    }
    List<String> partitions = new ArrayList<>();
    String partitionColumn = null;
    if (createIndex.getExpression().isPresent()) {
        partitions = HeuristicIndexUtils.extractPartitions(createIndex.getExpression().get());
        // validate the partition names; for CREATE INDEX ... WHERE pt_d = xxx,
        // pt_d must be a partition column
        Set<String> partitionColumns = partitions.stream().map(k -> k.substring(0, k.indexOf("="))).collect(Collectors.toSet());
        if (partitionColumns.size() > 1) {
            // currently only one partition column is supported
            throw new IllegalArgumentException("Heuristic index only supports predicates on one column");
        }
        // the only entry in the set is the partition column name
        partitionColumn = partitionColumns.iterator().next();
    }
    Optional<TableHandle> tableHandle = metadata.getTableHandle(session, tableFullName);
    if (tableHandle.isPresent()) {
        if (!tableHandle.get().getConnectorHandle().isHeuristicIndexSupported()) {
            throw new SemanticException(NOT_SUPPORTED, table, "Catalog supported, but table storage format is not supported by heuristic index");
        }
        TableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle.get());
        List<String> availableColumns = tableMetadata.getColumns().stream().map(ColumnMetadata::getName).collect(Collectors.toList());
        for (Identifier column : createIndex.getColumnAliases()) {
            if (!availableColumns.contains(column.getValue().toLowerCase(Locale.ROOT))) {
                throw new SemanticException(MISSING_ATTRIBUTE, table, "Column '%s' cannot be resolved", column.getValue());
            }
        }
        if (partitionColumn != null && !tableHandle.get().getConnectorHandle().isPartitionColumn(partitionColumn)) {
            throw new SemanticException(NOT_SUPPORTED, table, "Heuristic index creation is only supported for predicates on partition columns");
        }
    }
    else {
        throw new SemanticException(MISSING_ATTRIBUTE, table, "Table '%s' is invalid", tableFullName);
    }
    List<Pair<String, Type>> indexColumns = new LinkedList<>();
    for (Identifier i : createIndex.getColumnAliases()) {
        indexColumns.add(new Pair<>(i.toString(), UNKNOWN));
    }
    // for now, creating an index on multiple columns is not supported
    if (indexColumns.size() > 1) {
        throw new SemanticException(NOT_SUPPORTED, table, "Multi-column indexes are currently not supported");
    }
    try {
        // use this placeholder to check for an existing index and to reserve the name
        Properties properties = new Properties();
        properties.setProperty(INPROGRESS_PROPERTY_KEY, "TRUE");
        CreateIndexMetadata placeHolder = new CreateIndexMetadata(
                createIndex.getIndexName().toString(),
                tableName,
                createIndex.getIndexType(),
                0L,
                indexColumns,
                partitions,
                properties,
                session.getUser(),
                UNDEFINED);
        synchronized (StatementAnalyzer.class) {
            IndexClient.RecordStatus recordStatus = heuristicIndexerManager.getIndexClient().lookUpIndexRecord(placeHolder);
            switch (recordStatus) {
                case SAME_NAME:
                    throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex, "Index '%s' already exists", createIndex.getIndexName().toString());
                case SAME_CONTENT:
                    throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex, "Index with same (table,column,indexType) already exists");
                case SAME_INDEX_PART_CONFLICT:
                    throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex, "Index with same (table,column,indexType) already exists and partition(s) contain conflicts");
                case IN_PROGRESS_SAME_NAME:
                    throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex,
                            "Index '%s' is being created by another user. Check running queries for details. If there is no running query for this index, "
                                    + "the index may be in an unexpected error state and should be dropped using 'DROP INDEX %s'",
                            createIndex.getIndexName().toString(), createIndex.getIndexName().toString());
                case IN_PROGRESS_SAME_CONTENT:
                    throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex,
                            "Index with same (table,column,indexType) is being created by another user. Check running queries for details. "
                                    + "If there is no running query for this index, the index may be in an unexpected error state and should be dropped using 'DROP INDEX'");
                case IN_PROGRESS_SAME_INDEX_PART_CONFLICT:
                    if (partitions.isEmpty()) {
                        throw new SemanticException(INDEX_ALREADY_EXISTS, createIndex,
                                "Index with same (table,column,indexType) is being created by another user. Check running queries for details. "
                                        + "If there is no running query for this index, the index may be in an unexpected error state and should be dropped using 'DROP INDEX %s'",
                                createIndex.getIndexName().toString());
                    }
                    // fall through: different queries may run with explicitly the same partitions
                case SAME_INDEX_PART_CAN_MERGE:
                case IN_PROGRESS_SAME_INDEX_PART_CAN_MERGE:
                    break;
                case NOT_FOUND:
                    heuristicIndexerManager.getIndexClient().addIndexRecord(placeHolder);
            }
        }
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
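The placeholder record above is a check-then-act guard: the analyzer reserves the index name (and partition set) under a class-level lock before the expensive index build starts, so concurrent CREATE INDEX statements observe one another. A minimal, self-contained sketch of the same reservation pattern, deliberately independent of the hetu-core IndexClient:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Assumption: this registry is an illustrative stand-in, not the hetu-core IndexClient.
final class IndexRecordRegistry
{
    private final Map<String, String> records = new ConcurrentHashMap<>();

    /** Atomically reserve an index name; returns false if someone else holds it. */
    boolean tryReserve(String indexName)
    {
        // putIfAbsent is atomic, so two concurrent CREATE INDEX statements cannot both
        // claim the same name, mirroring the SAME_NAME / IN_PROGRESS_SAME_NAME checks.
        return records.putIfAbsent(indexName, "IN_PROGRESS") == null;
    }

    /** Flip the placeholder to a completed record once the index is built. */
    void markComplete(String indexName)
    {
        records.replace(indexName, "IN_PROGRESS", "COMPLETE");
    }
}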
use of io.prestosql.spi.metadata.TableHandle in project hetu-core by openlookeng.
the class PushPredicateIntoTableScan method pushPredicateIntoTableScan.
/**
 * For RowExpression {@param predicate}
 */
public static Optional<PlanNode> pushPredicateIntoTableScan(
        TableScanNode node,
        RowExpression predicate,
        boolean pruneWithPredicateExpression,
        Session session,
        PlanNodeIdAllocator idAllocator,
        PlanSymbolAllocator planSymbolAllocator,
        Metadata metadata,
        RowExpressionDomainTranslator domainTranslator,
        boolean pushPartitionsOnly)
{
    // don't include non-deterministic predicates
    LogicalRowExpressions logicalRowExpressions = new LogicalRowExpressions(
            new RowExpressionDeterminismEvaluator(metadata),
            new FunctionResolution(metadata.getFunctionAndTypeManager()),
            metadata.getFunctionAndTypeManager());
    RowExpression deterministicPredicate = logicalRowExpressions.filterDeterministicConjuncts(predicate);
    RowExpressionDomainTranslator.ExtractionResult<VariableReferenceExpression> decomposedPredicate =
            domainTranslator.fromPredicate(session.toConnectorSession(), deterministicPredicate);
    TupleDomain<ColumnHandle> newDomain = decomposedPredicate.getTupleDomain()
            .transform(variableName -> node.getAssignments().get(new Symbol(variableName.getName())))
            .intersect(node.getEnforcedConstraint());
    Map<ColumnHandle, Symbol> assignments = ImmutableBiMap.copyOf(node.getAssignments()).inverse();
    Set<ColumnHandle> allColumnHandles = new HashSet<>();
    assignments.keySet().stream().forEach(allColumnHandles::add);
    Constraint constraint;
    List<Constraint> disjunctConstraints = ImmutableList.of();
    if (!pushPartitionsOnly) {
        List<RowExpression> orSet = LogicalRowExpressions.extractDisjuncts(decomposedPredicate.getRemainingExpression());
        List<RowExpressionDomainTranslator.ExtractionResult<VariableReferenceExpression>> disjunctPredicates = orSet.stream()
                .map(e -> domainTranslator.fromPredicate(session.toConnectorSession(), e))
                .collect(Collectors.toList());
        /* If any branch yields all records, there is no need to process the OR branches */
        if (!disjunctPredicates.stream().anyMatch(e -> e.getTupleDomain().isAll())) {
            List<TupleDomain<ColumnHandle>> orDomains = disjunctPredicates.stream()
                    .map(er -> er.getTupleDomain().transform(variableName -> node.getAssignments().get(new Symbol(variableName.getName()))))
                    .collect(Collectors.toList());
            disjunctConstraints = orDomains.stream()
                    .filter(d -> !d.isAll() && !d.isNone())
                    .map(d -> new Constraint(d))
                    .collect(Collectors.toList());
        }
    }
    if (pruneWithPredicateExpression) {
        LayoutConstraintEvaluatorForRowExpression evaluator = new LayoutConstraintEvaluatorForRowExpression(
                metadata,
                session,
                node.getAssignments(),
                logicalRowExpressions.combineConjuncts(
                        deterministicPredicate,
                        // Simplify the tuple domain to avoid creating an expression with too many nodes,
                        // which would be expensive to evaluate in the call to isCandidate below.
                        domainTranslator.toPredicate(newDomain.simplify().transform(column -> {
                            if (assignments.size() == 0 || assignments.getOrDefault(column, null) == null) {
                                return null;
                            }
                            else {
                                return new VariableReferenceExpression(assignments.getOrDefault(column, null).getName(), planSymbolAllocator.getSymbols().get(assignments.getOrDefault(column, null)));
                            }
                        }))));
        constraint = new Constraint(newDomain, evaluator::isCandidate);
    }
    else {
        // Currently, invoking the expression interpreter is very expensive.
        // TODO invoke the interpreter unconditionally when the interpreter becomes cheap enough.
        constraint = new Constraint(newDomain);
    }
    TableHandle newTable;
    TupleDomain<ColumnHandle> remainingFilter;
    if (!metadata.usesLegacyTableLayouts(session, node.getTable())) {
        if (newDomain.isNone()) {
            // the predicate is provably unsatisfiable, so turn the subtree into a Values node
            return Optional.of(new ValuesNode(idAllocator.getNextId(), node.getOutputSymbols(), ImmutableList.of()));
        }
        Optional<ConstraintApplicationResult<TableHandle>> result =
                metadata.applyFilter(session, node.getTable(), constraint, disjunctConstraints, allColumnHandles, pushPartitionsOnly);
        if (!result.isPresent()) {
            return Optional.empty();
        }
        newTable = result.get().getHandle();
        if (metadata.getTableProperties(session, newTable).getPredicate().isNone()) {
            return Optional.of(new ValuesNode(idAllocator.getNextId(), node.getOutputSymbols(), ImmutableList.of()));
        }
        remainingFilter = result.get().getRemainingFilter();
    }
    else {
        Optional<TableLayoutResult> layout = metadata.getLayout(
                session,
                node.getTable(),
                constraint,
                Optional.of(node.getOutputSymbols().stream().map(node.getAssignments()::get).collect(toImmutableSet())));
        if (!layout.isPresent() || layout.get().getTableProperties().getPredicate().isNone()) {
            return Optional.of(new ValuesNode(idAllocator.getNextId(), node.getOutputSymbols(), ImmutableList.of()));
        }
        newTable = layout.get().getNewTableHandle();
        remainingFilter = layout.get().getUnenforcedConstraint();
    }
    TableScanNode tableScan = new TableScanNode(
            node.getId(),
            newTable,
            node.getOutputSymbols(),
            node.getAssignments(),
            computeEnforced(newDomain, remainingFilter),
            Optional.of(deterministicPredicate),
            node.getStrategy(),
            node.getReuseTableScanMappingId(),
            0,
            node.isForDelete());
    // The order of the arguments to combineConjuncts matters:
    // * Unenforced constraints go first because they can only be simple column references,
    //   which are not prone to logic errors such as out-of-bound access, div-by-zero, etc.
    // * Conjuncts in non-deterministic expressions and non-TupleDomain-expressible expressions should
    //   retain their original (maybe intermixed) order from the input predicate. However, this is not implemented yet.
    // * Short of implementing the previous bullet point, the current order of non-deterministic expressions
    //   and non-TupleDomain-expressible expressions should be retained. Changing the order can lead
    //   to failures of previously successful queries.
    RowExpression resultingPredicate;
    if (remainingFilter.isAll() && newTable.getConnectorHandle().hasDisjunctFiltersPushdown()) {
        resultingPredicate = logicalRowExpressions.combineConjuncts(
                domainTranslator.toPredicate(remainingFilter.transform(assignments::get), planSymbolAllocator.getSymbols()),
                logicalRowExpressions.filterNonDeterministicConjuncts(predicate));
    }
    else {
        resultingPredicate = logicalRowExpressions.combineConjuncts(
                domainTranslator.toPredicate(remainingFilter.transform(assignments::get), planSymbolAllocator.getSymbols()),
                logicalRowExpressions.filterNonDeterministicConjuncts(predicate),
                decomposedPredicate.getRemainingExpression());
    }
    if (!TRUE_CONSTANT.equals(resultingPredicate)) {
        return Optional.of(new FilterNode(idAllocator.getNextId(), tableScan, resultingPredicate));
    }
    return Optional.of(tableScan);
}
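For orientation, a hedged sketch of how an optimizer rule would typically drive this helper (only the pushPredicateIntoTableScan signature comes from the snippet above; the FilterNode/rule wiring around it is assumed):

// Sketch: a FilterNode 'filter' sits directly on a TableScanNode 'scan'; ask the
// connector to absorb as much of the predicate as it can.
Optional<PlanNode> rewritten = pushPredicateIntoTableScan(
        scan,                   // TableScanNode under the filter
        filter.getPredicate(),  // RowExpression predicate to push down
        true,                   // pruneWithPredicateExpression
        session,
        idAllocator,
        planSymbolAllocator,
        metadata,
        domainTranslator,
        false);                 // pushPartitionsOnly

// Possible outcomes: a ValuesNode when the pushed domain is provably empty, a bare
// TableScanNode when everything was enforced, or a FilterNode wrapping whatever the
// connector could not enforce. An empty Optional means nothing changed.
PlanNode replacement = rewritten.orElse(filter);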