Use of org.apache.hyracks.algebricks.core.algebra.operators.physical.HashPartitionExchangePOperator in project asterixdb by apache.
From the class RequiredCapacityVisitorTest, method testParallelGroupBy:
@Test
public void testParallelGroupBy() throws AlgebricksException {
    IClusterCapacity clusterCapacity = new ClusterCapacity();
    RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity);

    // Constructs a parallel group-by query plan: a global (partitioned) group-by
    // fed through a hash-partition exchange by a local group-by.
    GroupByOperator globalGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
    ExchangeOperator exchange = new ExchangeOperator();
    exchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null));
    GroupByOperator localGby = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.LOCAL);
    globalGby.getInputs().add(new MutableObject<>(exchange));
    exchange.getInputs().add(new MutableObject<>(localGby));

    // Verifies the calculated cluster capacity requirement for the test query plan.
    globalGby.accept(visitor, null);
    Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM);
    Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize()
            == 2 * MEMORY_BUDGET * PARALLELISM + 2 * FRAME_SIZE * PARALLELISM * PARALLELISM);
}
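The test relies on helpers (makeComputationCapacityVisitor, makeGroupByOperator) defined elsewhere in RequiredCapacityVisitorTest. Below is a minimal sketch of the group-by helper, under the assumption that the capacity visitor only inspects each operator's execution mode; the real helper may configure more:

import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.GroupByOperator;

// Hypothetical helper: builds a bare GroupByOperator and sets only its
// execution mode (LOCAL vs. PARTITIONED), which drives how the visitor
// aggregates cores and memory for the operator.
private GroupByOperator makeGroupByOperator(AbstractLogicalOperator.ExecutionMode exeMode) {
    GroupByOperator groupByOperator = new GroupByOperator();
    groupByOperator.setExecutionMode(exeMode);
    return groupByOperator;
}

One plausible reading of the asserted memory value: each of the two group-bys claims one MEMORY_BUDGET per partition, while the hash-partition exchange buffers frames per sender-receiver pair, which is where the PARALLELISM * PARALLELISM term comes from.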
Use of org.apache.hyracks.algebricks.core.algebra.operators.physical.HashPartitionExchangePOperator in project asterixdb by apache.
From the class RequiredCapacityVisitorTest, method testParallelJoin:
@Test
public void testParallelJoin() throws AlgebricksException {
    IClusterCapacity clusterCapacity = new ClusterCapacity();
    RequiredCapacityVisitor visitor = makeComputationCapacityVisitor(PARALLELISM, clusterCapacity);

    // Constructs a join query plan.
    InnerJoinOperator join = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED);

    // Left child plan of the join: a partitioned join below a hash-partition exchange.
    ExchangeOperator leftChildExchange = new ExchangeOperator();
    leftChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
    leftChildExchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null));
    InnerJoinOperator leftChild = makeJoinOperator(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
    join.getInputs().add(new MutableObject<>(leftChildExchange));
    leftChildExchange.getInputs().add(new MutableObject<>(leftChild));
    EmptyTupleSourceOperator ets = new EmptyTupleSourceOperator();
    ets.setExecutionMode(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
    // The same empty tuple source feeds both inputs of the nested join in this synthetic plan.
    leftChild.getInputs().add(new MutableObject<>(ets));
    leftChild.getInputs().add(new MutableObject<>(ets));

    // Right child plan of the join: a local group-by below a hash-partition exchange.
    ExchangeOperator rightChildExchange = new ExchangeOperator();
    rightChildExchange.setExecutionMode(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
    rightChildExchange.setPhysicalOperator(new HashPartitionExchangePOperator(Collections.emptyList(), null));
    GroupByOperator rightChild = makeGroupByOperator(AbstractLogicalOperator.ExecutionMode.LOCAL);
    join.getInputs().add(new MutableObject<>(rightChildExchange));
    rightChildExchange.getInputs().add(new MutableObject<>(rightChild));
    rightChild.getInputs().add(new MutableObject<>(ets));

    // Verifies the calculated cluster capacity requirement for the test query plan.
    join.accept(visitor, null);
    Assert.assertTrue(clusterCapacity.getAggregatedCores() == PARALLELISM);
    Assert.assertTrue(clusterCapacity.getAggregatedMemoryByteSize()
            == 3 * MEMORY_BUDGET * PARALLELISM + 2 * 2L * PARALLELISM * PARALLELISM * FRAME_SIZE
                    + 3 * FRAME_SIZE * PARALLELISM);
}
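makeJoinOperator is likewise defined elsewhere in the test class. A plausible sketch follows, assuming the join condition itself is irrelevant to capacity accounting so a constant TRUE suffices (an assumption, not taken from this excerpt):

import org.apache.commons.lang3.mutable.MutableObject;
import org.apache.hyracks.algebricks.core.algebra.expressions.ConstantExpression;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.AbstractLogicalOperator;
import org.apache.hyracks.algebricks.core.algebra.operators.logical.InnerJoinOperator;

// Hypothetical helper: builds an InnerJoinOperator with a trivial TRUE join
// condition; as with the group-by helper, only the execution mode matters here.
private InnerJoinOperator makeJoinOperator(AbstractLogicalOperator.ExecutionMode exeMode) {
    InnerJoinOperator joinOperator = new InnerJoinOperator(new MutableObject<>(ConstantExpression.TRUE));
    joinOperator.setExecutionMode(exeMode);
    return joinOperator;
}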
Use of org.apache.hyracks.algebricks.core.algebra.operators.physical.HashPartitionExchangePOperator in project asterixdb by apache.
From the class RemoveUnnecessarySortMergeExchange, method rewritePost:
@Override
public boolean rewritePost(Mutable<ILogicalOperator> opRef, IOptimizationContext context) throws AlgebricksException {
    AbstractLogicalOperator op1 = (AbstractLogicalOperator) opRef.getValue();
    if (op1.getPhysicalOperator() == null
            || (op1.getPhysicalOperator().getOperatorTag() != PhysicalOperatorTag.HASH_PARTITION_EXCHANGE
                    && op1.getPhysicalOperator().getOperatorTag() != PhysicalOperatorTag.HASH_PARTITION_MERGE_EXCHANGE)) {
        return false;
    }
    Mutable<ILogicalOperator> currentOpRef = op1.getInputs().get(0);
    AbstractLogicalOperator currentOp = (AbstractLogicalOperator) currentOpRef.getValue();
    // Goes down the pipeline to find a qualified SortMergeExchange to eliminate.
    while (currentOp != null) {
        IPhysicalOperator physicalOp = currentOp.getPhysicalOperator();
        if (physicalOp == null) {
            return false;
        } else if (physicalOp.getOperatorTag() == PhysicalOperatorTag.SORT_MERGE_EXCHANGE) {
            break;
        } else if (!currentOp.isMap() || currentOp.getOperatorTag() == LogicalOperatorTag.UNNEST
                || currentOp.getOperatorTag() == LogicalOperatorTag.LIMIT) {
            // We need to use this new property in the logical operator to check order sensitivity.
            return false;
        } else if (currentOp.getInputs().size() == 1) {
            currentOpRef = currentOp.getInputs().get(0);
            currentOp = (AbstractLogicalOperator) currentOpRef.getValue();
        } else {
            currentOp = null;
        }
    }
    if (currentOp == null) {
        // There is no such qualified SortMergeExchange.
        return false;
    }
    if (op1.getPhysicalOperator().getOperatorTag() == PhysicalOperatorTag.HASH_PARTITION_MERGE_EXCHANGE) {
        // If op1 is a hash_partition_merge_exchange, the sort_merge_exchange can simply be removed.
        currentOpRef.setValue(currentOp.getInputs().get(0).getValue());
        op1.computeDeliveredPhysicalProperties(context);
        return true;
    }
    // Checks whether the sort columns in the SortMergeExchange are still available at op1.
    // If yes, we use a HashPartitionMergeExchange; otherwise, we keep the plain HashPartitionExchange.
    SortMergeExchangePOperator sme = (SortMergeExchangePOperator) currentOp.getPhysicalOperator();
    HashPartitionExchangePOperator hpe = (HashPartitionExchangePOperator) op1.getPhysicalOperator();
    Set<LogicalVariable> liveVars = new HashSet<>();
    VariableUtilities.getLiveVariables(op1, liveVars);
    boolean usingHashMergeExchange = true;
    for (OrderColumn oc : sme.getSortColumns()) {
        if (!liveVars.contains(oc.getColumn())) {
            usingHashMergeExchange = false;
        }
    }
    if (usingHashMergeExchange) {
        // Adds the sort columns from the SortMergeExchange into a new HashPartitionMergeExchange.
        List<OrderColumn> ocList = new ArrayList<>();
        for (OrderColumn oc : sme.getSortColumns()) {
            ocList.add(oc);
        }
        HashPartitionMergeExchangePOperator hpme =
                new HashPartitionMergeExchangePOperator(ocList, hpe.getHashFields(), hpe.getDomain());
        op1.setPhysicalOperator(hpme);
    }
    // Removes the SortMergeExchange op.
    currentOpRef.setValue(currentOp.getInputs().get(0).getValue());
    // Re-computes the delivered properties at op1.
    op1.computeDeliveredPhysicalProperties(context);
    return true;
}
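For context, this rule runs during the Algebricks physical rewrite phase, after exchange enforcers have been placed. A hedged sketch of how such rules are typically registered; the list name and exact ordering here are illustrative, not quoted from the AsterixDB rule collections:

import java.util.ArrayList;
import java.util.List;
import org.apache.hyracks.algebricks.core.rewriter.base.IAlgebraicRewriteRule;

// Illustrative registration order: removing a redundant sort-merge exchange only
// makes sense after EnforceStructuralPropertiesRule has inserted the exchanges.
List<IAlgebraicRewriteRule> physicalRewrites = new ArrayList<>();
physicalRewrites.add(new EnforceStructuralPropertiesRule());
physicalRewrites.add(new RemoveUnnecessarySortMergeExchange());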
Use of org.apache.hyracks.algebricks.core.algebra.operators.physical.HashPartitionExchangePOperator in project asterixdb by apache.
From the class EnforceStructuralPropertiesRule, method addPartitioningEnforcers:
private void addPartitioningEnforcers(ILogicalOperator op, int i, IPartitioningProperty pp,
        IPhysicalPropertiesVector required, IPhysicalPropertiesVector deliveredByChild, INodeDomain domain,
        IOptimizationContext context) throws AlgebricksException {
    if (pp != null) {
        IPhysicalOperator pop;
        switch (pp.getPartitioningType()) {
            case UNPARTITIONED: {
                List<OrderColumn> ordCols = computeOrderColumns(deliveredByChild);
                if (ordCols.isEmpty()) {
                    pop = new RandomMergeExchangePOperator();
                } else {
                    if (op.getAnnotations().containsKey(OperatorAnnotations.USE_RANGE_CONNECTOR)) {
                        IRangeMap rangeMap = (IRangeMap) op.getAnnotations().get(OperatorAnnotations.USE_RANGE_CONNECTOR);
                        pop = new RangePartitionMergeExchangePOperator(ordCols, domain, rangeMap);
                    } else {
                        OrderColumn[] sortColumns = new OrderColumn[ordCols.size()];
                        sortColumns = ordCols.toArray(sortColumns);
                        pop = new SortMergeExchangePOperator(sortColumns);
                    }
                }
                break;
            }
            case UNORDERED_PARTITIONED: {
                List<LogicalVariable> varList = new ArrayList<>(((UnorderedPartitionedProperty) pp).getColumnSet());
                String hashMergeHint = context.getMetadataProvider().getConfig().get(HASH_MERGE);
                if (hashMergeHint == null || !hashMergeHint.equalsIgnoreCase(TRUE_CONSTANT)) {
                    pop = new HashPartitionExchangePOperator(varList, domain);
                    break;
                }
                List<ILocalStructuralProperty> cldLocals = deliveredByChild.getLocalProperties();
                List<ILocalStructuralProperty> reqdLocals = required.getLocalProperties();
                boolean propWasSet = false;
                pop = null;
                if (reqdLocals != null && cldLocals != null && allAreOrderProps(cldLocals)) {
                    AbstractLogicalOperator c = (AbstractLogicalOperator) op.getInputs().get(i).getValue();
                    Map<LogicalVariable, EquivalenceClass> ecs = context.getEquivalenceClassMap(c);
                    List<FunctionalDependency> fds = context.getFDList(c);
                    if (PropertiesUtil.matchLocalProperties(reqdLocals, cldLocals, ecs, fds)) {
                        List<OrderColumn> orderColumns = getOrderColumnsFromGroupingProperties(reqdLocals, cldLocals);
                        pop = new HashPartitionMergeExchangePOperator(orderColumns, varList, domain);
                        propWasSet = true;
                    }
                }
                if (!propWasSet) {
                    pop = new HashPartitionExchangePOperator(varList, domain);
                }
                break;
            }
            case ORDERED_PARTITIONED: {
                pop = new RangePartitionExchangePOperator(((OrderedPartitionedProperty) pp).getOrderColumns(), domain, null);
                break;
            }
            case BROADCAST: {
                pop = new BroadcastExchangePOperator(domain);
                break;
            }
            case RANDOM: {
                RandomPartitioningProperty rpp = (RandomPartitioningProperty) pp;
                INodeDomain nd = rpp.getNodeDomain();
                pop = new RandomPartitionExchangePOperator(nd);
                break;
            }
            default: {
                throw new NotImplementedException(
                        "Enforcer for " + pp.getPartitioningType() + " partitioning type has not been implemented.");
            }
        }
        Mutable<ILogicalOperator> ci = op.getInputs().get(i);
        ExchangeOperator exchg = new ExchangeOperator();
        exchg.setPhysicalOperator(pop);
        setNewOp(ci, exchg, context);
        exchg.setExecutionMode(AbstractLogicalOperator.ExecutionMode.PARTITIONED);
        OperatorPropertiesUtil.computeSchemaAndPropertiesRecIfNull(exchg, context);
        context.computeAndSetTypeEnvironmentForOperator(exchg);
        if (AlgebricksConfig.DEBUG) {
            AlgebricksConfig.ALGEBRICKS_LOGGER.fine(">>>> Added partitioning enforcer " + exchg.getPhysicalOperator() + ".\n");
            printOp((AbstractLogicalOperator) op);
        }
    }
}
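The UNORDERED_PARTITIONED branch consults allAreOrderProps, which is not shown in this excerpt. A plausible shape for it, assuming it merely checks that every local property delivered by the child is an ordering (rather than grouping) property; this is a sketch, not the rule's actual code:

import java.util.List;
import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty;
import org.apache.hyracks.algebricks.core.algebra.properties.ILocalStructuralProperty.PropertyType;

// Hypothetical helper: upgrading to a hash-partition-merge exchange is only safe
// when the child's local properties are all orderings the merge can preserve.
private boolean allAreOrderProps(List<ILocalStructuralProperty> cldLocals) {
    for (ILocalStructuralProperty lsp : cldLocals) {
        if (lsp.getPropertyType() != PropertyType.LOCAL_ORDER_PROPERTY) {
            return false;
        }
    }
    return !cldLocals.isEmpty();
}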