Use of org.apache.flink.optimizer.dataproperties.GlobalProperties in project flink by apache.
In class SemanticPropertiesAPIToPlanTest, the method forwardFieldsTestMapReduce:
@Test
public void forwardFieldsTestMapReduce() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    DataSet<Tuple3<Integer, Integer, Integer>> set =
            env.readCsvFile(IN_FILE).types(Integer.class, Integer.class, Integer.class);
    set = set.map(new MockMapper()).withForwardedFields("*")
            .groupBy(0)
            .reduce(new MockReducer()).withForwardedFields("f0->f1")
            .map(new MockMapper()).withForwardedFields("*")
            .groupBy(1)
            .reduce(new MockReducer()).withForwardedFields("*");
    set.output(new DiscardingOutputFormat<Tuple3<Integer, Integer, Integer>>());
    Plan plan = env.createProgramPlan();
    OptimizedPlan oPlan = compileWithStats(plan);
    oPlan.accept(new Visitor<PlanNode>() {

        @Override
        public boolean preVisit(PlanNode visitable) {
            if (visitable instanceof SingleInputPlanNode
                    && visitable.getProgramOperator() instanceof ReduceOperatorBase) {
                for (Channel input : visitable.getInputs()) {
                    GlobalProperties gprops = visitable.getGlobalProperties();
                    LocalProperties lprops = visitable.getLocalProperties();
                    Assert.assertTrue(
                            "Reduce should just forward the input if it is already partitioned",
                            input.getShipStrategy() == ShipStrategyType.FORWARD);
                    Assert.assertTrue(
                            "Wrong GlobalProperties on Reducer",
                            gprops.isPartitionedOnFields(new FieldSet(1)));
                    Assert.assertTrue(
                            "Wrong GlobalProperties on Reducer",
                            gprops.getPartitioning() == PartitioningProperty.HASH_PARTITIONED);
                    Assert.assertTrue(
                            "Wrong LocalProperties on Reducer",
                            lprops.getGroupedFields().contains(1));
                }
            }
            if (visitable instanceof SingleInputPlanNode
                    && visitable.getProgramOperator() instanceof MapOperatorBase) {
                for (Channel input : visitable.getInputs()) {
                    GlobalProperties gprops = visitable.getGlobalProperties();
                    LocalProperties lprops = visitable.getLocalProperties();
                    Assert.assertTrue(
                            "Map should just forward the input if it is already partitioned",
                            input.getShipStrategy() == ShipStrategyType.FORWARD);
                    Assert.assertTrue(
                            "Wrong GlobalProperties on Mapper",
                            gprops.isPartitionedOnFields(new FieldSet(1)));
                    Assert.assertTrue(
                            "Wrong GlobalProperties on Mapper",
                            gprops.getPartitioning() == PartitioningProperty.HASH_PARTITIONED);
                    Assert.assertTrue(
                            "Wrong LocalProperties on Mapper",
                            lprops.getGroupedFields().contains(1));
                }
                return false;
            }
            return true;
        }

        @Override
        public void postVisit(PlanNode visitable) {}
    });
}
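The MockMapper and MockReducer referenced above are not included in this excerpt. The forwarded-fields annotations ("*" and "f0->f1") are only valid if the functions actually pass the corresponding fields through unchanged, so identity-style implementations along the following lines are a reasonable assumption (hypothetical sketch, not necessarily the project's actual classes):

// Hypothetical sketch of the user functions assumed by the test above.
// Assumed imports:
//   org.apache.flink.api.common.functions.MapFunction
//   org.apache.flink.api.common.functions.ReduceFunction
//   org.apache.flink.api.java.tuple.Tuple3
public static class MockMapper
        implements MapFunction<Tuple3<Integer, Integer, Integer>, Tuple3<Integer, Integer, Integer>> {

    @Override
    public Tuple3<Integer, Integer, Integer> map(Tuple3<Integer, Integer, Integer> value) {
        // returns the record unchanged, consistent with withForwardedFields("*")
        return value;
    }
}

public static class MockReducer implements ReduceFunction<Tuple3<Integer, Integer, Integer>> {

    @Override
    public Tuple3<Integer, Integer, Integer> reduce(
            Tuple3<Integer, Integer, Integer> value1, Tuple3<Integer, Integer, Integer> value2) {
        // returns one of the two input records, so forwarded-fields declarations can be attached
        return value1;
    }
}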
Use of org.apache.flink.optimizer.dataproperties.GlobalProperties in project flink by apache.
In class PartitioningReusageTest, the method checkValidCoGroupInputProperties:
private void checkValidCoGroupInputProperties(DualInputPlanNode coGroup) {
    GlobalProperties inProps1 = coGroup.getInput1().getGlobalProperties();
    GlobalProperties inProps2 = coGroup.getInput2().getGlobalProperties();
    if (inProps1.getPartitioning() == PartitioningProperty.HASH_PARTITIONED
            && inProps2.getPartitioning() == PartitioningProperty.HASH_PARTITIONED) {
        // check that both inputs are hash partitioned on the same fields
        FieldList pFields1 = inProps1.getPartitioningFields();
        FieldList pFields2 = inProps2.getPartitioningFields();
        assertTrue(
                "Inputs are not the same number of fields. Input 1: " + pFields1 + ", Input 2: " + pFields2,
                pFields1.size() == pFields2.size());
        FieldList reqPFields1 = coGroup.getKeysForInput1();
        FieldList reqPFields2 = coGroup.getKeysForInput2();
        for (int i = 0; i < pFields1.size(); i++) {
            // get fields
            int f1 = pFields1.get(i);
            int f2 = pFields2.get(i);
            // check that field positions in original key field list are identical
            int pos1 = getPosInFieldList(f1, reqPFields1);
            int pos2 = getPosInFieldList(f2, reqPFields2);
            if (pos1 < 0) {
                fail("Input 1 is partitioned on field " + f1 + " which is not contained in the key set " + reqPFields1);
            }
            if (pos2 < 0) {
                fail("Input 2 is partitioned on field " + f2 + " which is not contained in the key set " + reqPFields2);
            }
            if (pos1 != pos2) {
                fail("Inputs are not partitioned on the same key fields");
            }
        }
    } else {
        throw new UnsupportedOperationException(
                "This method has only been implemented to check for hash partitioned coGroup inputs");
    }
}
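The getPosInFieldList helper called above is not part of this excerpt. From the way it is used (a non-negative position when the field is contained in the key list, a negative value otherwise), a minimal sketch could look like this (hypothetical; the actual helper may differ):

// Hypothetical helper: position of a field in a FieldList, or -1 if it is absent.
private int getPosInFieldList(int field, FieldList fieldList) {
    for (int i = 0; i < fieldList.size(); i++) {
        if (fieldList.get(i) == field) {
            return i;
        }
    }
    return -1;
}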
Use of org.apache.flink.optimizer.dataproperties.GlobalProperties in project flink by apache.
In class TwoInputNode, the method instantiate:
protected void instantiate(
        OperatorDescriptorDual operator,
        Channel in1,
        Channel in2,
        List<Set<? extends NamedChannel>> broadcastPlanChannels,
        List<PlanNode> target,
        CostEstimator estimator,
        RequestedGlobalProperties globPropsReq1,
        RequestedGlobalProperties globPropsReq2,
        RequestedLocalProperties locPropsReq1,
        RequestedLocalProperties locPropsReq2) {
    final PlanNode inputSource1 = in1.getSource();
    final PlanNode inputSource2 = in2.getSource();
    for (List<NamedChannel> broadcastChannelsCombination : Sets.cartesianProduct(broadcastPlanChannels)) {
        boolean validCombination = true;
        // check whether the broadcast inputs use the same plan candidate at the branching point
        for (int i = 0; i < broadcastChannelsCombination.size(); i++) {
            NamedChannel nc = broadcastChannelsCombination.get(i);
            PlanNode bcSource = nc.getSource();
            if (!(areBranchCompatible(bcSource, inputSource1) || areBranchCompatible(bcSource, inputSource2))) {
                validCombination = false;
                break;
            }
            // check branch compatibility against all other broadcast variables
            for (int k = 0; k < i; k++) {
                PlanNode otherBcSource = broadcastChannelsCombination.get(k).getSource();
                if (!areBranchCompatible(bcSource, otherBcSource)) {
                    validCombination = false;
                    break;
                }
            }
        }
        if (!validCombination) {
            continue;
        }
        placePipelineBreakersIfNecessary(operator.getStrategy(), in1, in2);
        DualInputPlanNode node = operator.instantiate(in1, in2, this);
        node.setBroadcastInputs(broadcastChannelsCombination);
        SemanticProperties semPropsGlobalPropFiltering = getSemanticPropertiesForGlobalPropertyFiltering();
        GlobalProperties gp1 =
                in1.getGlobalProperties().clone().filterBySemanticProperties(semPropsGlobalPropFiltering, 0);
        GlobalProperties gp2 =
                in2.getGlobalProperties().clone().filterBySemanticProperties(semPropsGlobalPropFiltering, 1);
        GlobalProperties combined = operator.computeGlobalProperties(gp1, gp2);
        SemanticProperties semPropsLocalPropFiltering = getSemanticPropertiesForLocalPropertyFiltering();
        LocalProperties lp1 =
                in1.getLocalProperties().clone().filterBySemanticProperties(semPropsLocalPropFiltering, 0);
        LocalProperties lp2 =
                in2.getLocalProperties().clone().filterBySemanticProperties(semPropsLocalPropFiltering, 1);
        LocalProperties locals = operator.computeLocalProperties(lp1, lp2);
        node.initProperties(combined, locals);
        node.updatePropertiesWithUniqueSets(getUniqueFields());
        target.add(node);
    }
}
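The outer loop enumerates every way of choosing one plan candidate per broadcast input via Guava's Sets.cartesianProduct, which turns a list of sets into the set of all lists that pick exactly one element from each set. A small standalone illustration of that behavior, using plain strings in place of NamedChannels (hypothetical demo, not project code):

// Standalone demo of Guava's Sets.cartesianProduct as used in the loop above.
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.List;
import java.util.Set;

public class CartesianProductDemo {

    public static void main(String[] args) {
        Set<String> candidatesForBroadcast1 = ImmutableSet.of("bc1-planA", "bc1-planB");
        Set<String> candidatesForBroadcast2 = ImmutableSet.of("bc2-planA");
        // Prints [bc1-planA, bc2-planA] and [bc1-planB, bc2-planA]:
        // each combination picks exactly one candidate per broadcast input.
        for (List<String> combination :
                Sets.cartesianProduct(ImmutableList.of(candidatesForBroadcast1, candidatesForBroadcast2))) {
            System.out.println(combination);
        }
    }
}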
Use of org.apache.flink.optimizer.dataproperties.GlobalProperties in project flink by apache.
In class AbstractJoinDescriptor, the method computeGlobalProperties:
@Override
public GlobalProperties computeGlobalProperties(GlobalProperties in1, GlobalProperties in2) {
    GlobalProperties gp = GlobalProperties.combine(in1, in2);
    if (gp.getUniqueFieldCombination() != null
            && gp.getUniqueFieldCombination().size() > 0
            && gp.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) {
        gp.setAnyPartitioning(gp.getUniqueFieldCombination().iterator().next().toFieldList());
    }
    gp.clearUniqueFieldCombinations();
    return gp;
}
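Read together, the condition and the two calls implement a small property upgrade: GlobalProperties.combine merges the properties of the two inputs; if the merged result is only RANDOM_PARTITIONED but a unique field combination is known, setAnyPartitioning promotes those unique fields to a non-specific partitioning, and clearUniqueFieldCombinations then drops the unique-field bookkeeping from the returned properties. CartesianProductDescriptor below applies the identical rule.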
Use of org.apache.flink.optimizer.dataproperties.GlobalProperties in project flink by apache.
In class CartesianProductDescriptor, the method computeGlobalProperties:
@Override
public GlobalProperties computeGlobalProperties(GlobalProperties in1, GlobalProperties in2) {
    GlobalProperties gp = GlobalProperties.combine(in1, in2);
    if (gp.getUniqueFieldCombination() != null
            && gp.getUniqueFieldCombination().size() > 0
            && gp.getPartitioning() == PartitioningProperty.RANDOM_PARTITIONED) {
        gp.setAnyPartitioning(gp.getUniqueFieldCombination().iterator().next().toFieldList());
    }
    gp.clearUniqueFieldCombinations();
    return gp;
}