Use of org.apache.flink.table.runtime.operators.join.FlinkJoinType in project flink by apache.
The class BatchExecHashJoin, method translateToPlanInternal:
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    ExecEdge leftInputEdge = getInputEdges().get(0);
    ExecEdge rightInputEdge = getInputEdges().get(1);
    Transformation<RowData> leftInputTransform =
            (Transformation<RowData>) leftInputEdge.translateToPlan(planner);
    Transformation<RowData> rightInputTransform =
            (Transformation<RowData>) rightInputEdge.translateToPlan(planner);

    // get input types
    RowType leftType = (RowType) leftInputEdge.getOutputType();
    RowType rightType = (RowType) rightInputEdge.getOutputType();
    JoinUtil.validateJoinSpec(joinSpec, leftType, rightType, false);

    int[] leftKeys = joinSpec.getLeftKeys();
    int[] rightKeys = joinSpec.getRightKeys();
    LogicalType[] keyFieldTypes =
            IntStream.of(leftKeys).mapToObj(leftType::getTypeAt).toArray(LogicalType[]::new);
    RowType keyType = RowType.of(keyFieldTypes);

    GeneratedJoinCondition condFunc =
            JoinUtil.generateConditionFunction(
                    config.getTableConfig(), joinSpec.getNonEquiCondition().orElse(null), leftType, rightType);

    // projection for equals
    GeneratedProjection leftProj =
            ProjectionCodeGenerator.generateProjection(
                    new CodeGeneratorContext(config.getTableConfig()),
                    "HashJoinLeftProjection", leftType, keyType, leftKeys);
    GeneratedProjection rightProj =
            ProjectionCodeGenerator.generateProjection(
                    new CodeGeneratorContext(config.getTableConfig()),
                    "HashJoinRightProjection", rightType, keyType, rightKeys);

    Transformation<RowData> buildTransform;
    Transformation<RowData> probeTransform;
    GeneratedProjection buildProj;
    GeneratedProjection probeProj;
    int[] buildKeys;
    int[] probeKeys;
    RowType buildType;
    RowType probeType;
    int buildRowSize;
    long buildRowCount;
    long probeRowCount;
    boolean reverseJoin = !leftIsBuild;
    if (leftIsBuild) {
        buildTransform = leftInputTransform;
        buildProj = leftProj;
        buildType = leftType;
        buildRowSize = estimatedLeftAvgRowSize;
        buildRowCount = estimatedLeftRowCount;
        buildKeys = leftKeys;
        probeTransform = rightInputTransform;
        probeProj = rightProj;
        probeType = rightType;
        probeRowCount = estimatedRightRowCount;
        probeKeys = rightKeys;
    } else {
        buildTransform = rightInputTransform;
        buildProj = rightProj;
        buildType = rightType;
        buildRowSize = estimatedRightAvgRowSize;
        buildRowCount = estimatedRightRowCount;
        buildKeys = rightKeys;
        probeTransform = leftInputTransform;
        probeProj = leftProj;
        probeType = leftType;
        probeRowCount = estimatedLeftRowCount;
        probeKeys = leftKeys;
    }

    // operator
    StreamOperatorFactory<RowData> operator;
    FlinkJoinType joinType = joinSpec.getJoinType();
    HashJoinType hashJoinType =
            HashJoinType.of(
                    leftIsBuild,
                    joinType.isLeftOuter(),
                    joinType.isRightOuter(),
                    joinType == FlinkJoinType.SEMI,
                    joinType == FlinkJoinType.ANTI);
    if (LongHashJoinGenerator.support(hashJoinType, keyType, joinSpec.getFilterNulls())) {
        operator =
                LongHashJoinGenerator.gen(
                        config.getTableConfig(), hashJoinType, keyType, buildType, probeType,
                        buildKeys, probeKeys, buildRowSize, buildRowCount, reverseJoin, condFunc);
    } else {
        operator =
                SimpleOperatorFactory.of(
                        HashJoinOperator.newHashJoinOperator(
                                hashJoinType, condFunc, reverseJoin, joinSpec.getFilterNulls(),
                                buildProj, probeProj, tryDistinctBuildRow,
                                buildRowSize, buildRowCount, probeRowCount, keyType));
    }

    long managedMemory =
            config.get(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_JOIN_MEMORY).getBytes();
    return ExecNodeUtil.createTwoInputTransformation(
            buildTransform, probeTransform,
            createTransformationName(config), createTransformationDescription(config),
            operator, InternalTypeInfo.of(getOutputType()),
            probeTransform.getParallelism(), managedMemory);
}
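For reference, the only FlinkJoinType-specific logic in this batch path is the mapping to HashJoinType shown above. A minimal standalone sketch of that call follows, assuming HashJoinType lives in the same org.apache.flink.table.runtime.operators.join package; the join type and build side chosen here are hypothetical inputs, and the printed constant name is not asserted.

import org.apache.flink.table.runtime.operators.join.FlinkJoinType;
import org.apache.flink.table.runtime.operators.join.HashJoinType;

public class HashJoinTypeSketch {
    public static void main(String[] args) {
        // Hypothetical inputs: a LEFT join with the left input as the build side.
        FlinkJoinType joinType = FlinkJoinType.LEFT;
        boolean leftIsBuild = true;

        // Same mapping as in BatchExecHashJoin#translateToPlanInternal above.
        HashJoinType hashJoinType =
                HashJoinType.of(
                        leftIsBuild,
                        joinType.isLeftOuter(),
                        joinType.isRightOuter(),
                        joinType == FlinkJoinType.SEMI,
                        joinType == FlinkJoinType.ANTI);
        System.out.println(joinType + " (leftIsBuild=" + leftIsBuild + ") -> " + hashJoinType);
    }
}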
Use of org.apache.flink.table.runtime.operators.join.FlinkJoinType in project flink by apache.
The class StreamExecJoin, method translateToPlanInternal:
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    final ExecEdge leftInputEdge = getInputEdges().get(0);
    final ExecEdge rightInputEdge = getInputEdges().get(1);
    final Transformation<RowData> leftTransform =
            (Transformation<RowData>) leftInputEdge.translateToPlan(planner);
    final Transformation<RowData> rightTransform =
            (Transformation<RowData>) rightInputEdge.translateToPlan(planner);

    final RowType leftType = (RowType) leftInputEdge.getOutputType();
    final RowType rightType = (RowType) rightInputEdge.getOutputType();
    JoinUtil.validateJoinSpec(joinSpec, leftType, rightType, true);

    final int[] leftJoinKey = joinSpec.getLeftKeys();
    final int[] rightJoinKey = joinSpec.getRightKeys();
    final InternalTypeInfo<RowData> leftTypeInfo = InternalTypeInfo.of(leftType);
    final JoinInputSideSpec leftInputSpec =
            JoinUtil.analyzeJoinInput(leftTypeInfo, leftJoinKey, leftUniqueKeys);
    final InternalTypeInfo<RowData> rightTypeInfo = InternalTypeInfo.of(rightType);
    final JoinInputSideSpec rightInputSpec =
            JoinUtil.analyzeJoinInput(rightTypeInfo, rightJoinKey, rightUniqueKeys);

    GeneratedJoinCondition generatedCondition =
            JoinUtil.generateConditionFunction(config.getTableConfig(), joinSpec, leftType, rightType);
    long minRetentionTime = config.getStateRetentionTime();

    AbstractStreamingJoinOperator operator;
    FlinkJoinType joinType = joinSpec.getJoinType();
    if (joinType == FlinkJoinType.ANTI || joinType == FlinkJoinType.SEMI) {
        operator =
                new StreamingSemiAntiJoinOperator(
                        joinType == FlinkJoinType.ANTI, leftTypeInfo, rightTypeInfo,
                        generatedCondition, leftInputSpec, rightInputSpec,
                        joinSpec.getFilterNulls(), minRetentionTime);
    } else {
        boolean leftIsOuter = joinType == FlinkJoinType.LEFT || joinType == FlinkJoinType.FULL;
        boolean rightIsOuter = joinType == FlinkJoinType.RIGHT || joinType == FlinkJoinType.FULL;
        operator =
                new StreamingJoinOperator(
                        leftTypeInfo, rightTypeInfo, generatedCondition,
                        leftInputSpec, rightInputSpec, leftIsOuter, rightIsOuter,
                        joinSpec.getFilterNulls(), minRetentionTime);
    }

    final RowType returnType = (RowType) getOutputType();
    final TwoInputTransformation<RowData, RowData, RowData> transform =
            ExecNodeUtil.createTwoInputTransformation(
                    leftTransform, rightTransform,
                    createTransformationMeta(JOIN_TRANSFORMATION, config),
                    operator, InternalTypeInfo.of(returnType), leftTransform.getParallelism());

    // set KeyType and Selector for state
    RowDataKeySelector leftSelect = KeySelectorUtil.getRowDataSelector(leftJoinKey, leftTypeInfo);
    RowDataKeySelector rightSelect = KeySelectorUtil.getRowDataSelector(rightJoinKey, rightTypeInfo);
    transform.setStateKeySelectors(leftSelect, rightSelect);
    transform.setStateKeyType(leftSelect.getProducedType());
    return transform;
}
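In the streaming path, FlinkJoinType only determines whether a semi/anti operator is used and which input sides are outer. A minimal sketch of that dispatch in isolation follows; the operator construction is elided and the class name is illustrative.

import org.apache.flink.table.runtime.operators.join.FlinkJoinType;

public class StreamJoinDispatchSketch {
    public static void main(String[] args) {
        for (FlinkJoinType joinType : FlinkJoinType.values()) {
            if (joinType == FlinkJoinType.SEMI || joinType == FlinkJoinType.ANTI) {
                // StreamExecJoin creates a StreamingSemiAntiJoinOperator for these types.
                System.out.println(joinType + " -> semi/anti join operator");
            } else {
                // Otherwise a StreamingJoinOperator, with outer flags derived from the type.
                boolean leftIsOuter = joinType == FlinkJoinType.LEFT || joinType == FlinkJoinType.FULL;
                boolean rightIsOuter = joinType == FlinkJoinType.RIGHT || joinType == FlinkJoinType.FULL;
                System.out.println(joinType + " -> leftIsOuter=" + leftIsOuter + ", rightIsOuter=" + rightIsOuter);
            }
        }
    }
}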
Use of org.apache.flink.table.runtime.operators.join.FlinkJoinType in project flink by apache.
The class StreamExecTemporalJoin, method translateToPlanInternal:
@Override
@SuppressWarnings("unchecked")
protected Transformation<RowData> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
    ExecEdge leftInputEdge = getInputEdges().get(0);
    ExecEdge rightInputEdge = getInputEdges().get(1);
    RowType leftInputType = (RowType) leftInputEdge.getOutputType();
    RowType rightInputType = (RowType) rightInputEdge.getOutputType();
    JoinUtil.validateJoinSpec(joinSpec, leftInputType, rightInputType, true);

    FlinkJoinType joinType = joinSpec.getJoinType();
    if (isTemporalFunctionJoin) {
        if (joinType != FlinkJoinType.INNER) {
            throw new ValidationException(
                    "Temporal table function join currently only support INNER JOIN, but was "
                            + joinType + " JOIN.");
        }
    } else {
        if (joinType != FlinkJoinType.LEFT && joinType != FlinkJoinType.INNER) {
            throw new TableException(
                    "Temporal table join currently only support INNER JOIN and LEFT JOIN, but was "
                            + joinType + " JOIN.");
        }
    }

    RowType returnType = (RowType) getOutputType();
    TwoInputStreamOperator<RowData, RowData, RowData> joinOperator =
            getJoinOperator(config, leftInputType, rightInputType);
    Transformation<RowData> leftTransform =
            (Transformation<RowData>) leftInputEdge.translateToPlan(planner);
    Transformation<RowData> rightTransform =
            (Transformation<RowData>) rightInputEdge.translateToPlan(planner);
    TwoInputTransformation<RowData, RowData, RowData> ret =
            ExecNodeUtil.createTwoInputTransformation(
                    leftTransform, rightTransform,
                    createTransformationMeta(TEMPORAL_JOIN_TRANSFORMATION, config),
                    joinOperator, InternalTypeInfo.of(returnType), leftTransform.getParallelism());

    // set KeyType and Selector for state
    RowDataKeySelector leftKeySelector = getLeftKeySelector(leftInputType);
    RowDataKeySelector rightKeySelector = getRightKeySelector(rightInputType);
    ret.setStateKeySelectors(leftKeySelector, rightKeySelector);
    ret.setStateKeyType(leftKeySelector.getProducedType());
    return ret;
}
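The temporal join path only consults FlinkJoinType for validation. A minimal sketch of the same checks as a standalone helper follows; the class and helper names are hypothetical, and the exception import paths are assumed to be under org.apache.flink.table.api.

import org.apache.flink.table.api.TableException;
import org.apache.flink.table.api.ValidationException;
import org.apache.flink.table.runtime.operators.join.FlinkJoinType;

public class TemporalJoinTypeCheckSketch {
    // Hypothetical helper mirroring the validation in StreamExecTemporalJoin#translateToPlanInternal.
    static void checkJoinType(FlinkJoinType joinType, boolean isTemporalFunctionJoin) {
        if (isTemporalFunctionJoin) {
            if (joinType != FlinkJoinType.INNER) {
                throw new ValidationException(
                        "Temporal table function join currently only support INNER JOIN, but was "
                                + joinType + " JOIN.");
            }
        } else if (joinType != FlinkJoinType.LEFT && joinType != FlinkJoinType.INNER) {
            throw new TableException(
                    "Temporal table join currently only support INNER JOIN and LEFT JOIN, but was "
                            + joinType + " JOIN.");
        }
    }

    public static void main(String[] args) {
        checkJoinType(FlinkJoinType.INNER, true);   // accepted
        checkJoinType(FlinkJoinType.LEFT, false);   // accepted
        // checkJoinType(FlinkJoinType.FULL, false); // would throw TableException
    }
}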