Use of org.apache.flink.table.planner.delegation.PlannerBase in project flink by apache.
From the class HiveTableSourceITCase, method testParallelismWithoutParallelismInfer.
@Test
public void testParallelismWithoutParallelismInfer() throws Exception {
final String dbName = "source_db";
final String tblName = "test_parallelism_no_infer";
TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
tEnv.registerCatalog("hive", hiveCatalog);
tEnv.useCatalog("hive");
tEnv.getConfig().getConfiguration().setBoolean(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
tEnv.executeSql("CREATE TABLE source_db.test_parallelism_no_infer " + "(`year` STRING, `value` INT) partitioned by (pt int)");
HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
        .addRow(new Object[] { "2014", 3 })
        .addRow(new Object[] { "2014", 4 })
        .commit("pt=0");
HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
        .addRow(new Object[] { "2015", 2 })
        .addRow(new Object[] { "2015", 5 })
        .commit("pt=1");
Table table = tEnv.sqlQuery("select * from hive.source_db.test_parallelism_no_infer limit 1");
PlannerBase planner = (PlannerBase) ((TableEnvironmentImpl) tEnv).getPlanner();
RelNode relNode = planner.optimize(TableTestUtil.toRelNode(table));
ExecNode<?> execNode =
        planner.translateToExecNodeGraph(toScala(Collections.singletonList(relNode)))
                .getRootNodes()
                .get(0);
// Walk two levels of inputs down from the translated root to reach the source transformation.
Transformation<?> transformation =
        execNode.translateToPlan(planner).getInputs().get(0).getInputs().get(0);
// With inference disabled, the source should fall back to the default parallelism.
Assert.assertEquals(
        ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.defaultValue().intValue(),
        transformation.getParallelism());
}
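The configuration exercised here is HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM. A minimal sketch of the same setup in application code, assuming a Hive catalog is already registered as "hive" (the option and config calls are taken from the test above; the value 4 is illustrative):

TableEnvironment tEnv = TableEnvironment.create(EnvironmentSettings.inBatchMode());
// Disable split-based parallelism inference for Hive sources...
tEnv.getConfig().getConfiguration()
        .setBoolean(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
// ...so they fall back to the default parallelism configured here.
tEnv.getConfig().getConfiguration()
        .setInteger(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 4);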
Use of org.apache.flink.table.planner.delegation.PlannerBase in project flink by apache.
From the class HiveTableSourceITCase, method testParallelismSettingTranslateAndAssert.
private void testParallelismSettingTranslateAndAssert(int expected, Table table, TableEnvironment tEnv) {
PlannerBase planner = (PlannerBase) ((TableEnvironmentImpl) tEnv).getPlanner();
RelNode relNode = planner.optimize(TableTestUtil.toRelNode(table));
ExecNode<?> execNode =
        planner.translateToExecNodeGraph(toScala(Collections.singletonList(relNode)))
                .getRootNodes()
                .get(0);
Transformation<?> transformation = execNode.translateToPlan(planner);
Assert.assertEquals(expected, transformation.getParallelism());
}
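The helper presumably backs tests that configure an explicit parallelism and then assert that the translated source transformation picked it up; a hypothetical call site (query and expected value illustrative, not from the source):

// Hypothetical usage: expect the translated plan to run with parallelism 2.
Table table = tEnv.sqlQuery("select * from hive.source_db.some_table");
testParallelismSettingTranslateAndAssert(2, table, tEnv);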
Use of org.apache.flink.table.planner.delegation.PlannerBase in project flink by apache.
From the class HiveTableSourceITCase, method testParallelismOnLimitPushDown.
@Test
public void testParallelismOnLimitPushDown() throws Exception {
final String dbName = "source_db";
final String tblName = "test_parallelism_limit_pushdown";
TableEnvironment tEnv = createTableEnv();
tEnv.getConfig().getConfiguration().setBoolean(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, false);
tEnv.getConfig().getConfiguration().setInteger(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 2);
tEnv.executeSql("CREATE TABLE source_db.test_parallelism_limit_pushdown " + "(`year` STRING, `value` INT) partitioned by (pt int)");
HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
        .addRow(new Object[] { "2014", 3 })
        .addRow(new Object[] { "2014", 4 })
        .commit("pt=0");
HiveTestUtils.createTextTableInserter(hiveCatalog, dbName, tblName)
        .addRow(new Object[] { "2015", 2 })
        .addRow(new Object[] { "2015", 5 })
        .commit("pt=1");
Table table = tEnv.sqlQuery("select * from hive.source_db.test_parallelism_limit_pushdown limit 1");
PlannerBase planner = (PlannerBase) ((TableEnvironmentImpl) tEnv).getPlanner();
RelNode relNode = planner.optimize(TableTestUtil.toRelNode(table));
ExecNode<?> execNode =
        planner.translateToExecNodeGraph(toScala(Collections.singletonList(relNode)))
                .getRootNodes()
                .get(0);
// The pushed-down limit sits between the root and the scan, so walk two levels of inputs.
Transformation<?> transformation =
        execNode.translateToPlan(planner).getInputs().get(0).getInputs().get(0);
// With inference disabled, the source should use the configured default parallelism of 2.
Assert.assertEquals(2, transformation.getParallelism());
}
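For contrast, leaving HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM at its default of true makes the Hive source derive its parallelism from the number of splits instead; a hedged sketch, assuming the companion cap option TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM_MAX from HiveOptions:

// Hedged sketch: infer source parallelism from the split count,
// but never exceed 10 subtasks (cap option assumed from HiveOptions).
tEnv.getConfig().getConfiguration()
        .setBoolean(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, true);
tEnv.getConfig().getConfiguration()
        .setInteger(HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM_MAX, 10);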
Use of org.apache.flink.table.planner.delegation.PlannerBase in project flink by apache.
From the class StreamExecSink, method translateToPlanInternal.
@SuppressWarnings("unchecked")
@Override
protected Transformation<Object> translateToPlanInternal(PlannerBase planner, ExecNodeConfig config) {
final ExecEdge inputEdge = getInputEdges().get(0);
final Transformation<RowData> inputTransform = (Transformation<RowData>) inputEdge.translateToPlan(planner);
final RowType inputRowType = (RowType) inputEdge.getOutputType();
final DynamicTableSink tableSink = tableSinkSpec.getTableSink(planner.getFlinkContext());
final boolean isCollectSink = tableSink instanceof CollectDynamicSink;
// Collect the indices of all rowtime attribute fields in the input row type.
final List<Integer> rowtimeFieldIndices = new ArrayList<>();
for (int i = 0; i < inputRowType.getFieldCount(); ++i) {
if (TypeCheckUtils.isRowTime(inputRowType.getTypeAt(i))) {
rowtimeFieldIndices.add(i);
}
}
final int rowtimeFieldIndex;
if (rowtimeFieldIndices.size() > 1 && !isCollectSink) {
throw new TableException(
        String.format(
                "The query contains more than one rowtime attribute column [%s] for writing into table '%s'.\n"
                        + "Please select the column that should be used as the event-time timestamp "
                        + "for the table sink by casting all other columns to regular TIMESTAMP or TIMESTAMP_LTZ.",
                rowtimeFieldIndices.stream()
                        .map(i -> inputRowType.getFieldNames().get(i))
                        .collect(Collectors.joining(", ")),
                tableSinkSpec.getContextResolvedTable().getIdentifier().asSummaryString()));
} else if (rowtimeFieldIndices.size() == 1) {
rowtimeFieldIndex = rowtimeFieldIndices.get(0);
} else {
rowtimeFieldIndex = -1;
}
return createSinkTransformation(planner.getExecEnv(), config, inputTransform, tableSink, rowtimeFieldIndex, upsertMaterialize);
}
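The exception above also spells out the user-side fix: keep exactly one rowtime attribute and cast the others to a regular timestamp type. A hedged SQL sketch of that fix (table and column names illustrative):

// Hedged sketch: `ts1` remains the event-time column; `ts2` is cast to a
// plain TIMESTAMP(3) so the sink sees a single rowtime attribute.
tEnv.executeSql(
        "INSERT INTO sink_t "
                + "SELECT ts1, CAST(ts2 AS TIMESTAMP(3)) AS ts2, payload FROM source_t");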
Use of org.apache.flink.table.planner.delegation.PlannerBase in project flink by apache.
From the class CatalogStatisticsTest, method testGetPartitionStatsWithSomeUnknownColumnStats.
@Test
public void testGetPartitionStatsWithSomeUnknownColumnStats() throws Exception {
TestPartitionableSourceFactory.createTemporaryTable(tEnv, "PartT", true);
createPartitionStats("A", 1);
createPartitionColumnStats("A", 1, true);
createPartitionStats("A", 2);
createPartitionColumnStats("A", 2);
RelNode t1 =
        ((PlannerBase) ((TableEnvironmentImpl) tEnv).getPlanner())
                .optimize(
                        TableTestUtil.toRelNode(
                                tEnv.sqlQuery("select id, name from PartT where part1 = 'A'")));
FlinkRelMetadataQuery mq = FlinkRelMetadataQuery.reuseOrCreate(t1.getCluster().getMetadataQuery());
assertEquals(200.0, mq.getRowCount(t1), 0.0);
// long-typed column `id` (index 0): one partition's column stats are unknown,
// so the merged column statistics degrade to unknown.
assertNull(mq.getDistinctRowCount(t1, ImmutableBitSet.of(0), null));
assertNull(mq.getColumnNullCount(t1, 0));
assertNull(mq.getColumnInterval(t1, 0));
// string-typed column `name` (index 1): likewise unknown after merging.
assertNull(mq.getDistinctRowCount(t1, ImmutableBitSet.of(1), null));
assertNull(mq.getColumnNullCount(t1, 1));
}
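Only the column-level statistics degrade here; the per-partition row counts are known, so they still merge to a concrete 200. Presumably, giving both partitions known column stats would make the same queries return non-null results; a hedged sketch of that assumption (mirrors the helpers above, and assumes the test table also exposes a part1 = 'B' partition):

// Hedged sketch (assumption, not from the source): with known column
// stats on every partition, merged column metadata should be available.
createPartitionStats("B", 1);
createPartitionColumnStats("B", 1);
createPartitionStats("B", 2);
createPartitionColumnStats("B", 2);
RelNode t2 = ((PlannerBase) ((TableEnvironmentImpl) tEnv).getPlanner())
        .optimize(TableTestUtil.toRelNode(
                tEnv.sqlQuery("select id, name from PartT where part1 = 'B'")));
assertNotNull(FlinkRelMetadataQuery.reuseOrCreate(t2.getCluster().getMetadataQuery())
        .getDistinctRowCount(t2, ImmutableBitSet.of(0), null));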