Use of org.apache.flink.table.api.TableConfig in project flink by apache.
From the class StreamTableEnvironment, method create.
/**
* Creates a table environment that is the entry point and central context for creating Table
* and SQL API programs that integrate with the Java-specific {@link DataStream} API.
*
* <p>It is unified for bounded and unbounded data processing.
*
* <p>A stream table environment is responsible for:
*
* <ul>
* <li>Converting a {@link DataStream} into a {@link Table} and vice-versa.
* <li>Connecting to external systems.
* <li>Registering and retrieving {@link Table}s and other meta objects from a catalog.
* <li>Executing SQL statements.
* <li>Offering further configuration options.
* </ul>
*
* <p>Note: If you don't intend to use the {@link DataStream} API, {@link TableEnvironment} is
* meant for pure table programs.
*
* @param executionEnvironment The Java {@link StreamExecutionEnvironment} of the {@link
* TableEnvironment}.
* @param settings The environment settings used to instantiate the {@link TableEnvironment}.
*/
static StreamTableEnvironment create(
        StreamExecutionEnvironment executionEnvironment, EnvironmentSettings settings) {
    TableConfig tableConfig = new TableConfig();
    tableConfig.addConfiguration(settings.toConfiguration());
    return StreamTableEnvironmentImpl.create(executionEnvironment, settings, tableConfig);
}
Use of org.apache.flink.table.api.TableConfig in project flink by apache.
From the class LookupKeySerdeTest, method testLookupKey.
@Test
public void testLookupKey() throws IOException {
    TableConfig tableConfig = TableConfig.getDefault();
    ModuleManager moduleManager = new ModuleManager();
    CatalogManager catalogManager = CatalogManager.newBuilder()
            .classLoader(Thread.currentThread().getContextClassLoader())
            .config(tableConfig.getConfiguration())
            .defaultCatalog("default_catalog", new GenericInMemoryCatalog("default_db"))
            .build();
    FlinkContext flinkContext = new FlinkContextImpl(
            false, tableConfig, moduleManager,
            new FunctionCatalog(tableConfig, catalogManager, moduleManager),
            catalogManager, null);
    SerdeContext serdeCtx = new SerdeContext(
            null, flinkContext, Thread.currentThread().getContextClassLoader(),
            FlinkTypeFactory.INSTANCE(), FlinkSqlOperatorTable.instance());
    ObjectReader objectReader = JsonSerdeUtil.createObjectReader(serdeCtx);
    ObjectWriter objectWriter = JsonSerdeUtil.createObjectWriter(serdeCtx);
    LookupJoinUtil.LookupKey[] lookupKeys = new LookupJoinUtil.LookupKey[] {
        new LookupJoinUtil.ConstantLookupKey(
                new BigIntType(), new RexBuilder(FlinkTypeFactory.INSTANCE()).makeLiteral("a")),
        new LookupJoinUtil.FieldRefLookupKey(3)
    };
    // Round-trip each key through JSON and verify it deserializes to an equal object.
    for (LookupJoinUtil.LookupKey lookupKey : lookupKeys) {
        LookupJoinUtil.LookupKey result = objectReader.readValue(
                objectWriter.writeValueAsString(lookupKey), LookupJoinUtil.LookupKey.class);
        assertEquals(lookupKey, result);
    }
}
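The test above follows the standard serde round-trip pattern: write a value to JSON, read it back, and assert the result equals the original. A self-contained sketch of the same pattern using plain Jackson rather than Flink's JsonSerdeUtil (the class name and payload are illustrative):

import com.fasterxml.jackson.databind.ObjectMapper;

public class RoundTripSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Serialize, deserialize, and verify the value survives unchanged.
        String json = mapper.writeValueAsString(42);
        int back = mapper.readValue(json, Integer.class);
        if (back != 42) {
            throw new AssertionError("round trip changed the value");
        }
    }
}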
Use of org.apache.flink.table.api.TableConfig in project flink by apache.
From the class TemporalTableSourceSpecSerdeTest, method testTemporalTableSourceSpecSerde.
@ParameterizedTest
@MethodSource("testTemporalTableSourceSpecSerde")
public void testTemporalTableSourceSpecSerde(TemporalTableSourceSpec spec) throws IOException {
    CatalogManager catalogManager = CatalogManagerMocks.createEmptyCatalogManager();
    catalogManager.createTemporaryTable(
            spec.getTableSourceSpec().getContextResolvedTable().getResolvedTable(),
            spec.getTableSourceSpec().getContextResolvedTable().getIdentifier(),
            false);
    SerdeContext serdeCtx =
            JsonSerdeTestUtil.configuredSerdeContext(catalogManager, new TableConfig());
    // Round-trip the spec through JSON and compare the reconstructed pieces.
    String json = JsonSerdeTestUtil.toJson(serdeCtx, spec);
    TemporalTableSourceSpec actual =
            JsonSerdeTestUtil.toObject(serdeCtx, json, TemporalTableSourceSpec.class);
    assertThat(actual.getTableSourceSpec().getContextResolvedTable())
            .isEqualTo(spec.getTableSourceSpec().getContextResolvedTable());
    assertThat(actual.getTableSourceSpec().getSourceAbilities())
            .isEqualTo(spec.getTableSourceSpec().getSourceAbilities());
    assertThat(actual.getOutputType()).isEqualTo(spec.getOutputType());
}
Use of org.apache.flink.table.api.TableConfig in project flink by apache.
From the class PushLocalAggIntoScanRuleBase, method canPushDown.
protected boolean canPushDown(
        RelOptRuleCall call,
        BatchPhysicalGroupAggregateBase aggregate,
        BatchPhysicalTableSourceScan tableSourceScan) {
    TableConfig tableConfig = ShortcutUtils.unwrapContext(call.getPlanner()).getTableConfig();
    if (!tableConfig
            .getConfiguration()
            .getBoolean(OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_AGGREGATE_PUSHDOWN_ENABLED)) {
        return false;
    }
    // Only local (non-final) aggregates that contain at least one aggregate call qualify.
    if (aggregate.isFinal() || aggregate.getAggCallList().isEmpty()) {
        return false;
    }
    List<AggregateCall> aggCallList = JavaScalaConversionUtil.toJava(aggregate.getAggCallList());
    for (AggregateCall aggCall : aggCallList) {
        // Distinct, approximate, multi-argument, filtered, or sorted calls cannot be pushed down.
        if (aggCall.isDistinct()
                || aggCall.isApproximate()
                || aggCall.getArgList().size() > 1
                || aggCall.hasFilter()
                || !aggCall.getCollation().getFieldCollations().isEmpty()) {
            return false;
        }
    }
    // We cannot push aggregates down twice: the source must support the ability
    // and must not already carry an AggregatePushDownSpec.
    TableSourceTable tableSourceTable = tableSourceScan.tableSourceTable();
    return tableSourceTable != null
            && tableSourceTable.tableSource() instanceof SupportsAggregatePushDown
            && Arrays.stream(tableSourceTable.abilitySpecs())
                    .noneMatch(spec -> spec instanceof AggregatePushDownSpec);
}
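The first guard is driven by a session option. A short sketch of toggling it, assuming an existing TableEnvironment named tableEnv (to the best of my knowledge the option defaults to enabled, but that is worth verifying against your Flink version):

// Disable local-aggregate push-down for this session; pass true to re-enable it.
tableEnv.getConfig().getConfiguration().setBoolean(
        OptimizerConfigOptions.TABLE_OPTIMIZER_SOURCE_AGGREGATE_PUSHDOWN_ENABLED, false);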
Use of org.apache.flink.table.api.TableConfig in project flink by apache.
From the class TwoStageOptimizedWindowAggregateRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
    final StreamPhysicalWindowAggregate windowAgg = call.rel(0);
    final RelNode realInput = call.rel(2);
    final TableConfig tableConfig = unwrapContext(call.getPlanner()).getTableConfig();
    final WindowingStrategy windowing = windowAgg.windowing();
    // The two-phase optimization must be enabled.
    if (getAggPhaseStrategy(tableConfig) == AggregatePhaseStrategy.ONE_PHASE) {
        return false;
    }
    // Only rowtime windows qualify; processing time must be materialized in a single node.
    if (!windowing.isRowtime()) {
        return false;
    }
    // All aggregate functions must support the merge() method.
    if (!AggregateUtil.doAllSupportPartialMerge(windowAgg.aggInfoList().aggInfos())) {
        return false;
    }
    // Only rewrite if the input doesn't already satisfy the required distribution.
    return !isInputSatisfyRequiredDistribution(realInput, windowAgg.grouping());
}