Use of org.apache.flink.table.delegation.Executor in project flink by apache.
The class ExecutionContext, method createTableEnvironment.
// ------------------------------------------------------------------------------------------------------------------
// Helper to create Table Environment
// ------------------------------------------------------------------------------------------------------------------
private StreamTableEnvironment createTableEnvironment() {
    // checks the value of RUNTIME_MODE
    EnvironmentSettings settings = EnvironmentSettings.fromConfiguration(flinkConfig);

    if (!settings.isBlinkPlanner()) {
        throw new TableException(
                "The old planner is not supported anymore. Please update to new default planner.");
    }

    TableConfig tableConfig = new TableConfig();
    tableConfig.addConfiguration(flinkConfig);

    StreamExecutionEnvironment streamExecEnv = createStreamExecutionEnvironment();

    final Executor executor = lookupExecutor(settings.getExecutor(), streamExecEnv);

    return createStreamTableEnvironment(
            streamExecEnv,
            settings,
            tableConfig,
            executor,
            sessionState.catalogManager,
            sessionState.moduleManager,
            sessionState.functionCatalog,
            classLoader);
}
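The lookupExecutor helper called above is not part of the snippet. Below is a minimal sketch of how such a helper is typically written against Flink 1.14's factory discovery API; the exception message is an illustrative assumption, and classLoader is assumed to be a field of the enclosing class (it is also passed to createStreamTableEnvironment above).
// Sketch only: discover an ExecutorFactory by identifier and bind it to the
// StreamExecutionEnvironment reflectively. Not copied from the Flink source.
private Executor lookupExecutor(
        String executorIdentifier, StreamExecutionEnvironment executionEnvironment) {
    try {
        final ExecutorFactory executorFactory = FactoryUtil.discoverFactory(
                classLoader, ExecutorFactory.class, executorIdentifier);
        final Method createMethod = executorFactory.getClass()
                .getMethod("create", StreamExecutionEnvironment.class);
        return (Executor) createMethod.invoke(executorFactory, executionEnvironment);
    } catch (Exception e) {
        throw new TableException(
                "Could not instantiate the executor. Make sure a planner module is on the classpath", e);
    }
}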
Use of org.apache.flink.table.delegation.Executor in project flink by apache.
The class DefaultExecutorTest, method testDefaultBatchProperties.
@Test
public void testDefaultBatchProperties() {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final Executor executor = new DefaultExecutor(env);
    final List<Transformation<?>> dummyTransformations =
            Collections.singletonList(
                    env.fromElements(1, 2, 3).addSink(new DiscardingSink<>()).getTransformation());

    final Configuration configuration = new Configuration();
    configuration.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.BATCH);

    final StreamGraph streamGraph =
            (StreamGraph) executor.createPipeline(dummyTransformations, configuration, "Default Name");

    assertTrue(streamGraph.getExecutionConfig().isObjectReuseEnabled());
    assertEquals(0, streamGraph.getExecutionConfig().getLatencyTrackingInterval());
    assertTrue(streamGraph.isChainingEnabled());
    assertFalse(streamGraph.isAllVerticesInSameSlotSharingGroupByDefault());
    assertFalse(streamGraph.getCheckpointConfig().isCheckpointingEnabled());
    assertEquals(
            GlobalStreamExchangeMode.ALL_EDGES_BLOCKING,
            streamGraph.getGlobalStreamExchangeMode());
}
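For contrast, here is a hypothetical companion test (not taken from the Flink repository) sketching the defaults one would expect when the same executor builds a pipeline in STREAMING mode; the asserted values reflect the engine's documented streaming defaults and are illustrative.
// Sketch only: streaming mode keeps the engine defaults, unlike the batch overrides above.
@Test
public void testDefaultStreamingProperties() {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    final Executor executor = new DefaultExecutor(env);
    final List<Transformation<?>> dummyTransformations =
            Collections.singletonList(
                    env.fromElements(1, 2, 3).addSink(new DiscardingSink<>()).getTransformation());
    final Configuration configuration = new Configuration();
    configuration.set(ExecutionOptions.RUNTIME_MODE, RuntimeExecutionMode.STREAMING);

    final StreamGraph streamGraph =
            (StreamGraph) executor.createPipeline(dummyTransformations, configuration, "Default Name");

    // No object reuse, chaining on, and pipelined exchanges by default.
    assertFalse(streamGraph.getExecutionConfig().isObjectReuseEnabled());
    assertTrue(streamGraph.isChainingEnabled());
    assertEquals(
            GlobalStreamExchangeMode.ALL_EDGES_PIPELINED,
            streamGraph.getGlobalStreamExchangeMode());
}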
Use of org.apache.flink.table.delegation.Executor in project zeppelin by apache.
The class Flink113Shims, method createPlannerAndExecutor.
@Override
public ImmutablePair<Object, Object> createPlannerAndExecutor(
        ClassLoader classLoader, Object environmentSettings, Object sEnv,
        Object tableConfig, Object functionCatalog, Object catalogManager) {
    EnvironmentSettings settings = (EnvironmentSettings) environmentSettings;
    Executor executor = (Executor) lookupExecutor(classLoader, settings, sEnv);
    Map<String, String> plannerProperties = settings.toPlannerProperties();
    Planner planner =
            ComponentFactoryService.find(PlannerFactory.class, plannerProperties)
                    .create(plannerProperties, executor, (TableConfig) tableConfig,
                            (FunctionCatalog) functionCatalog, (CatalogManager) catalogManager);
    return ImmutablePair.of(planner, executor);
}
Use of org.apache.flink.table.delegation.Executor in project zeppelin by apache.
The class Flink112Shims, method createPlannerAndExecutor.
@Override
public ImmutablePair<Object, Object> createPlannerAndExecutor(
        ClassLoader classLoader, Object environmentSettings, Object sEnv,
        Object tableConfig, Object functionCatalog, Object catalogManager) {
    EnvironmentSettings settings = (EnvironmentSettings) environmentSettings;
    Executor executor = (Executor) lookupExecutor(classLoader, settings, sEnv);
    Map<String, String> plannerProperties = settings.toPlannerProperties();
    Planner planner =
            ComponentFactoryService.find(PlannerFactory.class, plannerProperties)
                    .create(plannerProperties, executor, (TableConfig) tableConfig,
                            (FunctionCatalog) functionCatalog, (CatalogManager) catalogManager);
    return ImmutablePair.of(planner, executor);
}
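Both shims above delegate to a lookupExecutor helper that is not shown. A minimal sketch of how such a helper is commonly written against the pre-1.14, property-based factory API follows; the reflective create lookup mirrors the pattern used by Flink's table API bridge, while the exception message is an illustrative assumption rather than a copy of the Zeppelin source.
// Sketch only: find the ExecutorFactory from the settings' executor properties and bind
// it to the given StreamExecutionEnvironment reflectively. classLoader is kept to match
// the shim signature.
private static Executor lookupExecutor(
        ClassLoader classLoader, EnvironmentSettings settings, Object sEnv) {
    Map<String, String> executorProperties = settings.toExecutorProperties();
    try {
        ExecutorFactory executorFactory =
                ComponentFactoryService.find(ExecutorFactory.class, executorProperties);
        Method createMethod = executorFactory.getClass()
                .getMethod("create", Map.class, StreamExecutionEnvironment.class);
        return (Executor) createMethod.invoke(
                executorFactory, executorProperties, (StreamExecutionEnvironment) sEnv);
    } catch (Exception e) {
        throw new TableException(
                "Could not instantiate the executor. Make sure a planner module is on the classpath", e);
    }
}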
Use of org.apache.flink.table.delegation.Executor in project zeppelin by apache.
The class TableEnvFactory, method createJavaFlinkStreamTableEnvironment.
public TableEnvironment createJavaFlinkStreamTableEnvironment(
        EnvironmentSettings settings, ClassLoader classLoader) {
    try {
        ImmutablePair<Object, Object> pair = flinkShims.createPlannerAndExecutor(
                classLoader, settings, senv.getJavaEnv(),
                oldPlannerBatchTableConfig, functionCatalog, catalogManager);
        Planner planner = (Planner) pair.left;
        Executor executor = (Executor) pair.right;
        Class clazz = Class.forName(
                "org.apache.flink.table.api.bridge.java.internal.StreamTableEnvironmentImpl");
        try {
            Constructor constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class,
                    org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class);
            return (TableEnvironment) constructor.newInstance(
                    oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog,
                    oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor,
                    settings.isStreamingMode());
        } catch (NoSuchMethodException e) {
            // Flink 1.11.1 changed the constructor signature, see FLINK-18419
            Constructor constructor = clazz.getConstructor(
                    CatalogManager.class, ModuleManager.class, FunctionCatalog.class, TableConfig.class,
                    org.apache.flink.streaming.api.environment.StreamExecutionEnvironment.class,
                    Planner.class, Executor.class, boolean.class, ClassLoader.class);
            return (TableEnvironment) constructor.newInstance(
                    oldPlannerCatalogManager, moduleManager, oldPlannerFunctionCatalog,
                    oldPlannerStreamTableConfig, senv.getJavaEnv(), planner, executor,
                    settings.isStreamingMode(), classLoader);
        }
    } catch (Exception e) {
        throw new TableException("Fail to createJavaFlinkStreamTableEnvironment", e);
    }
}
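A short usage sketch of the factory method above, assuming an already constructed TableEnvFactory instance; the tableEnvFactory variable and the SQL statement are illustrative, not taken from the Zeppelin source.
// Sketch only: build settings for the legacy flink planner and ask the factory for a
// stream TableEnvironment, then run a placeholder query.
EnvironmentSettings settings =
        EnvironmentSettings.newInstance().useOldPlanner().inStreamingMode().build();
TableEnvironment tableEnv = tableEnvFactory.createJavaFlinkStreamTableEnvironment(
        settings, Thread.currentThread().getContextClassLoader());
tableEnv.executeSql("SHOW TABLES").print();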