Example usage of org.apache.flink.configuration.ConfigOption in the Apache Zeppelin project:
the runSqlList method of the FlinkSqlInterpreter class.
// Splits the paragraph text into individual SQL statements and executes them in order.
// Honors two paragraph-local properties:
//   - "runAsOne": when true, INSERT INTO / INSERT OVERWRITE statements are grouped and
//     submitted as a single Flink job after the loop (via startMultipleInsert /
//     executeMultipleInsertInto).
//   - "jobName": name for the combined insert job (defaults to the raw paragraph text).
// Returns ERROR as soon as any statement fails, SUCCESS otherwise. The outer finally block
// restores parallelism and table-config options so per-paragraph settings do not leak into
// later runs.
private InterpreterResult runSqlList(String st, InterpreterContext context) {
try {
boolean runAsOne = Boolean.parseBoolean(context.getStringLocalProperty("runAsOne", "false"));
List<String> sqls = sqlSplitter.splitSql(st).stream().map(String::trim).collect(Collectors.toList());
boolean isFirstInsert = true;
boolean hasInsert = false;
for (String sql : sqls) {
Optional<SqlCommandParser.SqlCommandCall> sqlCommand = sqlCommandParser.parse(sql);
// Unparsable statement: echo the statement plus the SQL help text, then abort the whole list.
if (!sqlCommand.isPresent()) {
try {
context.out.write("%text Invalid Sql statement: " + sql + "\n");
context.out.write(flinkInterpreter.getFlinkShims().sqlHelp());
} catch (IOException e) {
return new InterpreterResult(InterpreterResult.Code.ERROR, e.toString());
}
return new InterpreterResult(InterpreterResult.Code.ERROR);
}
try {
if (sqlCommand.get().command == SqlCommand.INSERT_INTO || sqlCommand.get().command == SqlCommand.INSERT_OVERWRITE) {
hasInsert = true;
// Lazily initialize multi-insert buffering on the first INSERT when running as one job.
if (isFirstInsert && runAsOne) {
flinkInterpreter.getFlinkShims().startMultipleInsert(tbenv, context);
isFirstInsert = false;
}
}
callCommand(sqlCommand.get(), context);
context.out.flush();
} catch (Throwable e) {
// A failure of any single statement aborts the remaining statements.
LOGGER.error("Fail to run sql:" + sql, e);
try {
context.out.write("%text Fail to run sql command: " + sql + "\n" + ExceptionUtils.getStackTrace(e) + "\n");
} catch (IOException ex) {
LOGGER.warn("Unexpected exception:", ex);
return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
}
return new InterpreterResult(InterpreterResult.Code.ERROR);
}
}
// All statements parsed and ran; now submit the accumulated INSERTs as one Flink job.
if (runAsOne && hasInsert) {
try {
lock.lock();
String jobName = context.getStringLocalProperty("jobName", st);
if (flinkInterpreter.getFlinkShims().executeMultipleInsertInto(jobName, this.tbenv, context)) {
context.out.write("Insertion successfully.\n");
}
} catch (Exception e) {
LOGGER.error("Fail to execute sql as one job", e);
return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
} finally {
// lock() itself may have failed before the lock was acquired; unlock only when held.
if (lock.isHeldByCurrentThread()) {
lock.unlock();
}
}
}
} catch (Exception e) {
LOGGER.error("Fail to execute sql", e);
return new InterpreterResult(InterpreterResult.Code.ERROR, ExceptionUtils.getStackTrace(e));
} finally {
// reset parallelism
this.tbenv.getConfig().getConfiguration().set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, defaultSqlParallelism);
// reset table config
for (ConfigOption configOption : tableConfigOptions.values()) {
// some may has no default value, e.g. ExecutionConfigOptions#TABLE_EXEC_DISABLED_OPERATORS
if (configOption.defaultValue() != null) {
this.tbenv.getConfig().getConfiguration().set(configOption, configOption.defaultValue());
}
}
// Re-apply the interpreter-level Flink configuration on top of the restored defaults.
this.tbenv.getConfig().getConfiguration().addAll(flinkInterpreter.getFlinkConfiguration());
}
return new InterpreterResult(InterpreterResult.Code.SUCCESS);
}
Example usage of org.apache.flink.configuration.ConfigOption in the Apache Zeppelin project:
the extractConfigOptions method of the Flink114Shims class.
/**
 * Extracts all static {@link ConfigOption} constants declared on the given class,
 * keyed by option key.
 *
 * @param clazz class whose declared ConfigOption fields should be collected
 * @return map from option key to ConfigOption; fields that cannot be read are logged and skipped
 */
private Map<String, ConfigOption> extractConfigOptions(Class clazz) {
    Map<String, ConfigOption> configOptions = new HashMap<>();
    for (Field field : clazz.getDeclaredFields()) {
        // Instance fields cannot be read without a receiver; skipping them avoids
        // the IllegalArgumentException (and warn-log noise) the old code triggered.
        if (!java.lang.reflect.Modifier.isStatic(field.getModifiers())) {
            continue;
        }
        if (field.getType().isAssignableFrom(ConfigOption.class)) {
            try {
                // Field#get ignores its receiver for static fields; pass null for clarity
                // (the previous ConfigOption.class argument was misleading).
                ConfigOption configOption = (ConfigOption) field.get(null);
                if (configOption != null) {
                    configOptions.put(configOption.key(), configOption);
                }
            } catch (Throwable e) {
                LOGGER.warn("Fail to get ConfigOption", e);
            }
        }
    }
    return configOptions;
}
Example usage of org.apache.flink.configuration.ConfigOption in the Apache Zeppelin project:
the extractConfigOptions method of the Flink112Shims class.
/**
 * Extracts all static {@link ConfigOption} constants declared on the given class,
 * keyed by option key.
 *
 * @param clazz class whose declared ConfigOption fields should be collected
 * @return map from option key to ConfigOption; fields that cannot be read are logged and skipped
 */
private Map<String, ConfigOption> extractConfigOptions(Class clazz) {
    Map<String, ConfigOption> configOptions = new HashMap<>();
    for (Field field : clazz.getDeclaredFields()) {
        // Instance fields cannot be read without a receiver; skipping them avoids
        // the IllegalArgumentException (and warn-log noise) the old code triggered.
        if (!java.lang.reflect.Modifier.isStatic(field.getModifiers())) {
            continue;
        }
        if (field.getType().isAssignableFrom(ConfigOption.class)) {
            try {
                // Field#get ignores its receiver for static fields; pass null for clarity
                // (the previous ConfigOption.class argument was misleading).
                ConfigOption configOption = (ConfigOption) field.get(null);
                if (configOption != null) {
                    configOptions.put(configOption.key(), configOption);
                }
            } catch (Throwable e) {
                LOGGER.warn("Fail to get ConfigOption", e);
            }
        }
    }
    return configOptions;
}
Example usage of org.apache.flink.configuration.ConfigOption in the Apache Flink project:
the setAllRequiredOptionsExceptOne helper of the TaskExecutorResourceUtilsTest class.
/**
 * Builds a Configuration in which every required task-executor option is populated
 * except the given one, so tests can verify the missing-option failure path.
 *
 * @param optionToNotSet the single required option to leave unset
 * @return a Configuration with all other required options assigned dummy values
 */
private static Configuration setAllRequiredOptionsExceptOne(ConfigOption<?> optionToNotSet) {
    final Configuration config = new Configuration();
    if (!TaskManagerOptions.CPU_CORES.equals(optionToNotSet)) {
        config.set(TaskManagerOptions.CPU_CORES, 1.0);
    }
    // skip network to exclude min/max mismatch config failure
    final MemorySize networkSize = MemorySize.ofMebiBytes(3);
    config.set(TaskManagerOptions.NETWORK_MEMORY_MIN, networkSize);
    config.set(TaskManagerOptions.NETWORK_MEMORY_MAX, networkSize);
    for (ConfigOption<?> requiredOption : TaskExecutorResourceUtils.CONFIG_OPTIONS) {
        if (requiredOption.equals(TaskManagerOptions.CPU_CORES) || requiredOption.equals(optionToNotSet)) {
            continue;
        }
        // noinspection unchecked
        config.set((ConfigOption<MemorySize>) requiredOption, MemorySize.ofMebiBytes(1));
    }
    return config;
}
Example usage of org.apache.flink.configuration.ConfigOption in the Apache Flink project:
the testRequiredPlaceholderOption method of the FactoryUtilTest class.
/**
 * Verifies that validateFactoryOptions accepts required options whose keys contain a
 * '#' placeholder, both directly and via a fallback key, without raising an error.
 */
@Test
public void testRequiredPlaceholderOption() {
    final ConfigOption<Integer> placeholderOption = ConfigOptions.key("fields.#.min").intType().noDefaultValue();
    final ConfigOption<Integer> fallbackPlaceholderOption = ConfigOptions.key("no.placeholder.anymore").intType().noDefaultValue().withFallbackKeys("old.fields.#.min");
    final Set<ConfigOption<?>> requiredOptions = new HashSet<>();
    requiredOptions.add(placeholderOption);
    requiredOptions.add(fallbackPlaceholderOption);
    FactoryUtil.validateFactoryOptions(requiredOptions, new HashSet<>(), new Configuration());
}
Aggregations