Use of org.apache.flink.configuration.IllegalConfigurationException in project flink by apache.
The class FsStateBackendFactory, method createFromConfig.
@Override
public FsStateBackend createFromConfig(Configuration config) throws IllegalConfigurationException {
    final String checkpointDirURI = config.getString(CHECKPOINT_DIRECTORY_URI_CONF_KEY, null);
    final int memoryThreshold = config.getInteger(
            MEMORY_THRESHOLD_CONF_KEY, FsStateBackend.DEFAULT_FILE_STATE_THRESHOLD);

    if (checkpointDirURI == null) {
        throw new IllegalConfigurationException(
                "Cannot create the file system state backend: The configuration does not specify the "
                        + "checkpoint directory '" + CHECKPOINT_DIRECTORY_URI_CONF_KEY + '\'');
    }

    try {
        Path path = new Path(checkpointDirURI);
        return new FsStateBackend(path.toUri(), memoryThreshold);
    } catch (IOException | IllegalArgumentException e) {
        throw new IllegalConfigurationException("Invalid configuration for the state backend", e);
    }
}
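A minimal usage sketch for context (hedged: the factory's no-arg constructor, the literal config key, and the checkpoint URI are assumptions for illustration, not taken from the snippet above):

    // Hypothetical usage of the factory above. The literal key is assumed to be the
    // value behind CHECKPOINT_DIRECTORY_URI_CONF_KEY; the URI is an example.
    Configuration config = new Configuration();
    config.setString("state.backend.fs.checkpointdir", "hdfs:///flink/checkpoints");

    FsStateBackend backend = new FsStateBackendFactory().createFromConfig(config);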
Use of org.apache.flink.configuration.IllegalConfigurationException in project flink by apache.
The class ExecutionGraphBuilder, method buildGraph.
/**
 * Builds the ExecutionGraph from the JobGraph.
 * If a prior execution graph exists, the JobGraph will be attached to it. If no prior
 * execution graph exists, the JobGraph will be attached to a new, empty execution graph.
 */
public static ExecutionGraph buildGraph(
        @Nullable ExecutionGraph prior,
        JobGraph jobGraph,
        Configuration jobManagerConfig,
        ScheduledExecutorService futureExecutor,
        Executor ioExecutor,
        SlotProvider slotProvider,
        ClassLoader classLoader,
        CheckpointRecoveryFactory recoveryFactory,
        Time timeout,
        RestartStrategy restartStrategy,
        MetricGroup metrics,
        int parallelismForAutoMax,
        Logger log) throws JobExecutionException, JobException {

    checkNotNull(jobGraph, "job graph cannot be null");

    final String jobName = jobGraph.getName();
    final JobID jobId = jobGraph.getJobID();

    // create a new execution graph, if none exists so far
    final ExecutionGraph executionGraph;
    try {
        executionGraph = (prior != null) ? prior :
                new ExecutionGraph(
                        futureExecutor,
                        ioExecutor,
                        jobId,
                        jobName,
                        jobGraph.getJobConfiguration(),
                        jobGraph.getSerializedExecutionConfig(),
                        timeout,
                        restartStrategy,
                        jobGraph.getUserJarBlobKeys(),
                        jobGraph.getClasspaths(),
                        slotProvider,
                        classLoader,
                        metrics);
    } catch (IOException e) {
        throw new JobException("Could not create the execution graph.", e);
    }

    // set the basic properties
    executionGraph.setScheduleMode(jobGraph.getScheduleMode());
    executionGraph.setQueuedSchedulingAllowed(jobGraph.getAllowQueuedScheduling());

    try {
        executionGraph.setJsonPlan(JsonPlanGenerator.generatePlan(jobGraph));
    } catch (Throwable t) {
        log.warn("Cannot create JSON plan for job", t);
        // give the graph an empty plan
        executionGraph.setJsonPlan("{}");
    }
    // initialize the vertices that have a master initialization hook
    // (file output formats create directories here, input formats create splits)
    final long initMasterStart = System.nanoTime();
    log.info("Running initialization on master for job {} ({}).", jobName, jobId);

    for (JobVertex vertex : jobGraph.getVertices()) {
        String executableClass = vertex.getInvokableClassName();
        if (executableClass == null || executableClass.isEmpty()) {
            throw new JobSubmissionException(jobId,
                    "The vertex " + vertex.getID() + " (" + vertex.getName() + ") has no invokable class.");
        }

        if (vertex.getParallelism() == ExecutionConfig.PARALLELISM_AUTO_MAX) {
            vertex.setParallelism(parallelismForAutoMax);
        }

        try {
            vertex.initializeOnMaster(classLoader);
        } catch (Throwable t) {
            throw new JobExecutionException(jobId,
                    "Cannot initialize task '" + vertex.getName() + "': " + t.getMessage(), t);
        }
    }

    log.info("Successfully ran initialization on master in {} ms.",
            (System.nanoTime() - initMasterStart) / 1_000_000);
    // topologically sort the job vertices and attach the graph to the existing one
    List<JobVertex> sortedTopology = jobGraph.getVerticesSortedTopologicallyFromSources();
    if (log.isDebugEnabled()) {
        log.debug("Adding {} vertices from job graph {} ({}).", sortedTopology.size(), jobName, jobId);
    }
    executionGraph.attachJobGraph(sortedTopology);
    if (log.isDebugEnabled()) {
        log.debug("Successfully created execution graph from job graph {} ({}).", jobName, jobId);
    }
    // configure the state checkpointing
    JobSnapshottingSettings snapshotSettings = jobGraph.getSnapshotSettings();
    if (snapshotSettings != null) {
        List<ExecutionJobVertex> triggerVertices = idToVertex(snapshotSettings.getVerticesToTrigger(), executionGraph);
        List<ExecutionJobVertex> ackVertices = idToVertex(snapshotSettings.getVerticesToAcknowledge(), executionGraph);
        List<ExecutionJobVertex> confirmVertices = idToVertex(snapshotSettings.getVerticesToConfirm(), executionGraph);

        CompletedCheckpointStore completedCheckpoints;
        CheckpointIDCounter checkpointIdCounter;
        try {
            int maxNumberOfCheckpointsToRetain = jobManagerConfig.getInteger(CoreOptions.MAX_RETAINED_CHECKPOINTS);
            if (maxNumberOfCheckpointsToRetain <= 0) {
                // log a warning and fall back to the option's default value if the configured
                // number of retained checkpoints is not greater than 0
                log.warn("The setting for '{} : {}' is invalid. Using default value of {}",
                        CoreOptions.MAX_RETAINED_CHECKPOINTS.key(),
                        maxNumberOfCheckpointsToRetain,
                        CoreOptions.MAX_RETAINED_CHECKPOINTS.defaultValue());
                maxNumberOfCheckpointsToRetain = CoreOptions.MAX_RETAINED_CHECKPOINTS.defaultValue();
            }

            completedCheckpoints = recoveryFactory.createCheckpointStore(jobId, maxNumberOfCheckpointsToRetain, classLoader);
            checkpointIdCounter = recoveryFactory.createCheckpointIDCounter(jobId);
        } catch (Exception e) {
            throw new JobExecutionException(jobId, "Failed to initialize high-availability checkpoint handler", e);
        }
        // maximum number of remembered checkpoints
        int historySize = jobManagerConfig.getInteger(
                ConfigConstants.JOB_MANAGER_WEB_CHECKPOINTS_HISTORY_SIZE,
                ConfigConstants.DEFAULT_JOB_MANAGER_WEB_CHECKPOINTS_HISTORY_SIZE);

        CheckpointStatsTracker checkpointStatsTracker =
                new CheckpointStatsTracker(historySize, ackVertices, snapshotSettings, metrics);

        // the default directory for externalized checkpoints
        String externalizedCheckpointsDir = jobManagerConfig.getString(ConfigConstants.CHECKPOINTS_DIRECTORY_KEY, null);

        // load the state backend for checkpoint metadata:
        // if specified in the application, use it from there; otherwise load it from the configuration
        final StateBackend metadataBackend;
        final StateBackend applicationConfiguredBackend = snapshotSettings.getDefaultStateBackend();
        if (applicationConfiguredBackend != null) {
            metadataBackend = applicationConfiguredBackend;
            log.info("Using application-defined state backend for checkpoint/savepoint metadata: {}.",
                    applicationConfiguredBackend);
        } else {
            try {
                metadataBackend = AbstractStateBackend.loadStateBackendFromConfigOrCreateDefault(
                        jobManagerConfig, classLoader, log);
            } catch (IllegalConfigurationException | IOException | DynamicCodeLoadingException e) {
                throw new JobExecutionException(jobId, "Could not instantiate configured state backend", e);
            }
        }

        executionGraph.enableCheckpointing(
                snapshotSettings.getCheckpointInterval(),
                snapshotSettings.getCheckpointTimeout(),
                snapshotSettings.getMinPauseBetweenCheckpoints(),
                snapshotSettings.getMaxConcurrentCheckpoints(),
                snapshotSettings.getExternalizedCheckpointSettings(),
                triggerVertices,
                ackVertices,
                confirmVertices,
                checkpointIdCounter,
                completedCheckpoints,
                externalizedCheckpointsDir,
                metadataBackend,
                checkpointStatsTracker);
    }

    return executionGraph;
}
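The helper idToVertex is referenced above but is not part of this excerpt. A plausible sketch, inferred from its call sites rather than copied from the Flink source: it resolves each JobVertexID against the ExecutionGraph and fails on unknown ids.

    // Hedged sketch of the idToVertex helper (inferred, not the actual Flink source).
    private static List<ExecutionJobVertex> idToVertex(
            List<JobVertexID> jobVertices, ExecutionGraph executionGraph) {

        List<ExecutionJobVertex> result = new ArrayList<>(jobVertices.size());
        for (JobVertexID id : jobVertices) {
            ExecutionJobVertex vertex = executionGraph.getJobVertex(id);
            if (vertex == null) {
                throw new IllegalArgumentException(
                        "The checkpointing settings refer to non-existent vertex " + id);
            }
            result.add(vertex);
        }
        return result;
    }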
Use of org.apache.flink.configuration.IllegalConfigurationException in project flink by apache.
The class ZookeeperHaServices, method createBlobStore.
/**
 * Creates the BLOB store in which BLOBs are stored in a highly-available
 * fashion.
 *
 * @param configuration configuration to extract the storage path from
 * @return the blob store
 * @throws IOException if the blob store could not be created
 * @throws IllegalConfigurationException if the mandatory HA storage path is not configured
 */
public static BlobStore createBlobStore(final Configuration configuration) throws IOException {
    String storagePath = configuration.getValue(HighAvailabilityOptions.HA_STORAGE_PATH);
    if (isNullOrWhitespaceOnly(storagePath)) {
        throw new IllegalConfigurationException("Configuration is missing the mandatory parameter: "
                + HighAvailabilityOptions.HA_STORAGE_PATH);
    }

    final Path path;
    try {
        path = new Path(storagePath);
    } catch (Exception e) {
        throw new IOException("Invalid path for highly available storage ("
                + HighAvailabilityOptions.HA_STORAGE_PATH.key() + ')', e);
    }

    final FileSystem fileSystem;
    try {
        fileSystem = path.getFileSystem();
    } catch (Exception e) {
        throw new IOException("Could not create FileSystem for highly available storage ("
                + HighAvailabilityOptions.HA_STORAGE_PATH.key() + ')', e);
    }

    final String clusterId = configuration.getValue(HighAvailabilityOptions.HA_CLUSTER_ID);
    storagePath += "/" + clusterId;

    return new FileSystemBlobStore(fileSystem, storagePath);
}
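A minimal usage sketch (hedged: the storage path and cluster id values are illustrative, and the call can throw IOException):

    // Hypothetical usage of createBlobStore above; values are examples only.
    Configuration configuration = new Configuration();
    configuration.setString(HighAvailabilityOptions.HA_STORAGE_PATH.key(), "hdfs:///flink/ha");
    configuration.setString(HighAvailabilityOptions.HA_CLUSTER_ID.key(), "cluster-1");

    BlobStore blobStore = ZookeeperHaServices.createBlobStore(configuration); // may throw IOException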
Use of org.apache.flink.configuration.IllegalConfigurationException in project flink by apache.
The class TaskManagerStartupTest, method testMemoryConfigWrong.
/**
 * Tests that the TaskManager startup fails synchronously with an exception
 * when the memory configuration is invalid.
 */
@Test
public void testMemoryConfigWrong() {
    try {
        Configuration cfg = new Configuration();
        cfg.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, "localhost");
        cfg.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, 21656);
        cfg.setString(ConfigConstants.TASK_MANAGER_MEMORY_PRE_ALLOCATE_KEY, "true");

        // something invalid
        cfg.setInteger(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, -42);
        try {
            TaskManager.runTaskManager("localhost", ResourceID.generate(), 0, cfg);
            fail("Should fail synchronously with an exception");
        } catch (IllegalConfigurationException e) {
            // splendid!
        }

        // something ridiculously high
        final long memSize = (((long) Integer.MAX_VALUE - 1)
                * ConfigConstants.DEFAULT_TASK_MANAGER_MEMORY_SEGMENT_SIZE) >> 20;
        cfg.setLong(ConfigConstants.TASK_MANAGER_MEMORY_SIZE_KEY, memSize);
        try {
            TaskManager.runTaskManager("localhost", ResourceID.generate(), 0, cfg);
            fail("Should fail synchronously with an exception");
        } catch (Exception e) {
            // splendid!
            assertTrue(e.getCause() instanceof OutOfMemoryError);
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
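On JUnit 4.13 or newer, the same expectation can be written with assertThrows; this is an alternative formulation, not the test's original style:

    // Alternative to the try/fail/catch pattern, assuming org.junit.Assert.assertThrows
    // is available (JUnit >= 4.13).
    assertThrows(IllegalConfigurationException.class,
            () -> TaskManager.runTaskManager("localhost", ResourceID.generate(), 0, cfg));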
Use of org.apache.flink.configuration.IllegalConfigurationException in project flink by apache.
The class MesosApplicationMasterRunner, method createMesosConfig.
/**
* Loads and validates the ResourceManager Mesos configuration from the given Flink configuration.
*/
public static MesosConfiguration createMesosConfig(Configuration flinkConfig, String hostname) {
    Protos.FrameworkInfo.Builder frameworkInfo = Protos.FrameworkInfo.newBuilder().setHostname(hostname);
    Protos.Credential.Builder credential = null;

    if (!flinkConfig.containsKey(ConfigConstants.MESOS_MASTER_URL)) {
        throw new IllegalConfigurationException(ConfigConstants.MESOS_MASTER_URL + " must be configured.");
    }
    String masterUrl = flinkConfig.getString(ConfigConstants.MESOS_MASTER_URL, null);

    Duration failoverTimeout = FiniteDuration.apply(
            flinkConfig.getInteger(
                    ConfigConstants.MESOS_FAILOVER_TIMEOUT_SECONDS,
                    ConfigConstants.DEFAULT_MESOS_FAILOVER_TIMEOUT_SECS),
            TimeUnit.SECONDS);
    frameworkInfo.setFailoverTimeout(failoverTimeout.toSeconds());

    frameworkInfo.setName(flinkConfig.getString(
            ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_NAME,
            ConfigConstants.DEFAULT_MESOS_RESOURCEMANAGER_FRAMEWORK_NAME));
    frameworkInfo.setRole(flinkConfig.getString(
            ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_ROLE,
            ConfigConstants.DEFAULT_MESOS_RESOURCEMANAGER_FRAMEWORK_ROLE));
    frameworkInfo.setUser(flinkConfig.getString(
            ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_USER,
            ConfigConstants.DEFAULT_MESOS_RESOURCEMANAGER_FRAMEWORK_USER));

    if (flinkConfig.containsKey(ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_PRINCIPAL)) {
        frameworkInfo.setPrincipal(flinkConfig.getString(
                ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_PRINCIPAL, null));

        credential = Protos.Credential.newBuilder();
        credential.setPrincipal(frameworkInfo.getPrincipal());

        // some environments pass the secret to Mesos through a side channel
        // and thus don't set the 'secret' configuration setting
        if (flinkConfig.containsKey(ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_SECRET)) {
            credential.setSecret(flinkConfig.getString(
                    ConfigConstants.MESOS_RESOURCEMANAGER_FRAMEWORK_SECRET, null));
        }
    }

    return new MesosConfiguration(masterUrl, frameworkInfo, scala.Option.apply(credential));
}
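A minimal usage sketch (hedged: the master URL and hostname are illustrative values; ConfigConstants.MESOS_MASTER_URL is the only setting the method strictly requires):

    // Hypothetical usage of createMesosConfig above; values are examples only.
    Configuration flinkConfig = new Configuration();
    flinkConfig.setString(ConfigConstants.MESOS_MASTER_URL, "zk://zookeeper:2181/mesos");

    MesosConfiguration mesosConfig =
            MesosApplicationMasterRunner.createMesosConfig(flinkConfig, "jobmanager.example.com");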