Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache: class BashJavaUtils, method getJmResourceParams.
/**
 * Generate and print JVM parameters of Flink Master resources as one line.
 */
@VisibleForTesting
static List<String> getJmResourceParams(Configuration configuration) {
    JobManagerProcessSpec jobManagerProcessSpec =
            JobManagerProcessUtils.processSpecFromConfigWithNewOptionToInterpretLegacyHeap(
                    configuration, JobManagerOptions.JVM_HEAP_MEMORY);
    logMasterConfiguration(jobManagerProcessSpec);
    return Arrays.asList(
            JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, configuration),
            JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec));
}
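Because the method is package-private and static, a test can call it directly once it lives in the same package. The following is a minimal sketch under that assumption; the test class name and the choice of configuring only the total process memory are illustrative, not taken from the Flink test suite.

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.List;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.JobManagerOptions;
import org.apache.flink.configuration.MemorySize;
import org.junit.jupiter.api.Test;

class JmResourceParamsSketchTest {

    @Test
    void returnsJvmParamsAndDynamicConfigs() {
        Configuration conf = new Configuration();
        conf.set(JobManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.parse("1g"));

        // package-private access: only possible from the same package as BashJavaUtils
        List<String> params = BashJavaUtils.getJmResourceParams(conf);

        // one entry with the JVM parameters, one with the dynamic configuration options
        assertEquals(2, params.size());
    }
}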
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache: class YarnClusterDescriptor, method getStagingDir.
/**
 * Returns the configured remote target home directory if set, otherwise returns the default
 * home directory.
 *
 * @param defaultFileSystem default file system used
 * @return the remote target home directory
 */
@VisibleForTesting
Path getStagingDir(FileSystem defaultFileSystem) throws IOException {
    final String configuredStagingDir =
            flinkConfiguration.getString(YarnConfigOptions.STAGING_DIRECTORY);
    if (configuredStagingDir == null) {
        return defaultFileSystem.getHomeDirectory();
    }
    FileSystem stagingDirFs =
            new Path(configuredStagingDir).getFileSystem(defaultFileSystem.getConf());
    return stagingDirFs.makeQualified(new Path(configuredStagingDir));
}
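The non-obvious step is that the configured path may live on a different file system than the default one, so it is resolved and qualified against its own file system. The standalone sketch below (not Flink code; the class name and the example path are made up) shows what that qualification does when the configured value points at the local file system.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StagingDirExample {
    public static void main(String[] args) throws Exception {
        FileSystem defaultFs = FileSystem.getLocal(new Configuration());

        // example value for yarn.staging-directory
        String configuredStagingDir = "/tmp/flink-staging";

        // resolve the file system that owns the configured path, then qualify the
        // path with that file system's scheme and authority
        FileSystem stagingDirFs =
                new Path(configuredStagingDir).getFileSystem(defaultFs.getConf());
        Path qualified = stagingDirFs.makeQualified(new Path(configuredStagingDir));

        System.out.println(qualified); // e.g. file:/tmp/flink-staging on the local file system
    }
}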
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache: class EmbeddedRocksDBStateBackend, method resetRocksDBLoadedFlag.
@VisibleForTesting
static void resetRocksDBLoadedFlag() throws Exception {
    final Field initField =
            org.rocksdb.NativeLibraryLoader.class.getDeclaredField("initialized");
    initField.setAccessible(true);
    initField.setBoolean(null, false);
}
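The method relies on reflection to flip a private static boolean inside RocksDB's NativeLibraryLoader; the "initialized" field is an implementation detail that may change between RocksDB versions. The following self-contained sketch demonstrates the same reflection pattern on a hypothetical stub class rather than on RocksDB itself.

import java.lang.reflect.Field;

class NativeLoaderStub {
    private static boolean initialized = true;
}

public class ResetFlagSketch {
    public static void main(String[] args) throws Exception {
        Field initField = NativeLoaderStub.class.getDeclaredField("initialized");
        initField.setAccessible(true);      // bypass the private modifier
        initField.setBoolean(null, false);  // static field: the target object is null
        System.out.println(initField.getBoolean(null)); // prints false
    }
}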
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache: class EmbeddedRocksDBStateBackend, method ensureRocksDBIsLoaded.
// ------------------------------------------------------------------------
//  static library loading utilities
// ------------------------------------------------------------------------

@VisibleForTesting
static void ensureRocksDBIsLoaded(String tempDirectory) throws IOException {
    synchronized (EmbeddedRocksDBStateBackend.class) {
        if (!rocksDbInitialized) {
            final File tempDirParent = new File(tempDirectory).getAbsoluteFile();
            LOG.info(
                    "Attempting to load RocksDB native library and store it under '{}'",
                    tempDirParent);

            Throwable lastException = null;
            for (int attempt = 1; attempt <= ROCKSDB_LIB_LOADING_ATTEMPTS; attempt++) {
                File rocksLibFolder = null;
                try {
                    // when multiple instances of this class and RocksDB exist in different
                    // class loaders, then we can see the following exception:
                    // "java.lang.UnsatisfiedLinkError: Native Library
                    //  /path/to/temp/dir/librocksdbjni-linux64.so
                    //  already loaded in another class loader"
                    // to avoid that, we need to add a random element to the library file path
                    // (I know, seems like an unnecessary hack, since the JVM obviously can
                    // handle multiple instances of the same JNI library being loaded in
                    // different class loaders, but apparently not when coming from the same
                    // file path, so there we go)
                    rocksLibFolder = new File(tempDirParent, "rocksdb-lib-" + new AbstractID());

                    // make sure the temp path exists
                    LOG.debug("Attempting to create RocksDB native library folder {}", rocksLibFolder);
                    // noinspection ResultOfMethodCallIgnored
                    rocksLibFolder.mkdirs();

                    // explicitly load the JNI dependency if it has not been loaded before
                    NativeLibraryLoader.getInstance().loadLibrary(rocksLibFolder.getAbsolutePath());

                    // this initialization here should validate that the loading succeeded
                    RocksDB.loadLibrary();

                    // seems to have worked
                    LOG.info("Successfully loaded RocksDB native library");
                    rocksDbInitialized = true;
                    return;
                } catch (Throwable t) {
                    lastException = t;
                    LOG.debug("RocksDB JNI library loading attempt {} failed", attempt, t);

                    // try to force RocksDB to attempt reloading the library
                    try {
                        resetRocksDBLoadedFlag();
                    } catch (Throwable tt) {
                        LOG.debug("Failed to reset 'initialized' flag in RocksDB native code loader", tt);
                    }

                    FileUtils.deleteDirectoryQuietly(rocksLibFolder);
                }
            }

            throw new IOException("Could not load the native RocksDB library", lastException);
        }
    }
}
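Since the loader is a package-private static method, a test placed in the same package can drive it directly. The sketch below assumes exactly that, plus the RocksDB (frocksdb) JAR on the test classpath; the test class and method names are hypothetical.

import java.nio.file.Files;
import org.junit.jupiter.api.Test;

class RocksDbLoadingSketchTest {

    @Test
    void loadsNativeLibraryIntoTempDirectory() throws Exception {
        // each loading attempt extracts librocksdbjni into a randomly named
        // "rocksdb-lib-..." subfolder of this directory
        String tempDir = Files.createTempDirectory("rocksdb-lib-test").toString();

        EmbeddedRocksDBStateBackend.ensureRocksDBIsLoaded(tempDir);

        // a second call is a no-op because rocksDbInitialized is already true
        EmbeddedRocksDBStateBackend.ensureRocksDBIsLoaded(tempDir);
    }
}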
Use of org.apache.flink.annotation.VisibleForTesting in project flink by apache: class StreamOperatorStateHandler, method snapshotState.
@VisibleForTesting
void snapshotState(
        CheckpointedStreamOperator streamOperator,
        Optional<InternalTimeServiceManager<?>> timeServiceManager,
        String operatorName,
        long checkpointId,
        long timestamp,
        CheckpointOptions checkpointOptions,
        CheckpointStreamFactory factory,
        OperatorSnapshotFutures snapshotInProgress,
        StateSnapshotContextSynchronousImpl snapshotContext,
        boolean isUsingCustomRawKeyedState)
        throws CheckpointException {
    try {
        if (timeServiceManager.isPresent()) {
            checkState(
                    keyedStateBackend != null,
                    "keyedStateBackend should be available with timeServiceManager");
            final InternalTimeServiceManager<?> manager = timeServiceManager.get();

            boolean requiresLegacyRawKeyedStateSnapshots =
                    keyedStateBackend instanceof AbstractKeyedStateBackend
                            && ((AbstractKeyedStateBackend<?>) keyedStateBackend)
                                    .requiresLegacySynchronousTimerSnapshots(
                                            checkpointOptions.getCheckpointType());

            if (requiresLegacyRawKeyedStateSnapshots) {
                checkState(
                        !isUsingCustomRawKeyedState,
                        "Attempting to snapshot timers to raw keyed state, but this operator has custom raw keyed state to write.");
                manager.snapshotToRawKeyedState(
                        snapshotContext.getRawKeyedOperatorStateOutput(), operatorName);
            }
        }

        streamOperator.snapshotState(snapshotContext);

        snapshotInProgress.setKeyedStateRawFuture(snapshotContext.getKeyedStateStreamFuture());
        snapshotInProgress.setOperatorStateRawFuture(snapshotContext.getOperatorStateStreamFuture());

        if (null != operatorStateBackend) {
            snapshotInProgress.setOperatorStateManagedFuture(
                    operatorStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
        }

        if (null != keyedStateBackend) {
            if (isCanonicalSavepoint(checkpointOptions.getCheckpointType())) {
                SnapshotStrategyRunner<KeyedStateHandle, ? extends FullSnapshotResources<?>>
                        snapshotRunner =
                                prepareCanonicalSavepoint(keyedStateBackend, closeableRegistry);

                snapshotInProgress.setKeyedStateManagedFuture(
                        snapshotRunner.snapshot(checkpointId, timestamp, factory, checkpointOptions));
            } else {
                snapshotInProgress.setKeyedStateManagedFuture(
                        keyedStateBackend.snapshot(checkpointId, timestamp, factory, checkpointOptions));
            }
        }
    } catch (Exception snapshotException) {
        try {
            snapshotInProgress.cancel();
        } catch (Exception e) {
            snapshotException.addSuppressed(e);
        }

        String snapshotFailMessage =
                "Could not complete snapshot " + checkpointId + " for operator " + operatorName + ".";

        try {
            snapshotContext.closeExceptionally();
        } catch (IOException e) {
            snapshotException.addSuppressed(e);
        }
        throw new CheckpointException(
                snapshotFailMessage, CheckpointFailureReason.CHECKPOINT_DECLINED, snapshotException);
    }
}
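The failure path follows a reusable pattern: cancel the in-flight snapshot work, attach any secondary failures as suppressed exceptions so they are not lost, and rethrow a single domain-specific exception. The self-contained sketch below illustrates that pattern in generic terms; it is not Flink code, and the class, method, and parameter names are invented for the illustration.

import java.io.Closeable;
import java.io.IOException;

final class SnapshotFailureHandlingSketch {

    static void failSnapshot(Exception snapshotException,
                             AutoCloseable inProgressWork,
                             Closeable snapshotContext,
                             long checkpointId,
                             String operatorName) throws IOException {
        try {
            inProgressWork.close();            // analogous to snapshotInProgress.cancel()
        } catch (Exception e) {
            snapshotException.addSuppressed(e);
        }
        try {
            snapshotContext.close();           // analogous to snapshotContext.closeExceptionally()
        } catch (IOException e) {
            snapshotException.addSuppressed(e);
        }
        // rethrow one exception that carries the original failure as its cause
        throw new IOException(
                "Could not complete snapshot " + checkpointId + " for operator " + operatorName + ".",
                snapshotException);
    }
}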