Use of org.apache.flink.util.TemporaryClassLoaderContext in project flink by apache.
From the class FileSystem, method getUnguardedFileSystem.
@Internal
public static FileSystem getUnguardedFileSystem(final URI fsUri) throws IOException {
    checkNotNull(fsUri, "file system URI");

    LOCK.lock();
    try {
        final URI uri;

        if (fsUri.getScheme() != null) {
            uri = fsUri;
        } else {
            // Apply the default fs scheme
            final URI defaultUri = getDefaultFsUri();
            URI rewrittenUri = null;

            try {
                rewrittenUri = new URI(
                        defaultUri.getScheme(),
                        null,
                        defaultUri.getHost(),
                        defaultUri.getPort(),
                        fsUri.getPath(),
                        null,
                        null);
            } catch (URISyntaxException e) {
                // for local URIs, we make one more try to repair the path by making it absolute
                if (defaultUri.getScheme().equals("file")) {
                    try {
                        rewrittenUri = new URI(
                                "file",
                                null,
                                new Path(new File(fsUri.getPath()).getAbsolutePath()).toUri().getPath(),
                                null);
                    } catch (URISyntaxException ignored) {
                        // could not help it...
                    }
                }
            }

            if (rewrittenUri != null) {
                uri = rewrittenUri;
            } else {
                throw new IOException(
                        "The file system URI '" + fsUri
                                + "' declares no scheme and cannot be interpreted relative to the default file system URI ("
                                + defaultUri + ").");
            }
        }

        // print a helpful pointer for malformed local URIs (happens a lot to new users)
        if (uri.getScheme().equals("file")
                && uri.getAuthority() != null
                && !uri.getAuthority().isEmpty()) {
            String supposedUri = "file:///" + uri.getAuthority() + uri.getPath();
            throw new IOException(
                    "Found local file path with authority '" + uri.getAuthority() + "' in path '"
                            + uri.toString() + "'. Hint: Did you forget a slash? (correct path would be '"
                            + supposedUri + "')");
        }

        final FSKey key = new FSKey(uri.getScheme(), uri.getAuthority());

        // See if there is a file system object in the cache
        {
            FileSystem cached = CACHE.get(key);
            if (cached != null) {
                return cached;
            }
        }

        // "default" initialization, so that the FileSystem class also works when no explicit
        // Flink configuration was loaded, e.g., on JobManager or TaskManager setup
        if (FS_FACTORIES.isEmpty()) {
            initializeWithoutPlugins(new Configuration());
        }

        // Try to create a new file system
        final FileSystem fs;
        final FileSystemFactory factory = FS_FACTORIES.get(uri.getScheme());

        if (factory != null) {
            ClassLoader classLoader = factory.getClassLoader();
            try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classLoader)) {
                fs = factory.create(uri);
            }
        } else if (!ALLOWED_FALLBACK_FILESYSTEMS.contains(uri.getScheme())
                && DIRECTLY_SUPPORTED_FILESYSTEM.containsKey(uri.getScheme())) {
            final Collection<String> plugins = DIRECTLY_SUPPORTED_FILESYSTEM.get(uri.getScheme());
            throw new UnsupportedFileSystemSchemeException(
                    String.format(
                            "Could not find a file system implementation for scheme '%s'. The scheme is "
                                    + "directly supported by Flink through the following plugin%s: %s. Please ensure "
                                    + "that each plugin resides within its own subfolder within the plugins directory. "
                                    + "See https://ci.apache.org/projects/flink/flink-docs-stable/ops/plugins.html for "
                                    + "more information. If you want to use a Hadoop file system for that scheme, "
                                    + "please add the scheme to the configuration fs.allowed-fallback-filesystems. "
                                    + "For a full list of supported file systems, please see "
                                    + "https://nightlies.apache.org/flink/flink-docs-stable/ops/filesystems/.",
                            uri.getScheme(),
                            plugins.size() == 1 ? "" : "s",
                            String.join(", ", plugins)));
        } else {
            try {
                fs = FALLBACK_FACTORY.create(uri);
            } catch (UnsupportedFileSystemSchemeException e) {
                throw new UnsupportedFileSystemSchemeException(
                        "Could not find a file system implementation for scheme '" + uri.getScheme()
                                + "'. The scheme is not directly supported by Flink and no Hadoop file system "
                                + "to support this scheme could be loaded. For a full list of supported file "
                                + "systems, please see "
                                + "https://nightlies.apache.org/flink/flink-docs-stable/ops/filesystems/.",
                        e);
            }
        }

        CACHE.put(key, fs);
        return fs;
    } finally {
        LOCK.unlock();
    }
}
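For context: TemporaryClassLoaderContext.of(loader) installs the given class loader as the calling thread's context class loader and restores the previous one when the try-with-resources block closes. A minimal sketch of the idiom wrapped around factory.create(uri) above; the helper name and class are hypothetical, not part of Flink:

import java.io.IOException;
import java.net.URI;

import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.FileSystemFactory;
import org.apache.flink.util.TemporaryClassLoaderContext;

class FsFactoryExample {

    // Hypothetical helper: create a FileSystem while the factory's (plugin) class
    // loader is the thread's context class loader, so that ServiceLoader lookups and
    // reflective loading inside create(...) resolve against the plugin's jars.
    static FileSystem createWithFactoryClassLoader(FileSystemFactory factory, URI uri)
            throws IOException {
        ClassLoader pluginLoader = factory.getClassLoader();
        try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(pluginLoader)) {
            return factory.create(uri);
            // on close, the previous context class loader is restored, even on exception
        }
    }
}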
Use of org.apache.flink.util.TemporaryClassLoaderContext in project flink by apache.
From the class SourceCoordinator, method start.
@Override
public void start() throws Exception {
    LOG.info("Starting split enumerator for source {}.", operatorName);

    // we mark this as started first, so that we can later distinguish the cases where
    // 'start()' wasn't called and where 'start()' failed.
    started = true;

    // there are two ways the SplitEnumerator can get created:
    //  (1) Source.restoreEnumerator(), in which case 'resetToCheckpoint()' already created it
    //  (2) Source.createEnumerator(), in which case it has not been created yet and we create
    //      it here
    if (enumerator == null) {
        final ClassLoader userCodeClassLoader =
                context.getCoordinatorContext().getUserCodeClassloader();
        try (TemporaryClassLoaderContext ignored =
                TemporaryClassLoaderContext.of(userCodeClassLoader)) {
            enumerator = source.createEnumerator(context);
        } catch (Throwable t) {
            ExceptionUtils.rethrowIfFatalErrorOrOOM(t);
            LOG.error("Failed to create Source Enumerator for source {}", operatorName, t);
            context.failJob(t);
            return;
        }
    }

    // The start sequence is the first task in the coordinator executor.
    // We rely on the single-threaded coordinator executor to guarantee
    // the other methods are invoked after the enumerator has started.
    runInEventLoop(() -> enumerator.start(), "starting the SplitEnumerator.");
}
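What the coordinator relies on here is that the swap is scoped and exception-safe: the user-code class loader is only visible inside the try block. A small self-contained sketch of that behavior, using the system class loader as a stand-in for a user-code class loader:

import org.apache.flink.util.TemporaryClassLoaderContext;

class ContextSwapDemo {
    public static void main(String[] args) {
        ClassLoader original = Thread.currentThread().getContextClassLoader();
        // stand-in for a user-code class loader
        ClassLoader userLoader = ClassLoader.getSystemClassLoader();

        try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(userLoader)) {
            // inside the block, code sees userLoader as the context class loader
            System.out.println(Thread.currentThread().getContextClassLoader() == userLoader); // true
        }

        // after the block the original loader is back, even if the body had thrown
        System.out.println(Thread.currentThread().getContextClassLoader() == original); // true
    }
}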
Use of org.apache.flink.util.TemporaryClassLoaderContext in project flink by apache.
From the class CatalogITCase, method testCreateCatalogFromUserClassLoader.
@Test
public void testCreateCatalogFromUserClassLoader() throws Exception {
    final String className = "UserCatalogFactory";
    URLClassLoader classLoader =
            ClassLoaderUtils.withRoot(temporaryFolder.newFolder())
                    .addResource(
                            "META-INF/services/org.apache.flink.table.factories.Factory",
                            "UserCatalogFactory")
                    .addClass(
                            className,
                            "import org.apache.flink.configuration.ConfigOption;\n"
                                    + "import org.apache.flink.table.catalog.Catalog;\n"
                                    + "import org.apache.flink.table.catalog.GenericInMemoryCatalog;\n"
                                    + "import org.apache.flink.table.factories.CatalogFactory;\n"
                                    + "\n"
                                    + "import java.util.Collections;\n"
                                    + "import java.util.Set;\n"
                                    + "\n"
                                    + "public class UserCatalogFactory implements CatalogFactory {\n"
                                    + "    @Override\n"
                                    + "    public Catalog createCatalog(Context context) {\n"
                                    + "        return new GenericInMemoryCatalog(context.getName());\n"
                                    + "    }\n"
                                    + "\n"
                                    + "    @Override\n"
                                    + "    public String factoryIdentifier() {\n"
                                    + "        return \"userCatalog\";\n"
                                    + "    }\n"
                                    + "\n"
                                    + "    @Override\n"
                                    + "    public Set<ConfigOption<?>> requiredOptions() {\n"
                                    + "        return Collections.emptySet();\n"
                                    + "    }\n"
                                    + "\n"
                                    + "    @Override\n"
                                    + "    public Set<ConfigOption<?>> optionalOptions() {\n"
                                    + "        return Collections.emptySet();\n"
                                    + "    }\n"
                                    + "}")
                    .build();

    try (TemporaryClassLoaderContext context = TemporaryClassLoaderContext.of(classLoader)) {
        TableEnvironment tableEnvironment = getTableEnvironment();
        tableEnvironment.executeSql("CREATE CATALOG cat WITH ('type'='userCatalog')");
        assertTrue(tableEnvironment.getCatalog("cat").isPresent());
    }
}
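The executeSql call has to run inside the TemporaryClassLoaderContext because catalog factories are discovered via java.util.ServiceLoader against the thread context class loader, which the test temporarily points at the URLClassLoader built above. A simplified sketch of such a lookup; the identifier matching below is a stand-in for Flink's full factory-discovery logic, not a copy of it:

import java.util.ServiceLoader;

import org.apache.flink.table.factories.Factory;

class FactoryDiscoverySketch {

    // Simplified: find a Factory whose identifier matches the 'type' option, searching
    // the thread context class loader (which the test above temporarily points at the
    // URLClassLoader that holds UserCatalogFactory and its service file).
    static Factory findByIdentifier(String identifier) {
        ClassLoader cl = Thread.currentThread().getContextClassLoader();
        for (Factory factory : ServiceLoader.load(Factory.class, cl)) {
            if (identifier.equals(factory.factoryIdentifier())) {
                return factory;
            }
        }
        throw new IllegalArgumentException("No factory found for identifier '" + identifier + "'");
    }
}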
Use of org.apache.flink.util.TemporaryClassLoaderContext in project flink by apache.
From the class FlinkKafkaProducer, method abortTransactions.
// ----------------------------------- Utilities --------------------------

private void abortTransactions(final Set<String> transactionalIds) {
    final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    transactionalIds.parallelStream().forEach(transactionalId -> {
        // the parallel stream runs this lambda on a separate thread pool, and the Kafka
        // client loads classes through the context class loader, so we should set the
        // correct classloader for it.
        try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classLoader)) {
            // don't mess with the original configuration or any other properties of the
            // original object -> create an internal kafka producer on our own and do not
            // rely on initTransactionalProducer().
            final Properties myConfig = new Properties();
            myConfig.putAll(producerConfig);
            initTransactionalProducerConfig(myConfig, transactionalId);
            FlinkKafkaInternalProducer<byte[], byte[]> kafkaProducer = null;
            try {
                kafkaProducer = new FlinkKafkaInternalProducer<>(myConfig);
                // it suffices to call initTransactions - this will abort any
                // lingering transactions
                kafkaProducer.initTransactions();
            } finally {
                if (kafkaProducer != null) {
                    kafkaProducer.close(Duration.ofSeconds(0));
                }
            }
        }
    });
}
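The detail worth noting is that parallelStream() runs the lambda on ForkJoinPool common-pool worker threads, whose context class loader can differ from the caller's; hence the loader is captured once up front and re-installed inside each task. A minimal sketch of just that idiom, with a hypothetical per-id action:

import java.util.Set;

import org.apache.flink.util.TemporaryClassLoaderContext;

class ParallelClassLoaderSketch {

    static void forEachWithCallerLoader(Set<String> ids) {
        // capture the caller's loader before fanning out to worker threads
        final ClassLoader callerLoader = Thread.currentThread().getContextClassLoader();
        ids.parallelStream().forEach(id -> {
            // each ForkJoinPool worker temporarily adopts the caller's loader
            try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(callerLoader)) {
                process(id);
            }
        });
    }

    // hypothetical stand-in for the per-transaction work done above
    private static void process(String id) {
        System.out.println("processing " + id);
    }
}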
Use of org.apache.flink.util.TemporaryClassLoaderContext in project flink by apache.
From the class OperatorCoordinatorHolder, method create.
// ------------------------------------------------------------------------
// Factories
// ------------------------------------------------------------------------
public static OperatorCoordinatorHolder create(
        SerializedValue<OperatorCoordinator.Provider> serializedProvider,
        ExecutionJobVertex jobVertex,
        ClassLoader classLoader,
        CoordinatorStore coordinatorStore) throws Exception {
    try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(classLoader)) {
        final OperatorCoordinator.Provider provider =
                serializedProvider.deserializeValue(classLoader);
        final OperatorID opId = provider.getOperatorId();
        final SubtaskAccess.SubtaskAccessFactory taskAccesses =
                new ExecutionSubtaskAccess.ExecutionJobVertexSubtaskAccess(jobVertex, opId);
        return create(
                opId,
                provider,
                coordinatorStore,
                jobVertex.getName(),
                jobVertex.getGraph().getUserClassLoader(),
                jobVertex.getParallelism(),
                jobVertex.getMaxParallelism(),
                taskAccesses);
    }
}
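The class loader matters twice here: once as the explicit argument to deserializeValue, and once for any code inside deserialization or provider construction that consults the thread context class loader. A sketch reducing the guard to its essentials; the generic helper is hypothetical:

import org.apache.flink.util.SerializedValue;
import org.apache.flink.util.TemporaryClassLoaderContext;

class DeserializeWithLoader {

    // Hypothetical helper: deserialize a value so that both the explicit loader argument
    // and the thread context class loader point at the same user-code class loader.
    static <T> T deserialize(SerializedValue<T> value, ClassLoader loader) throws Exception {
        try (TemporaryClassLoaderContext ignored = TemporaryClassLoaderContext.of(loader)) {
            return value.deserializeValue(loader);
        }
    }
}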