Use of co.cask.cdap.common.lang.FilterClassLoader in project cdap by caskdata.
From the class SparkRuntimeContextProvider, method createProgram.
private static Program createProgram(CConfiguration cConf,
                                     SparkRuntimeContextConfig contextConfig) throws IOException {
  File programJar = new File(PROGRAM_JAR_NAME);
  File programDir = new File(PROGRAM_JAR_EXPANDED_NAME);
  // Only expose classes permitted by the Spark program filter to the program classloader
  ClassLoader parentClassLoader = new FilterClassLoader(SparkRuntimeContextProvider.class.getClassLoader(),
                                                        SparkRuntimeUtils.SPARK_PROGRAM_CLASS_LOADER_FILTER);
  ClassLoader classLoader = new ProgramClassLoader(cConf, programDir, parentClassLoader);
  return new DefaultProgram(new ProgramDescriptor(contextConfig.getProgramId(),
                                                  contextConfig.getApplicationSpecification()),
                            Locations.toLocation(programJar), classLoader);
}
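For context, here is a minimal sketch (not part of the CDAP sources) of how a FilterClassLoader.Filter such as SPARK_PROGRAM_CLASS_LOADER_FILTER controls what a child classloader can see from its parent. The class name FilterDemo and the org.slf4j/Guava examples are illustrative assumptions; it assumes cdap-common, slf4j-api, and Guava are on the classpath.

import co.cask.cdap.common.lang.FilterClassLoader;

public final class FilterDemo {
  public static void main(String[] args) throws Exception {
    ClassLoader parent = FilterDemo.class.getClassLoader();
    // Illustrative filter: expose only org.slf4j.* to child classloaders
    FilterClassLoader.Filter filter = new FilterClassLoader.Filter() {
      @Override
      public boolean acceptResource(String resource) {
        return resource.startsWith("org/slf4j/");
      }

      @Override
      public boolean acceptPackage(String packageName) {
        return packageName.startsWith("org.slf4j.");
      }
    };
    ClassLoader filtered = new FilterClassLoader(parent, filter);

    // Accepted by the filter: delegated to the parent classloader
    System.out.println(filtered.loadClass("org.slf4j.Logger").getClassLoader());

    // Rejected by the filter: not visible through this classloader
    try {
      filtered.loadClass("com.google.common.collect.ImmutableList");
    } catch (ClassNotFoundException e) {
      System.out.println("Guava is hidden from children, as intended");
    }
  }
}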
Use of co.cask.cdap.common.lang.FilterClassLoader in project cdap by caskdata.
From the class MainClassLoader, method createFromContext.
/**
 * @param filter a {@link FilterClassLoader.Filter} for filtering out classes from the
 *               context classloader
 * @param extraClasspath extra list of {@link URL} to be added to the end of the classpath for the
 *                       {@link MainClassLoader} to be created
 * @return a new instance created from the current context classloader or the system classloader. The
 *         returned {@link MainClassLoader} will be the defining classloader for classes in the context
 *         classloader that the filter rejected. For classes that pass the filter, the defining
 *         classloader will be the original context classloader.
 *         It will return {@code null} if it is not able to create a new instance due to lack of
 *         classpath information.
 */
@Nullable
public static MainClassLoader createFromContext(FilterClassLoader.Filter filter, URL... extraClasspath) {
  ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
  if (classLoader == null) {
    classLoader = ClassLoader.getSystemClassLoader();
  }
  List<URL> classpath = new ArrayList<>();
  if (classLoader instanceof URLClassLoader) {
    classpath.addAll(Arrays.asList(((URLClassLoader) classLoader).getURLs()));
  } else if (classLoader == ClassLoader.getSystemClassLoader()) {
    addClassPath(classpath);
  } else {
    // Not able to create a new MainClassLoader without classpath information
    return null;
  }
  classpath.addAll(Arrays.asList(extraClasspath));
  ClassLoader filtered = new FilterClassLoader(classLoader, filter);
  ClassLoader parent = new CombineClassLoader(classLoader.getParent(), Collections.singleton(filtered));
  return new MainClassLoader(classpath.toArray(new URL[classpath.size()]), parent);
}
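A possible call site for this factory method, shown as a sketch rather than actual CDAP bootstrap code. It assumes the default filter; extraJar is a hypothetical java.io.File pointing at an additional jar, and the snippet is assumed to run where MalformedURLException is handled or declared.

FilterClassLoader.Filter filter = FilterClassLoader.defaultFilter();
MainClassLoader mainClassLoader = MainClassLoader.createFromContext(filter, extraJar.toURI().toURL());
if (mainClassLoader == null) {
  // No classpath information could be derived from the context classloader
  throw new IllegalStateException("Unable to create MainClassLoader");
}
// Make the new classloader the context classloader for subsequent loading
Thread.currentThread().setContextClassLoader(mainClassLoader);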
Use of co.cask.cdap.common.lang.FilterClassLoader in project cdap by caskdata.
From the class AuthorizerClassLoader, method createParent.
@VisibleForTesting
static ClassLoader createParent() {
  ClassLoader baseClassLoader = AuthorizerClassLoader.class.getClassLoader();
  final Set<String> authorizerResources = traceSecurityDependencies(baseClassLoader);
  // By default, FilterClassLoader's defaultFilter allows all Hadoop classes, which makes it so that
  // the authorizer extension can share the same instance of UserGroupInformation. This allows Kerberos
  // credential renewal to also renew for any extension.
  final FilterClassLoader.Filter defaultFilter = FilterClassLoader.defaultFilter();
  return new FilterClassLoader(baseClassLoader, new FilterClassLoader.Filter() {
    @Override
    public boolean acceptResource(String resource) {
      return defaultFilter.acceptResource(resource) || authorizerResources.contains(resource);
    }

    @Override
    public boolean acceptPackage(String packageName) {
      return true;
    }
  });
}
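Because Hadoop classes pass the default filter, the sharing described in the comment above can be observed directly. A test-style sketch (not from the CDAP test suite), assuming Hadoop is on the classpath and the code runs in the same package as AuthorizerClassLoader, since createParent is package-private:

ClassLoader parent = AuthorizerClassLoader.createParent();
Class<?> ugi = parent.loadClass("org.apache.hadoop.security.UserGroupInformation");
// Same defining classloader as the host, so the UGI state (including Kerberos
// credentials) is shared between CDAP and the authorizer extension.
assert ugi == org.apache.hadoop.security.UserGroupInformation.class;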
Use of co.cask.cdap.common.lang.FilterClassLoader in project cdap by caskdata.
From the class SparkContainerLauncher, method launch.
/**
 * Launches the given main class. The main class will be loaded through the {@link SparkContainerClassLoader}.
 *
 * @param mainClassName the main class to launch
 * @param args arguments for the main class
 */
@SuppressWarnings("unused")
public static void launch(String mainClassName, String[] args) throws Exception {
  Thread.setDefaultUncaughtExceptionHandler(new UncaughtExceptionHandler());
  ClassLoader systemClassLoader = ClassLoader.getSystemClassLoader();
  Set<URL> urls = ClassLoaders.getClassLoaderURLs(systemClassLoader, new LinkedHashSet<URL>());

  // Remove the URL that contains the given main classname to avoid infinite recursion.
  // This is needed because we generate a class with the same main classname in order to intercept the main()
  // method call from the container launch script.
  urls.remove(getURLByClass(systemClassLoader, mainClassName));

  // Remove the first scala library from the classpath. This ensures the one from Spark is used for Spark programs.
  URL scalaURL = getURLByClass(systemClassLoader, "scala.language");
  Enumeration<URL> resources = systemClassLoader.getResources("scala/language.class");
  // Only remove scala if there is more than one copy in the classpath
  int count = 0;
  while (resources.hasMoreElements()) {
    resources.nextElement();
    count++;
  }
  if (count > 1) {
    urls.remove(scalaURL);
  }

  // First create a FilterClassLoader that only loads JVM and Kafka classes from the system classloader.
  // This is to isolate the scala library from children.
  ClassLoader parentClassLoader = new FilterClassLoader(systemClassLoader, KAFKA_FILTER);

  // Create the SparkContainerClassLoader for class rewriting; it will be used for the rest of the execution.
  // Use the filtering classloader as the parent instead of the system classloader, because Spark classes
  // live in the system classloader and we want to rewrite them.
  ClassLoader classLoader = new SparkContainerClassLoader(urls.toArray(new URL[urls.size()]), parentClassLoader);

  // Set the context classloader and launch the actual Spark main class.
  Thread.currentThread().setContextClassLoader(classLoader);

  // Install the JUL to SLF4J bridge
  try {
    classLoader.loadClass(SLF4JBridgeHandler.class.getName()).getDeclaredMethod("install").invoke(null);
  } catch (Exception e) {
    // Log the error and continue
    LOG.warn("Failed to invoke SLF4JBridgeHandler.install() required for jul-to-slf4j bridge", e);
  }

  try {
    // Get the SparkRuntimeContext to initialize all necessary services and the logging context.
    // Need to do it using the SparkContainerClassLoader through reflection.
    classLoader.loadClass(SparkRuntimeContextProvider.class.getName()).getMethod("get").invoke(null);

    // Invoke StandardOutErrorRedirector.redirectToLogger()
    classLoader.loadClass(StandardOutErrorRedirector.class.getName())
      .getDeclaredMethod("redirectToLogger", String.class)
      .invoke(null, mainClassName);

    // Reset spark.executorEnv.CDAP_LOG_DIR to the literal <LOG_DIR> placeholder. The value resolved
    // on the driver would otherwise propagate to executors,
    // which causes executor logs to attempt to write to the driver log directory.
    if (System.getProperty("spark.executorEnv.CDAP_LOG_DIR") != null) {
      System.setProperty("spark.executorEnv.CDAP_LOG_DIR", "<LOG_DIR>");
    }

    LOG.info("Launch main class {}.main({})", mainClassName, Arrays.toString(args));
    classLoader.loadClass(mainClassName).getMethod("main", String[].class).invoke(null, new Object[] { args });
    LOG.info("Main method returned {}", mainClassName);
  } catch (Throwable t) {
    // Log the exception since it will be propagated back to the JVM
    // and kill the main thread (hence the JVM process).
    // If we don't log it here as ERROR, it will be logged by UncaughtExceptionHandler at DEBUG level.
    LOG.error("Exception raised when calling {}.main(String[]) method", mainClassName, t);
    throw t;
  }
}
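The helper getURLByClass used above is internal to SparkContainerLauncher, and its actual implementation may differ. A plausible sketch of what it could do, resolving the classpath URL that provides a given class (the method body below is an assumption, not the CDAP source; it assumes java.io.IOException, java.net.URL, and java.net.JarURLConnection are imported):

private static URL getURLByClass(ClassLoader classLoader, String className) throws IOException {
  String resourceName = className.replace('.', '/') + ".class";
  URL resource = classLoader.getResource(resourceName);
  if (resource == null) {
    throw new IllegalStateException("Failed to find resource for class " + className);
  }
  if ("jar".equals(resource.getProtocol())) {
    // jar:file:/path/to.jar!/scala/language.class -> file:/path/to.jar
    return ((JarURLConnection) resource.openConnection()).getJarFileURL();
  }
  // An expanded directory on the classpath: strip the resource suffix
  String url = resource.toString();
  return new URL(url.substring(0, url.length() - resourceName.length()));
}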