Use of org.apache.flink.client.program.PackagedProgram in project flink by apache.
Class ClassLoaderITCase, method testProgramWithChildFirstClassLoader.
@Test
public void testProgramWithChildFirstClassLoader() throws IOException, ProgramInvocationException {
    // Two files named "test-resource" exist: one in src/resource (on the parent
    // classloader's classpath) and one in a temporary folder (on the child
    // classloader's classpath).
    String childResourceDirName = "child0";
    String testResourceName = "test-resource";
    File childResourceDir = FOLDER.newFolder(childResourceDirName);
    File childResource = new File(childResourceDir, testResourceName);
    assertTrue(childResource.createNewFile());

    TestStreamEnvironment.setAsContext(
            miniClusterResource.getMiniCluster(),
            parallelism,
            Collections.singleton(new Path(CLASSLOADING_POLICY_JAR_PATH)),
            Collections.emptyList());

    // child-first classloading
    Configuration childFirstConf = new Configuration();
    childFirstConf.setString("classloader.resolve-order", "child-first");

    final PackagedProgram childFirstProgram =
            PackagedProgram.newBuilder()
                    .setJarFile(new File(CLASSLOADING_POLICY_JAR_PATH))
                    .setUserClassPaths(Collections.singletonList(childResourceDir.toURI().toURL()))
                    .setConfiguration(childFirstConf)
                    .setArguments(testResourceName, childResourceDirName)
                    .build();

    final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(childFirstProgram.getUserCodeClassLoader());
    try {
        childFirstProgram.invokeInteractiveModeForExecution();
    } finally {
        Thread.currentThread().setContextClassLoader(contextClassLoader);
    }
}
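For contrast, a minimal sketch (reusing CLASSLOADING_POLICY_JAR_PATH and the other variables from the test above) of building the same program with the alternative parent-first resolution order, under which the copy of test-resource on the parent classpath would win:

// Sketch only: same jar and arguments, but parent-first resolution.
Configuration parentFirstConf = new Configuration();
parentFirstConf.setString("classloader.resolve-order", "parent-first");
final PackagedProgram parentFirstProgram =
        PackagedProgram.newBuilder()
                .setJarFile(new File(CLASSLOADING_POLICY_JAR_PATH))
                .setUserClassPaths(Collections.singletonList(childResourceDir.toURI().toURL()))
                .setConfiguration(parentFirstConf)
                .setArguments(testResourceName, childResourceDirName)
                .build();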
Use of org.apache.flink.client.program.PackagedProgram in project flink by apache.
Class ClassLoaderITCase, method testCustomSplitJobWithCustomClassLoaderPath.
@Test
public void testCustomSplitJobWithCustomClassLoaderPath() throws IOException, ProgramInvocationException {
    URL classpath = new File(INPUT_SPLITS_PROG_JAR_FILE).toURI().toURL();
    PackagedProgram inputSplitTestProg2 =
            PackagedProgram.newBuilder().setJarFile(new File(INPUT_SPLITS_PROG_JAR_FILE)).build();
    TestEnvironment.setAsContext(
            miniClusterResource.getMiniCluster(),
            parallelism,
            Collections.emptyList(),
            Collections.singleton(classpath));
    inputSplitTestProg2.invokeInteractiveModeForExecution();
}
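Note that the classpath entry passed to TestEnvironment.setAsContext is a plain java.net.URL, so it can point either at a jar file (as here) or at a directory, which is exactly what testProgramWithChildFirstClassLoader above does with childResourceDir. A sketch with hypothetical local paths:

// Both forms are valid user-classpath entries (hypothetical paths):
URL jarUrl = new File("/path/to/job.jar").toURI().toURL();
URL dirUrl = new File("/path/to/resources").toURI().toURL(); // a directory works too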
Use of org.apache.flink.client.program.PackagedProgram in project flink by apache.
Class ClassLoaderITCase, method testCustomSplitJobWithCustomClassLoaderJar.
@Test
public void testCustomSplitJobWithCustomClassLoaderJar() throws ProgramInvocationException {
    PackagedProgram inputSplitTestProg =
            PackagedProgram.newBuilder().setJarFile(new File(INPUT_SPLITS_PROG_JAR_FILE)).build();
    TestEnvironment.setAsContext(
            miniClusterResource.getMiniCluster(),
            parallelism,
            Collections.singleton(new Path(INPUT_SPLITS_PROG_JAR_FILE)),
            Collections.emptyList());
    inputSplitTestProg.invokeInteractiveModeForExecution();
}
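Side by side, the only difference between the two custom-split tests is how the user jar reaches the job: shipped with it as a Flink Path (third argument) or referenced as an external URL (fourth argument). A condensed sketch of the two call shapes, assuming miniCluster and jarFile variables are in scope:

// Variant 1: ship the jar with the job (jar-files argument, Flink Paths).
TestEnvironment.setAsContext(
        miniCluster, parallelism,
        Collections.singleton(new Path(jarFile.getAbsolutePath())),
        Collections.emptyList());

// Variant 2: reference the jar as a classpath URL (classpaths argument, java.net.URLs).
TestEnvironment.setAsContext(
        miniCluster, parallelism,
        Collections.emptyList(),
        Collections.singleton(jarFile.toURI().toURL()));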
Use of org.apache.flink.client.program.PackagedProgram in project flink by apache.
Class ClassLoaderITCase, method testCheckpointedStreamingClassloaderJobWithCustomClassLoader.
@Test
public void testCheckpointedStreamingClassloaderJobWithCustomClassLoader() throws ProgramInvocationException {
    // Checkpointed streaming job with custom classes for the checkpoint (FLINK-2543).
    // The test also ensures that user-specific exceptions are serializable between
    // JobManager <--> JobClient.
    PackagedProgram streamingCheckpointedProg =
            PackagedProgram.newBuilder()
                    .setJarFile(new File(STREAMING_CHECKPOINTED_PROG_JAR_FILE))
                    .build();
    TestStreamEnvironment.setAsContext(
            miniClusterResource.getMiniCluster(),
            parallelism,
            Collections.singleton(new Path(STREAMING_CHECKPOINTED_PROG_JAR_FILE)),
            Collections.emptyList());
    try {
        streamingCheckpointedProg.invokeInteractiveModeForExecution();
    } catch (Exception e) {
        // The program should terminate with a 'SuccessException'. The exception class
        // is contained in the user jar but is not on the test's classpath, so its
        // deserialization must fail here.
        Optional<Throwable> exception =
                ExceptionUtils.findThrowable(
                        e,
                        candidate ->
                                candidate.getClass()
                                        .getName()
                                        .equals("org.apache.flink.test.classloading.jar.CheckpointedStreamingProgram$SuccessException"));
        if (!exception.isPresent()) {
            // The exception was not serialized between JobManager and JobClient.
            throw e;
        }
        try {
            Class.forName(exception.get().getClass().getName());
            fail("Deserialization of user exception should have failed.");
        } catch (ClassNotFoundException expected) {
            // expected
        }
    }
}
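The name-based predicate is needed above precisely because the SuccessException class cannot be loaded on the client side. When the exception type is on the classpath, ExceptionUtils also offers a class-based overload; a minimal sketch (hypothetical IOException search, assuming a caught Exception e):

// Walk the cause chain for a known type; returns Optional.empty() if absent.
Optional<IOException> ioFailure = ExceptionUtils.findThrowable(e, IOException.class);
ioFailure.ifPresent(cause -> System.err.println("I/O root cause: " + cause.getMessage()));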
Use of org.apache.flink.client.program.PackagedProgram in project flink by apache.
Class YarnConfigurationITCase, method testFlinkContainerMemory.
/**
 * Tests that the Flink components are started with the correct memory settings.
 */
@Test(timeout = 60000)
public void testFlinkContainerMemory() throws Exception {
    runTest(() -> {
        final YarnClient yarnClient = getYarnClient();
        final Configuration configuration = new Configuration(flinkConfiguration);

        final int slotsPerTaskManager = 3;
        configuration.set(TaskManagerOptions.NUM_TASK_SLOTS, slotsPerTaskManager);
        final int masterMemory = 768;
        configuration.set(JobManagerOptions.TOTAL_PROCESS_MEMORY, MemorySize.ofMebiBytes(masterMemory));
        final TaskExecutorProcessSpec tmResourceSpec = TaskExecutorProcessUtils.processSpecFromConfig(configuration);
        final int taskManagerMemory = tmResourceSpec.getTotalProcessMemorySize().getMebiBytes();

        final YarnConfiguration yarnConfiguration = getYarnConfiguration();
        final YarnClusterDescriptor clusterDescriptor =
                YarnTestUtils.createClusterDescriptorWithLogging(
                        CliFrontend.getConfigurationDirectoryFromEnv(),
                        configuration,
                        yarnConfiguration,
                        yarnClient,
                        true);
        clusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
        clusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));

        final File streamingWordCountFile = getTestJarPath("WindowJoin.jar");
        final PackagedProgram packagedProgram =
                PackagedProgram.newBuilder().setJarFile(streamingWordCountFile).build();
        final JobGraph jobGraph =
                PackagedProgramUtils.createJobGraph(packagedProgram, configuration, 1, false);

        try {
            final ClusterSpecification clusterSpecification =
                    new ClusterSpecification.ClusterSpecificationBuilder()
                            .setMasterMemoryMB(masterMemory)
                            .setTaskManagerMemoryMB(taskManagerMemory)
                            .setSlotsPerTaskManager(slotsPerTaskManager)
                            .createClusterSpecification();
            final ClusterClient<ApplicationId> clusterClient =
                    clusterDescriptor.deployJobCluster(clusterSpecification, jobGraph, true).getClusterClient();
            final ApplicationId clusterId = clusterClient.getClusterId();
            final RestClient restClient = new RestClient(configuration, TestingUtils.defaultExecutor());

            try {
                final ApplicationReport applicationReport = yarnClient.getApplicationReport(clusterId);
                final ApplicationAttemptId currentApplicationAttemptId =
                        applicationReport.getCurrentApplicationAttemptId();

                // wait until the second container has been allocated
                List<ContainerReport> containers = yarnClient.getContainers(currentApplicationAttemptId);
                while (containers.size() < 2) {
                    // this is nasty but Yarn does not offer a better way to wait
                    Thread.sleep(50L);
                    containers = yarnClient.getContainers(currentApplicationAttemptId);
                }

                for (ContainerReport container : containers) {
                    if (container.getContainerId().getId() == 1) {
                        // this should be the application master
                        assertThat(container.getAllocatedResource().getMemory(), is(masterMemory));
                    } else {
                        assertThat(container.getAllocatedResource().getMemory(), is(taskManagerMemory));
                    }
                }

                final URI webURI = new URI(clusterClient.getWebInterfaceURL());
                CompletableFuture<TaskManagersInfo> taskManagersInfoCompletableFuture;
                Collection<TaskManagerInfo> taskManagerInfos;
                while (true) {
                    taskManagersInfoCompletableFuture =
                            restClient.sendRequest(
                                    webURI.getHost(),
                                    webURI.getPort(),
                                    TaskManagersHeaders.getInstance(),
                                    EmptyMessageParameters.getInstance(),
                                    EmptyRequestBody.getInstance());
                    final TaskManagersInfo taskManagersInfo = taskManagersInfoCompletableFuture.get();
                    taskManagerInfos = taskManagersInfo.getTaskManagerInfos();

                    // wait until the task manager has registered and reported its slots
                    if (hasTaskManagerConnectedAndReportedSlots(taskManagerInfos)) {
                        break;
                    } else {
                        Thread.sleep(100L);
                    }
                }

                // there should be at least one TaskManagerInfo
                final TaskManagerInfo taskManagerInfo = taskManagerInfos.iterator().next();
                assertThat(taskManagerInfo.getNumberSlots(), is(slotsPerTaskManager));

                final long expectedHeapSizeBytes = tmResourceSpec.getJvmHeapMemorySize().getBytes();
                // We compare the physical memory assigned to the container with the heap
                // memory passed to the JVM as the -Xmx parameter. The values may differ
                // significantly due to the system page size or the JVM implementation,
                // so a 15% threshold is used here.
                assertThat(
                        (double) taskManagerInfo.getHardwareDescription().getSizeOfJvmHeap()
                                / (double) expectedHeapSizeBytes,
                        is(closeTo(1.0, 0.15)));

                final int expectedManagedMemoryMB = tmResourceSpec.getManagedMemorySize().getMebiBytes();
                assertThat(
                        (int) (taskManagerInfo.getHardwareDescription().getSizeOfManagedMemory() >> 20),
                        is(expectedManagedMemoryMB));
            } finally {
                restClient.shutdown(TIMEOUT);
                clusterClient.close();
            }

            clusterDescriptor.killCluster(clusterId);
        } finally {
            clusterDescriptor.close();
        }
    });
}
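Both waits in this test poll in an unbounded loop and rely solely on the JUnit timeout to abort. A minimal, self-contained sketch of a deadline-bounded polling helper (a hypothetical utility, not part of the Flink test code) that such waits could be routed through:

import java.time.Duration;

final class PollUtil {

    /** A condition that may throw, e.g. a YarnClient call. */
    @FunctionalInterface
    interface Condition {
        boolean check() throws Exception;
    }

    /**
     * Polls the condition until it holds or the timeout elapses.
     * Returns true if the condition held within the deadline.
     */
    static boolean waitUntil(Condition condition, Duration timeout, Duration pollInterval)
            throws Exception {
        final long deadlineNanos = System.nanoTime() + timeout.toNanos();
        while (!condition.check()) {
            if (System.nanoTime() >= deadlineNanos) {
                return false;
            }
            Thread.sleep(pollInterval.toMillis());
        }
        return true;
    }
}

With such a helper, the container wait could read assertTrue(PollUtil.waitUntil(() -> yarnClient.getContainers(currentApplicationAttemptId).size() >= 2, Duration.ofSeconds(30), Duration.ofMillis(50))), so a missing container surfaces as an assertion failure rather than a test timeout.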