Use of org.apache.flink.runtime.blob.VoidBlobStore in project flink by apache.
In class BlobLibraryCacheManagerTest, method testLibraryCacheManagerDifferentJobsCleanup:
/**
 * Tests that the {@link BlobLibraryCacheManager} cleans up after the class loader leases for
 * different jobs are closed.
 */
@Test
public void testLibraryCacheManagerDifferentJobsCleanup() throws Exception {
// Two independent jobs: job 1 will own two permanent blobs, job 2 will own one.
JobID jobId1 = new JobID();
JobID jobId2 = new JobID();
List<PermanentBlobKey> keys1 = new ArrayList<>();
List<PermanentBlobKey> keys2 = new ArrayList<>();
BlobServer server = null;
PermanentBlobCache cache = null;
BlobLibraryCacheManager libCache = null;
final byte[] buf = new byte[128];
try {
Configuration config = new Configuration();
// Short cleanup interval (1s) so deferred cleanup could kick in during the test.
config.setLong(BlobServerOptions.CLEANUP_INTERVAL, 1L);
server = new BlobServer(config, temporaryFolder.newFolder(), new VoidBlobStore());
server.start();
InetSocketAddress serverAddress = new InetSocketAddress("localhost", server.getPort());
cache = new PermanentBlobCache(config, temporaryFolder.newFolder(), new VoidBlobStore(), serverAddress);
// Upload blobs: two for job 1 (with different content) and one for job 2.
keys1.add(server.putPermanent(jobId1, buf));
// Mutate the buffer so the next upload produces a different blob key.
buf[0] += 1;
keys1.add(server.putPermanent(jobId1, buf));
keys2.add(server.putPermanent(jobId2, buf));
libCache = createBlobLibraryCacheManager(cache);
cache.registerJob(jobId1);
cache.registerJob(jobId2);
// Initial state: no managed jobs or reference holders; blobs exist only on the server,
// nothing has been downloaded into the cache yet.
assertEquals(0, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId1));
checkFileCountForJob(2, jobId1, server);
checkFileCountForJob(0, jobId1, cache);
checkFileCountForJob(1, jobId2, server);
checkFileCountForJob(0, jobId2, cache);
// Registering a lease for job 1 and resolving its class loader pulls job 1's blobs
// into the cache; job 2 remains untouched.
final LibraryCacheManager.ClassLoaderLease classLoaderLeaseJob1 = libCache.registerClassLoaderLease(jobId1);
final UserCodeClassLoader classLoader1 = classLoaderLeaseJob1.getOrResolveClassLoader(keys1, Collections.emptyList());
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId1));
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId2));
assertEquals(2, checkFilesExist(jobId1, keys1, cache, true));
checkFileCountForJob(2, jobId1, server);
checkFileCountForJob(2, jobId1, cache);
assertEquals(0, checkFilesExist(jobId2, keys2, cache, false));
checkFileCountForJob(1, jobId2, server);
checkFileCountForJob(0, jobId2, cache);
// A second lease for job 2 must yield a different class loader than job 1's.
final LibraryCacheManager.ClassLoaderLease classLoaderLeaseJob2 = libCache.registerClassLoaderLease(jobId2);
final UserCodeClassLoader classLoader2 = classLoaderLeaseJob2.getOrResolveClassLoader(keys2, Collections.emptyList());
assertThat(classLoader1, not(sameInstance(classLoader2)));
// Re-resolving job 2's class loader with different blob keys must be rejected.
try {
classLoaderLeaseJob2.getOrResolveClassLoader(keys1, Collections.<URL>emptyList());
fail("Should fail with an IllegalStateException");
} catch (IllegalStateException e) {
// that's what we want
}
// Re-resolving with the same keys but different classpath entries must also be rejected.
try {
classLoaderLeaseJob2.getOrResolveClassLoader(keys2, Collections.singletonList(new URL("file:///tmp/does-not-exist")));
fail("Should fail with an IllegalStateException");
} catch (IllegalStateException e) {
// that's what we want
}
// Both jobs are now managed, each with one reference holder, and both jobs' blobs
// are present in the cache.
assertEquals(2, libCache.getNumberOfManagedJobs());
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId1));
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId2));
assertEquals(2, checkFilesExist(jobId1, keys1, cache, true));
checkFileCountForJob(2, jobId1, server);
checkFileCountForJob(2, jobId1, cache);
assertEquals(1, checkFilesExist(jobId2, keys2, cache, true));
checkFileCountForJob(1, jobId2, server);
checkFileCountForJob(1, jobId2, cache);
// Releasing job 1's lease drops it from the managed jobs, but its cached files stay
// (the library cache manager does not delete blobs on release).
classLoaderLeaseJob1.release();
assertEquals(1, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId1));
assertEquals(1, libCache.getNumberOfReferenceHolders(jobId2));
assertEquals(2, checkFilesExist(jobId1, keys1, cache, true));
checkFileCountForJob(2, jobId1, server);
checkFileCountForJob(2, jobId1, cache);
assertEquals(1, checkFilesExist(jobId2, keys2, cache, true));
checkFileCountForJob(1, jobId2, server);
checkFileCountForJob(1, jobId2, cache);
// Releasing job 2's lease as well: no managed jobs remain, yet all files still exist
// on both the server and the cache.
classLoaderLeaseJob2.release();
assertEquals(0, libCache.getNumberOfManagedJobs());
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId1));
assertEquals(0, libCache.getNumberOfReferenceHolders(jobId2));
assertEquals(2, checkFilesExist(jobId1, keys1, cache, true));
checkFileCountForJob(2, jobId1, server);
checkFileCountForJob(2, jobId1, cache);
assertEquals(1, checkFilesExist(jobId2, keys2, cache, true));
checkFileCountForJob(1, jobId2, server);
checkFileCountForJob(1, jobId2, cache);
// Only PermanentBlobCache#releaseJob() actually cleans up the files (covered by
// BlobCacheCleanupTest and others).
} finally {
if (libCache != null) {
libCache.shutdown();
}
// should have been closed by the libraryCacheManager, but just in case
if (cache != null) {
cache.close();
}
if (server != null) {
server.close();
}
}
}
Use of org.apache.flink.runtime.blob.VoidBlobStore in project flink-mirror by flink-ci.
In class KubernetesHaServicesTest, method testInternalJobCleanupShouldCleanupConfigMaps:
@Test
public void testInternalJobCleanupShouldCleanupConfigMaps() throws Exception {
    new Context() {
        {
            runTest(() -> {
                final KubernetesHaServices haServices =
                        new KubernetesHaServices(
                                flinkKubeClient, executorService, configuration, new VoidBlobStore());

                // Create the leader config map that belongs to this job's JobManager.
                final JobID jobId = new JobID();
                final String leaderConfigMapName = haServices.getLeaderPathForJobManager(jobId);
                flinkKubeClient.createConfigMap(
                        new TestingFlinkKubeClient.MockKubernetesConfigMap(leaderConfigMapName));
                assertThat(flinkKubeClient.getConfigMap(leaderConfigMapName).isPresent(), is(true));

                // Cleaning up the job's HA data must remove its config map.
                haServices.internalCleanupJobData(jobId);
                assertThat(flinkKubeClient.getConfigMap(leaderConfigMapName).isPresent(), is(false));
            });
        }
    };
}
Use of org.apache.flink.runtime.blob.VoidBlobStore in project flink-mirror by flink-ci.
In class KubernetesHaServicesTest, method testInternalCleanupShouldCleanupConfigMaps:
@Test
public void testInternalCleanupShouldCleanupConfigMaps() throws Exception {
    new Context() {
        {
            runTest(() -> {
                final KubernetesHaServices haServices =
                        new KubernetesHaServices(
                                flinkKubeClient, executorService, configuration, new VoidBlobStore());
                haServices.internalCleanup();

                // Capture the label selector used to delete the HA config maps.
                final Map<String, String> deletionLabels =
                        deleteConfigMapByLabelsFuture.get(TIMEOUT, TimeUnit.MILLISECONDS);
                assertThat(deletionLabels.size(), is(3));
                assertThat(
                        deletionLabels.get(LABEL_CONFIGMAP_TYPE_KEY),
                        is(LABEL_CONFIGMAP_TYPE_HIGH_AVAILABILITY));
            });
        }
    };
}
Use of org.apache.flink.runtime.blob.VoidBlobStore in project flink-mirror by flink-ci.
In class AbstractDispatcherTest, method setUp:
@Before
public void setUp() throws Exception {
    configuration = new Configuration();
    heartbeatServices = new HeartbeatServices(1000L, 10000L);

    // Configure standalone/embedded HA components for the dispatcher tests.
    final TestingHighAvailabilityServices testingHaServices = new TestingHighAvailabilityServices();
    testingHaServices.setCheckpointRecoveryFactory(new StandaloneCheckpointRecoveryFactory());
    testingHaServices.setResourceManagerLeaderRetriever(new SettableLeaderRetrievalService());
    testingHaServices.setJobGraphStore(new StandaloneJobGraphStore());
    testingHaServices.setJobResultStore(new EmbeddedJobResultStore());
    haServices = testingHaServices;

    blobServer = new BlobServer(configuration, temporaryFolder.newFolder(), new VoidBlobStore());
}
Use of org.apache.flink.runtime.blob.VoidBlobStore in project flink-mirror by flink-ci.
In class DefaultExecutionGraphDeploymentWithBlobCacheTest, method setupBlobServer:
@Before
@Override
public void setupBlobServer() throws IOException {
    final Configuration blobConfig = new Configuration();
    // Offload threshold of 0 bytes: always offload serialized job and task information.
    blobConfig.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);

    blobServer = new BlobServer(blobConfig, TEMPORARY_FOLDER.newFolder(), new VoidBlobStore());
    blobServer.start();
    blobWriter = blobServer;

    final InetSocketAddress blobServerAddress =
            new InetSocketAddress("localhost", blobServer.getPort());
    blobCache =
            new PermanentBlobCache(
                    blobConfig, TEMPORARY_FOLDER.newFolder(), new VoidBlobStore(), blobServerAddress);
}
Aggregations