Use of java.util.concurrent.ConcurrentHashMap in project hadoop by apache.
Class TestLocalResourcesTrackerImpl, method testLocalResourceCache.
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void testLocalResourceCache() {
String user = "testuser";
DrainDispatcher dispatcher = null;
try {
Configuration conf = new Configuration();
dispatcher = createDispatcher(conf);
EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
// Registering event handlers.
dispatcher.register(LocalizerEventType.class, localizerEventHandler);
dispatcher.register(ContainerEventType.class, containerEventHandler);
ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
LocalResourcesTracker tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, true, conf, new NMNullStateStoreService(), null);
LocalResourceRequest lr = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
// Creating 2 containers for the same application, both of which will
// request the same local resource.
// Container 1 requesting the local resource.
ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
ResourceEvent reqEvent1 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc1);
// No resource request is initially present in the local cache.
Assert.assertEquals(0, localrsrc.size());
// Container-1 requesting local resource.
tracker.handle(reqEvent1);
dispatcher.await();
// A new LocalizedResource should have been added to the local resource map,
// and the requesting container added to its waiting queue.
Assert.assertEquals(1, localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId1));
Assert.assertEquals(ResourceState.DOWNLOADING, localrsrc.get(lr).getState());
// Container 2 requesting the resource
ContainerId cId2 = BuilderUtils.newContainerId(1, 1, 1, 2);
LocalizerContext lc2 = new LocalizerContext(user, cId2, null);
ResourceEvent reqEvent2 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc2);
tracker.handle(reqEvent2);
dispatcher.await();
// Container 2 should have been added to the waiting queue of the local
// resource
Assert.assertEquals(2, localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId2));
// Failing resource localization
ResourceEvent resourceFailedEvent = new ResourceFailedLocalizationEvent(lr, (new Exception("test").getMessage()));
// Backing up the resource to track its state change as it will be
// removed after the failed event.
LocalizedResource localizedResource = localrsrc.get(lr);
tracker.handle(resourceFailedEvent);
dispatcher.await();
// After the failed-resource event is received, all waiting containers will
// be notified with a ContainerResourceFailedEvent.
Assert.assertEquals(0, localrsrc.size());
verify(containerEventHandler, timeout(1000).times(2)).handle(isA(ContainerResourceFailedEvent.class));
Assert.assertEquals(ResourceState.FAILED, localizedResource.getState());
// Container 1 tries to release the resource. (The resource has already been
// deleted from the cache, so this call should return silently, without an
// exception.)
ResourceReleaseEvent relEvent1 = new ResourceReleaseEvent(lr, cId1);
tracker.handle(relEvent1);
dispatcher.await();
// Container-3 now requests the same resource. This request arrives before
// Container-2's release call.
ContainerId cId3 = BuilderUtils.newContainerId(1, 1, 1, 3);
LocalizerContext lc3 = new LocalizerContext(user, cId3, null);
ResourceEvent reqEvent3 = new ResourceRequestEvent(lr, LocalResourceVisibility.PRIVATE, lc3);
tracker.handle(reqEvent3);
dispatcher.await();
// The local resource cache should now contain the requested resource, and
// the number of waiting containers should be 1.
Assert.assertEquals(1, localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Container-2 Releases the resource
ResourceReleaseEvent relEvent2 = new ResourceReleaseEvent(lr, cId2);
tracker.handle(relEvent2);
dispatcher.await();
// Making sure that there is no change in the cache after the release.
Assert.assertEquals(1, localrsrc.size());
Assert.assertTrue(localrsrc.containsKey(lr));
Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
Assert.assertTrue(localrsrc.get(lr).ref.contains(cId3));
// Sending a ResourceLocalizedEvent to the tracker; in turn, the resource
// should send a ContainerResourceLocalizedEvent to the waiting containers.
Path localizedPath = new Path("/tmp/file1");
ResourceLocalizedEvent localizedEvent = new ResourceLocalizedEvent(lr, localizedPath, 123L);
tracker.handle(localizedEvent);
dispatcher.await();
// Verifying ContainerResourceLocalizedEvent.
verify(containerEventHandler, timeout(1000).times(1)).handle(isA(ContainerResourceLocalizedEvent.class));
Assert.assertEquals(ResourceState.LOCALIZED, localrsrc.get(lr).getState());
Assert.assertEquals(1, localrsrc.get(lr).getRefCount());
// Container-3 releasing the resource.
ResourceReleaseEvent relEvent3 = new ResourceReleaseEvent(lr, cId3);
tracker.handle(relEvent3);
dispatcher.await();
Assert.assertEquals(0, localrsrc.get(lr).getRefCount());
} finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
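The pattern worth noting above is that the test constructs the ConcurrentHashMap itself and injects it into LocalResourcesTrackerImpl, so every assertion can inspect the tracker's cache directly. Here is a minimal, self-contained sketch of that injection pattern, using hypothetical stand-in classes rather than the real Hadoop types:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class CacheInjectionSketch {
    static class Resource {
        int refCount;
    }

    // Hypothetical stand-in for LocalResourcesTrackerImpl: the map is
    // supplied by the caller, not created internally.
    static class Tracker {
        private final ConcurrentMap<String, Resource> cache;
        Tracker(ConcurrentMap<String, Resource> cache) {
            this.cache = cache;
        }
        void request(String key) {
            // computeIfAbsent is an atomic get-or-create on the shared map.
            cache.computeIfAbsent(key, k -> new Resource()).refCount++;
        }
    }

    public static void main(String[] args) {
        ConcurrentMap<String, Resource> cache = new ConcurrentHashMap<>();
        Tracker tracker = new Tracker(cache);
        tracker.request("file1");
        tracker.request("file1");
        // Because the test owns the map, it can assert on internals directly,
        // just as the assertions above do with localrsrc.
        System.out.println(cache.size());                // 1
        System.out.println(cache.get("file1").refCount); // 2
    }
}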
Use of java.util.concurrent.ConcurrentHashMap in project hadoop by apache.
Class TestLocalResourcesTrackerImpl, method testResourcePresentInGoodDir.
@SuppressWarnings("unchecked")
@Test
public void testResourcePresentInGoodDir() throws IOException {
String user = "testuser";
DrainDispatcher dispatcher = null;
try {
Configuration conf = new Configuration();
dispatcher = createDispatcher(conf);
EventHandler<LocalizerEvent> localizerEventHandler = mock(EventHandler.class);
EventHandler<ContainerEvent> containerEventHandler = mock(EventHandler.class);
dispatcher.register(LocalizerEventType.class, localizerEventHandler);
dispatcher.register(ContainerEventType.class, containerEventHandler);
ContainerId cId1 = BuilderUtils.newContainerId(1, 1, 1, 1);
LocalizerContext lc1 = new LocalizerContext(user, cId1, null);
LocalResourceRequest req1 = createLocalResourceRequest(user, 1, 1, LocalResourceVisibility.PUBLIC);
LocalResourceRequest req2 = createLocalResourceRequest(user, 2, 1, LocalResourceVisibility.PUBLIC);
LocalizedResource lr1 = createLocalizedResource(req1, dispatcher);
LocalizedResource lr2 = createLocalizedResource(req2, dispatcher);
ConcurrentMap<LocalResourceRequest, LocalizedResource> localrsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
localrsrc.put(req1, lr1);
localrsrc.put(req2, lr2);
LocalDirsHandlerService dirsHandler = mock(LocalDirsHandlerService.class);
List<String> goodDirs = new ArrayList<String>();
// Both dirs start out good; /tmp/somedir2 is removed (treated as bad) later.
goodDirs.add("/tmp/somedir1/");
goodDirs.add("/tmp/somedir2");
Mockito.when(dirsHandler.getLocalDirs()).thenReturn(goodDirs);
Mockito.when(dirsHandler.getLocalDirsForRead()).thenReturn(goodDirs);
LocalResourcesTrackerImpl tracker = new LocalResourcesTrackerImpl(user, null, dispatcher, localrsrc, true, conf, new NMNullStateStoreService(), dirsHandler);
ResourceEvent req11Event = new ResourceRequestEvent(req1, LocalResourceVisibility.PUBLIC, lc1);
ResourceEvent req21Event = new ResourceRequestEvent(req2, LocalResourceVisibility.PUBLIC, lc1);
// Localize R1 for C1
tracker.handle(req11Event);
// Localize R2 for C1
tracker.handle(req21Event);
dispatcher.await();
// Compute localization paths for both resources.
Path p1 = tracker.getPathForLocalization(req1, new Path("/tmp/somedir1"), null);
Path p2 = tracker.getPathForLocalization(req2, new Path("/tmp/somedir2"), null);
ResourceLocalizedEvent rle1 = new ResourceLocalizedEvent(req1, p1, 1);
tracker.handle(rle1);
ResourceLocalizedEvent rle2 = new ResourceLocalizedEvent(req2, p2, 1);
tracker.handle(rle2);
dispatcher.await();
// lr2 should be present while /tmp/somedir2 is still a good dir.
Assert.assertTrue(tracker.checkLocalResource(lr2));
// Remove somedir2 from goodDirs; lr2 should no longer be found.
goodDirs.remove(1);
Assert.assertFalse(tracker.checkLocalResource(lr2));
} finally {
if (dispatcher != null) {
dispatcher.stop();
}
}
}
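checkLocalResource succeeds or fails depending on whether the resource's localized path still falls under one of the directories the dirs handler currently reports as good. A rough sketch of such a check under stated assumptions (simple prefix matching against the good-dir list; the real LocalResourcesTrackerImpl may normalize paths differently):

import java.util.ArrayList;
import java.util.List;

class GoodDirSketch {
    // True if the localized path lies under any currently-good local dir.
    static boolean isInGoodDir(String localizedPath, List<String> goodDirs) {
        for (String dir : goodDirs) {
            if (localizedPath.startsWith(dir)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        List<String> goodDirs = new ArrayList<>(List.of("/tmp/somedir1/", "/tmp/somedir2"));
        System.out.println(isInGoodDir("/tmp/somedir2/file", goodDirs)); // true
        goodDirs.remove(1); // somedir2 goes bad, as in the test above
        System.out.println(isInGoodDir("/tmp/somedir2/file", goodDirs)); // false
    }
}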
Use of java.util.concurrent.ConcurrentHashMap in project hadoop by apache.
Class TestLocalCacheCleanup, method testBasicCleanup.
@Test
public void testBasicCleanup() {
ConcurrentMap<LocalResourceRequest, LocalizedResource> publicRsrc = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
addResource(publicRsrc, "/pub-resource1.txt", 5, 20, 0);
addResource(publicRsrc, "/pub-resource2.txt", 3, 20, 0);
addResource(publicRsrc, "/pub-resource3.txt", 15, 20, 0);
ConcurrentMap<String, LocalResourcesTracker> privateRsrc = new ConcurrentHashMap<String, LocalResourcesTracker>();
ConcurrentMap<LocalResourceRequest, LocalizedResource> user1rsrcs = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
addResource(user1rsrcs, "/private-u1-resource4.txt", 1, 20, 0);
LocalResourcesTracker user1Tracker = new StubbedLocalResourcesTrackerImpl("user1", user1rsrcs);
privateRsrc.put("user1", user1Tracker);
ConcurrentMap<LocalResourceRequest, LocalizedResource> user2rsrcs = new ConcurrentHashMap<LocalResourceRequest, LocalizedResource>();
addResource(user2rsrcs, "/private-u2-resource5.txt", 2, 20, 0);
LocalResourcesTracker user2Tracker = new StubbedLocalResourcesTrackerImpl("user2", user2rsrcs);
privateRsrc.put("user2", user2Tracker);
ResourceLocalizationService rls = createLocService(publicRsrc, privateRsrc, 0);
LocalCacheCleanerStats stats = rls.handleCacheCleanup();
assertEquals(0, ((StubbedLocalResourcesTrackerImpl) rls.publicRsrc).getLocalRsrc().size());
assertEquals(0, ((StubbedLocalResourcesTrackerImpl) privateRsrc.get("user1")).getLocalRsrc().size());
assertEquals(0, ((StubbedLocalResourcesTrackerImpl) privateRsrc.get("user2")).getLocalRsrc().size());
assertEquals(100, stats.getTotalDelSize());
assertEquals(60, stats.getPublicDelSize());
assertEquals(40, stats.getPrivateDelSize());
}
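The asserted sizes follow from the setup: judging by the totals, the fourth argument to addResource is the resource size, so five resources of size 20 with no live references yield 60 deletable public bytes, 40 private, 100 total. A simplified sketch of evicting unreferenced entries from a shared ConcurrentHashMap (hypothetical types, not the Hadoop cleaner; the atomic two-argument remove(key, value) keeps eviction safe if other threads mutate the map concurrently):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class CleanupSketch {
    static class Rsrc {
        final long size;
        final int refCount;
        Rsrc(long size, int refCount) {
            this.size = size;
            this.refCount = refCount;
        }
    }

    // Removes every entry with no live references; returns bytes deleted.
    static long cleanup(ConcurrentMap<String, Rsrc> cache) {
        long deleted = 0;
        // ConcurrentHashMap iterators are weakly consistent: iterating while
        // mutating never throws ConcurrentModificationException.
        for (Map.Entry<String, Rsrc> e : cache.entrySet()) {
            if (e.getValue().refCount == 0 && cache.remove(e.getKey(), e.getValue())) {
                deleted += e.getValue().size;
            }
        }
        return deleted;
    }

    public static void main(String[] args) {
        ConcurrentMap<String, Rsrc> cache = new ConcurrentHashMap<>();
        cache.put("/pub-resource1.txt", new Rsrc(20, 0));
        cache.put("/pub-resource2.txt", new Rsrc(20, 0));
        cache.put("/pub-resource3.txt", new Rsrc(20, 0));
        System.out.println(cleanup(cache)); // 60, matching getPublicDelSize() above
    }
}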
Use of java.util.concurrent.ConcurrentHashMap in project hbase by apache.
Class SnapshotInfo, method getSnapshotsFilesMap.
/**
 * Returns the map of store files, keyed by path, across all snapshots.
 * @param conf the {@link Configuration} to use
 * @param uniqueHFilesArchiveSize output parameter for the total size of store files in the archive
 * @param uniqueHFilesSize output parameter for the total size of shared store files
 * @param uniqueHFilesMobSize output parameter for the total size of shared mob store files
 * @return the map of store files
 */
public static Map<Path, Integer> getSnapshotsFilesMap(final Configuration conf, AtomicLong uniqueHFilesArchiveSize, AtomicLong uniqueHFilesSize, AtomicLong uniqueHFilesMobSize) throws IOException {
List<SnapshotDescription> snapshotList = getSnapshotList(conf);
if (snapshotList.isEmpty()) {
return Collections.emptyMap();
}
ConcurrentHashMap<Path, Integer> fileMap = new ConcurrentHashMap<>();
ExecutorService exec = SnapshotManifest.createExecutor(conf, "SnapshotsFilesMapping");
try {
for (final SnapshotDescription snapshot : snapshotList) {
getSnapshotFilesMap(conf, snapshot, exec, fileMap, uniqueHFilesArchiveSize, uniqueHFilesSize, uniqueHFilesMobSize);
}
} finally {
exec.shutdown();
}
return fileMap;
}
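Each call to getSnapshotFilesMap feeds the same fileMap, presumably fanning work out to the executor internally, and ConcurrentHashMap is what makes those unsynchronized concurrent writes safe. A self-contained sketch of the same pattern with hypothetical snapshot and file names:

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class SharedMapSketch {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentMap<String, Integer> fileMap = new ConcurrentHashMap<>();
        ExecutorService exec = Executors.newFixedThreadPool(4);
        try {
            for (String snapshot : List.of("snap1", "snap2", "snap3")) {
                exec.submit(() -> {
                    // merge() atomically increments the per-file count, so two
                    // snapshots referencing the same store file never race.
                    fileMap.merge(snapshot + "/hfile-a", 1, Integer::sum);
                    fileMap.merge("shared/hfile-b", 1, Integer::sum);
                });
            }
        } finally {
            exec.shutdown();
            exec.awaitTermination(10, TimeUnit.SECONDS);
        }
        System.out.println(fileMap.get("shared/hfile-b")); // 3
    }
}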
Use of java.util.concurrent.ConcurrentHashMap in project hadoop by apache.
Class TestApplicationLimitsByPartition, method testHeadroom.
@Test
public void testHeadroom() throws Exception {
/*
 * Test case: verify that the calculated headroom is the sum of the
 * headrooms for each partition requested. Submit an app with requests
 * for the default partition and a labeled partition ('y' in the requests
 * below); the total headroom for the user should be the sum of the
 * headrooms for both labels.
 */
simpleNodeLabelMappingToManager();
CapacitySchedulerConfiguration csConf = (CapacitySchedulerConfiguration) TestUtils.getComplexConfigurationWithQueueLabels(conf);
final String A1 = CapacitySchedulerConfiguration.ROOT + ".a" + ".a1";
final String B2 = CapacitySchedulerConfiguration.ROOT + ".b" + ".b2";
csConf.setUserLimit(A1, 25);
csConf.setUserLimit(B2, 25);
YarnConfiguration conf = new YarnConfiguration();
CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
when(csContext.getConfiguration()).thenReturn(csConf);
when(csContext.getConf()).thenReturn(conf);
when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB));
when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB));
when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
RMContext rmContext = TestUtils.getMockRMContext();
RMContext spyRMContext = spy(rmContext);
when(spyRMContext.getNodeLabelManager()).thenReturn(mgr);
when(csContext.getRMContext()).thenReturn(spyRMContext);
mgr.activateNode(NodeId.newInstance("h0", 0), // default Label
Resource.newInstance(160 * GB, 16));
mgr.activateNode(NodeId.newInstance("h1", 0), // label x
Resource.newInstance(160 * GB, 16));
mgr.activateNode(NodeId.newInstance("h2", 0), // label y
Resource.newInstance(160 * GB, 16));
// The cluster has 10 nodes of 16G each (160 GB total).
Resource clusterResource = Resources.createResource(160 * GB);
when(csContext.getClusterResource()).thenReturn(clusterResource);
Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
CSQueue rootQueue = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, "root", queues, queues, TestUtils.spyHook);
ResourceUsage queueResUsage = rootQueue.getQueueResourceUsage();
when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
// Manipulate queue 'b2'
LeafQueue queue = TestLeafQueue.stubLeafQueue((LeafQueue) queues.get("b2"));
queue.updateClusterResource(clusterResource, new ResourceLimits(clusterResource));
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode("h0", rack_0, 0, 160 * GB);
FiCaSchedulerNode node_1 = TestUtils.getMockNode("h1", rack_0, 0, 160 * GB);
final String user_0 = "user_0";
final String user_1 = "user_1";
RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
RMApp rmApp = mock(RMApp.class);
ResourceRequest amResourceRequest = mock(ResourceRequest.class);
Resource amResource = Resources.createResource(0, 0);
when(amResourceRequest.getCapability()).thenReturn(amResource);
when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
when(spyRMContext.getRMApps()).thenReturn(spyApps);
RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(rmAppAttempt);
when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
Mockito.doReturn(true).when(spyApps).containsKey((ApplicationId) Matchers.any());
Priority priority_1 = TestUtils.createMockPriority(1);
// Submit first application with some resource-requests from user_0,
// and check headroom
final ApplicationAttemptId appAttemptId_0_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0_0 = new FiCaSchedulerApp(appAttemptId_0_0, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_0_0, user_0);
List<ResourceRequest> app_0_0_requests = new ArrayList<ResourceRequest>();
app_0_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
app_0_0.updateResourceRequests(app_0_0_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
// headroom = queue capacity * user limit = 50% * 90% * 160 GB * 0.25 (UL)
Resource expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
// Submit second application from user_0, check headroom
final ApplicationAttemptId appAttemptId_0_1 = TestUtils.getMockApplicationAttemptId(1, 0);
FiCaSchedulerApp app_0_1 = new FiCaSchedulerApp(appAttemptId_0_1, user_0, queue, queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_0_1, user_0);
List<ResourceRequest> app_0_1_requests = new ArrayList<ResourceRequest>();
app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
app_0_1.updateResourceRequests(app_0_1_requests);
app_0_1_requests.clear();
app_0_1_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
app_0_1.updateResourceRequests(app_0_1_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
queue.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
// no change
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
// headroom = headroom for the default partition + headroom for the y partition
// headroom for y = 100% * 50% (b queue capacity) * 160 GB * 0.25 (UL)
Resource expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
// Submit first application from user_1, check for new headroom
final ApplicationAttemptId appAttemptId_1_0 = TestUtils.getMockApplicationAttemptId(2, 0);
FiCaSchedulerApp app_1_0 = new FiCaSchedulerApp(appAttemptId_1_0, user_1, queue, queue.getAbstractUsersManager(), spyRMContext);
queue.submitApplicationAttempt(app_1_0, user_1);
List<ResourceRequest> app_1_0_requests = new ArrayList<ResourceRequest>();
app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
app_1_0.updateResourceRequests(app_1_0_requests);
app_1_0_requests.clear();
app_1_0_requests.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory, "y"));
app_1_0.updateResourceRequests(app_1_0_requests);
// Schedule to compute
queue.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
// headroom = queue capacity * user limit = (50% * 90% * 160 GB) * 0.25 (UL); the per-user value is unchanged with 2 users
expectedHeadroom = Resources.createResource((int) (0.5 * 0.9 * 160 * 0.25) * GB, 1);
// headroom = headroom for the default partition + headroom for the y partition
// headroom for y = 100% * 50% (b queue capacity) * 160 GB * 0.25 (UL)
expectedHeadroomWithReqInY = Resources.add(Resources.createResource((int) (0.25 * 0.5 * 160) * GB, 1), expectedHeadroom);
assertEquals(expectedHeadroom, app_0_0.getHeadroom());
assertEquals(expectedHeadroomWithReqInY, app_0_1.getHeadroom());
assertEquals(expectedHeadroomWithReqInY, app_1_0.getHeadroom());
}
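The ConcurrentHashMap here is wrapped in a Mockito spy: the real map backs all calls except the stubbed get() and containsKey(), which lets the scheduler see a mock RMApp for any application id. A minimal sketch of that spy pattern, assuming Mockito is on the classpath (ArgumentMatchers is the current home of the deprecated Matchers used above):

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class SpyMapSketch {
    public static void main(String[] args) {
        ConcurrentMap<String, String> spyApps = spy(new ConcurrentHashMap<String, String>());
        // doReturn(..).when(..) stubs a spy without invoking the real method.
        doReturn("stubbed-app").when(spyApps).get(any());
        doReturn(true).when(spyApps).containsKey(any());
        System.out.println(spyApps.get("app_1"));         // stubbed-app
        System.out.println(spyApps.containsKey("app_2")); // true
        System.out.println(spyApps.size());               // 0: the real map is untouched
    }
}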