use of java.util.LinkedList in project storm by apache.
the class DefaultResourceAwareStrategy method orderExecutors.
/**
* Order executors based on how many in and out connections they will potentially need to make.
* First order components by the number of in and out connections each will have. Then iterate through the sorted list of components.
* For each component, sort its neighbors by how many connections they will make with that component.
* Add an executor from this component and then from each neighboring component in sorted order. Do this until there is nothing left to schedule.
*
* @param td The topology the executors belong to
* @param unassignedExecutors a collection of unassigned executors that need to be scheduled. Should only try to assign executors from this list
* @return a list of executors in sorted order
*/
private List<ExecutorDetails> orderExecutors(TopologyDetails td, Collection<ExecutorDetails> unassignedExecutors) {
    Map<String, Component> componentMap = td.getComponents();
    List<ExecutorDetails> execsScheduled = new LinkedList<>();
    Map<String, Queue<ExecutorDetails>> compToExecsToSchedule = new HashMap<>();
    for (Component component : componentMap.values()) {
        compToExecsToSchedule.put(component.id, new LinkedList<ExecutorDetails>());
        for (ExecutorDetails exec : component.execs) {
            if (unassignedExecutors.contains(exec)) {
                compToExecsToSchedule.get(component.id).add(exec);
            }
        }
    }
    Set<Component> sortedComponents = sortComponents(componentMap);
    sortedComponents.addAll(componentMap.values());
    for (Component currComp : sortedComponents) {
        Map<String, Component> neighbors = new HashMap<String, Component>();
        for (String compId : (List<String>) ListUtils.union(currComp.children, currComp.parents)) {
            neighbors.put(compId, componentMap.get(compId));
        }
        Set<Component> sortedNeighbors = sortNeighbors(currComp, neighbors);
        Queue<ExecutorDetails> currCompExesToSched = compToExecsToSchedule.get(currComp.id);
        boolean flag;
        do {
            flag = false;
            if (!currCompExesToSched.isEmpty()) {
                execsScheduled.add(currCompExesToSched.poll());
                flag = true;
            }
            for (Component neighborComp : sortedNeighbors) {
                Queue<ExecutorDetails> neighborCompExesToSched = compToExecsToSchedule.get(neighborComp.id);
                if (!neighborCompExesToSched.isEmpty()) {
                    execsScheduled.add(neighborCompExesToSched.poll());
                    flag = true;
                }
            }
        } while (flag);
    }
    return execsScheduled;
}
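The loop above drains one executor per component per pass, producing an interleaved ordering that keeps communicating executors close together in the schedule. A minimal, self-contained sketch of that draining pattern, with hypothetical component and executor names in place of Storm's types:

import java.util.*;

public class InterleaveDemo {
    public static void main(String[] args) {
        // One LinkedList-backed FIFO queue per hypothetical component.
        Map<String, Queue<String>> queues = new LinkedHashMap<>();
        queues.put("spout", new LinkedList<>(Arrays.asList("spout-1", "spout-2")));
        queues.put("boltA", new LinkedList<>(Arrays.asList("boltA-1", "boltA-2", "boltA-3")));
        queues.put("boltB", new LinkedList<>(Arrays.asList("boltB-1")));
        List<String> ordered = new LinkedList<>();
        boolean polledAny;
        do {
            polledAny = false;
            for (Queue<String> q : queues.values()) {
                if (!q.isEmpty()) {
                    ordered.add(q.poll()); // one executor per component per pass
                    polledAny = true;
                }
            }
        } while (polledAny);
        // Prints [spout-1, boltA-1, boltB-1, spout-2, boltA-2, boltA-3]
        System.out.println(ordered);
    }
}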
use of java.util.LinkedList in project hadoop by apache.
the class TestContainerManagerSecurity method testContainerTokenWithEpoch.
/**
* This tests whether a containerId is serialized/deserialized with epoch.
*
* @throws IOException
* @throws InterruptedException
* @throws YarnException
*/
private void testContainerTokenWithEpoch(Configuration conf) throws IOException, InterruptedException, YarnException {
    LOG.info("Running test for serializing/deserializing containerIds");
    NMTokenSecretManagerInRM nmTokenSecretManagerInRM = yarnCluster.getResourceManager().getRMContext().getNMTokenSecretManager();
    ApplicationId appId = ApplicationId.newInstance(1, 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 0);
    ContainerId cId = ContainerId.newContainerId(appAttemptId, (5L << 40) | 3L);
    NodeManager nm = yarnCluster.getNodeManager(0);
    NMTokenSecretManagerInNM nmTokenSecretManagerInNM = nm.getNMContext().getNMTokenSecretManager();
    String user = "test";
    waitForNMToReceiveNMTokenKey(nmTokenSecretManagerInNM, nm);
    NodeId nodeId = nm.getNMContext().getNodeId();
    // Both key ids should be equal.
    Assert.assertEquals(nmTokenSecretManagerInNM.getCurrentKey().getKeyId(), nmTokenSecretManagerInRM.getCurrentKey().getKeyId());
    // Create a normal container token.
    RMContainerTokenSecretManager containerTokenSecretManager = yarnCluster.getResourceManager().getRMContext().getContainerTokenSecretManager();
    Resource r = Resource.newInstance(1230, 2);
    Token containerToken = containerTokenSecretManager.createContainerToken(cId, 0, nodeId, user, r, Priority.newInstance(0), 0);
    ContainerTokenIdentifier containerTokenIdentifier = new ContainerTokenIdentifier();
    byte[] tokenIdentifierContent = containerToken.getIdentifier().array();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenIdentifierContent, tokenIdentifierContent.length);
    containerTokenIdentifier.readFields(dib);
    Assert.assertEquals(cId, containerTokenIdentifier.getContainerID());
    Assert.assertEquals(cId.toString(), containerTokenIdentifier.getContainerID().toString());
    Token nmToken = nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
    YarnRPC rpc = YarnRPC.create(conf);
    testStartContainer(rpc, appAttemptId, nodeId, containerToken, nmToken, false);
    List<ContainerId> containerIds = new LinkedList<ContainerId>();
    containerIds.add(cId);
    ContainerManagementProtocol proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
    GetContainerStatusesResponse res = proxy.getContainerStatuses(GetContainerStatusesRequest.newInstance(containerIds));
    Assert.assertNotNull(res.getContainerStatuses().get(0));
    Assert.assertEquals(cId, res.getContainerStatuses().get(0).getContainerId());
    Assert.assertEquals(cId.toString(), res.getContainerStatuses().get(0).getContainerId().toString());
}
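The id passed to newContainerId, (5L << 40) | 3L, packs an epoch of 5 above the lower 40 bits that hold the sequential container number, which is exactly what the serialization round-trip is meant to preserve. A small sketch of that decomposition; the mask constant here is local to the example and mirrors, but is not, YARN's internal bitmask:

public class ContainerIdBits {
    // Lower 40 bits: sequential container number; higher bits: epoch.
    private static final long CONTAINER_ID_MASK = (1L << 40) - 1;

    public static void main(String[] args) {
        long packed = (5L << 40) | 3L;
        long epoch = packed >>> 40;                 // 5
        long sequence = packed & CONTAINER_ID_MASK; // 3
        System.out.println("epoch=" + epoch + ", container=" + sequence);
    }
}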
use of java.util.LinkedList in project hadoop by apache.
the class TestFSRMStateStore method verifyFilesUnreadablebyHDFS.
private void verifyFilesUnreadablebyHDFS(MiniDFSCluster cluster, Path root) throws Exception {
    DistributedFileSystem fs = cluster.getFileSystem();
    Queue<Path> paths = new LinkedList<>();
    paths.add(root);
    while (!paths.isEmpty()) {
        Path p = paths.poll();
        FileStatus stat = fs.getFileStatus(p);
        if (!stat.isDirectory()) {
            try {
                LOG.warn("\n\n ##Testing path [" + p + "]\n\n");
                fs.open(p);
                Assert.fail("Super user should not be able to read [" + UserGroupInformation.getCurrentUser() + "] [" + p.getName() + "]");
            } catch (AccessControlException e) {
                Assert.assertTrue(e.getMessage().contains("superuser is not allowed to perform this operation"));
            } catch (Exception e) {
                Assert.fail("Should get an AccessControlException here");
            }
        }
        if (stat.isDirectory()) {
            FileStatus[] ls = fs.listStatus(p);
            for (FileStatus f : ls) {
                paths.add(f.getPath());
            }
        }
    }
}
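Here the LinkedList serves as a plain FIFO Queue driving an iterative breadth-first walk of the directory tree. The same traversal skeleton in isolation, over a hypothetical in-memory tree rather than HDFS:

import java.util.*;

public class BfsWalk {
    static class Node {
        final String name;
        final List<Node> children = new ArrayList<>();
        Node(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        Node root = new Node("/");
        Node a = new Node("/a");
        root.children.add(a);
        root.children.add(new Node("/b"));
        a.children.add(new Node("/a/x"));
        Queue<Node> paths = new LinkedList<>();
        paths.add(root);
        while (!paths.isEmpty()) {
            Node p = paths.poll();        // visit in level (FIFO) order
            System.out.println(p.name);
            paths.addAll(p.children);     // enqueue the next level
        }
    }
}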
use of java.util.LinkedList in project hadoop by apache.
the class EntityGroupFSTimelineStore method getTimelineStoresFromCacheIds.
private List<TimelineStore> getTimelineStoresFromCacheIds(Set<TimelineEntityGroupId> groupIds, String entityType, List<EntityCacheItem> cacheItems) throws IOException {
    List<TimelineStore> stores = new LinkedList<TimelineStore>();
    // Add stores that have non-null storage for the group ids.
    for (TimelineEntityGroupId groupId : groupIds) {
        TimelineStore storeForId = getCachedStore(groupId, cacheItems);
        if (storeForId != null) {
            LOG.debug("Adding {} as a store for the query", storeForId.getName());
            stores.add(storeForId);
            metrics.incrGetEntityToDetailOps();
        }
    }
    if (stores.size() == 0) {
        LOG.debug("Using summary store for {}", entityType);
        stores.add(this.summaryStore);
        metrics.incrGetEntityToSummaryOps();
    }
    return stores;
}
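The shape of this method is gather-then-fallback: collect every store that resolves, and only if none do, fall back to the summary store. A generic sketch of that pattern with hypothetical names; LinkedList suits a list that is only appended to and iterated:

import java.util.*;

public class GatherOrFallback {
    // Stand-in for getCachedStore(...): resolves only even ids.
    static String resolve(int id) {
        return id % 2 == 0 ? "store-" + id : null;
    }

    static List<String> storesFor(Collection<Integer> ids) {
        List<String> stores = new LinkedList<>();
        for (int id : ids) {
            String s = resolve(id);
            if (s != null) {
                stores.add(s);
            }
        }
        if (stores.isEmpty()) {
            stores.add("summary"); // fallback, like this.summaryStore
        }
        return stores;
    }

    public static void main(String[] args) {
        System.out.println(storesFor(Arrays.asList(2, 4))); // [store-2, store-4]
        System.out.println(storesFor(Arrays.asList(1, 3))); // [summary]
    }
}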
use of java.util.LinkedList in project hive by apache.
the class HiveMetaStoreChecker method checkPartitionDirs.
private void checkPartitionDirs(final ExecutorService executor, final Path basePath, final Set<Path> result, final FileSystem fs, final int maxDepth) throws HiveException {
    try {
        Queue<Future<Path>> futures = new LinkedList<Future<Path>>();
        ConcurrentLinkedQueue<PathDepthInfo> nextLevel = new ConcurrentLinkedQueue<>();
        nextLevel.add(new PathDepthInfo(basePath, 0));
        // keep going until no new sub-directories are discovered
        while (!nextLevel.isEmpty()) {
            ConcurrentLinkedQueue<PathDepthInfo> tempQueue = new ConcurrentLinkedQueue<>();
            // process each level in parallel
            while (!nextLevel.isEmpty()) {
                futures.add(executor.submit(new PathDepthInfoCallable(nextLevel.poll(), maxDepth, fs, tempQueue)));
            }
            while (!futures.isEmpty()) {
                Path p = futures.poll().get();
                if (p != null) {
                    result.add(p);
                }
            }
            // update nextLevel with the newly discovered sub-directories from above
            nextLevel = tempQueue;
        }
    } catch (InterruptedException | ExecutionException e) {
        LOG.error(e.getMessage());
        executor.shutdownNow();
        throw new HiveException(e.getCause());
    }
}
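The checker runs a level-synchronized parallel BFS: every directory in the current level is submitted to the executor, the LinkedList of futures acts as a per-level barrier, and workers publish newly found sub-directories into a shared ConcurrentLinkedQueue that becomes the next level. A condensed, runnable sketch of the same pattern over a hypothetical in-memory tree (a simple lambda stands in for PathDepthInfoCallable):

import java.util.*;
import java.util.concurrent.*;

public class LevelParallelBfs {
    public static void main(String[] args) throws Exception {
        // Children of each hypothetical path.
        Map<String, List<String>> tree = Map.of(
                "/", List.of("/a", "/b"),
                "/a", List.of("/a/x"),
                "/b", List.of(),
                "/a/x", List.of());
        ExecutorService executor = Executors.newFixedThreadPool(2);
        Queue<Future<String>> futures = new LinkedList<>();
        ConcurrentLinkedQueue<String> nextLevel = new ConcurrentLinkedQueue<>();
        nextLevel.add("/");
        while (!nextLevel.isEmpty()) {
            ConcurrentLinkedQueue<String> tempQueue = new ConcurrentLinkedQueue<>();
            while (!nextLevel.isEmpty()) {
                String p = nextLevel.poll();
                futures.add(executor.submit(() -> {
                    tempQueue.addAll(tree.get(p)); // workers publish the next level
                    return p;
                }));
            }
            while (!futures.isEmpty()) {
                System.out.println("visited " + futures.poll().get()); // per-level barrier
            }
            nextLevel = tempQueue;
        }
        executor.shutdown();
    }
}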