Use of java.util.Map.Entry in project hadoop by apache.
From the class FSImageTestUtil, the method assertPropertiesFilesSame:
/**
* Assert that a set of properties files all contain the same data.
*
* @param propFiles the files to compare.
* @param ignoredProperties the property names to be ignored during
* comparison.
* @throws IOException if the files cannot be opened or read
* @throws AssertionError if the files differ
*/
public static void assertPropertiesFilesSame(File[] propFiles,
    Set<String> ignoredProperties) throws IOException {
  Set<Map.Entry<Object, Object>> prevProps = null;
  for (File f : propFiles) {
    Properties props;
    FileInputStream is = new FileInputStream(f);
    try {
      props = new Properties();
      props.load(is);
    } finally {
      IOUtils.closeStream(is);
    }
    if (prevProps == null) {
      prevProps = props.entrySet();
    } else {
      Set<Entry<Object, Object>> diff =
          Sets.symmetricDifference(prevProps, props.entrySet());
      Iterator<Entry<Object, Object>> it = diff.iterator();
      while (it.hasNext()) {
        Entry<Object, Object> entry = it.next();
        if (ignoredProperties != null &&
            ignoredProperties.contains(entry.getKey())) {
          continue;
        }
        fail("Properties file " + f + " differs from " + propFiles[0]);
      }
    }
  }
}
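As a minimal standalone sketch (not from Hadoop; the class name is hypothetical), the same entry-set comparison can be reproduced with only the JDK in place of Guava's Sets.symmetricDifference:

import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

public class PropertiesDiffDemo {
  public static void main(String[] args) {
    Properties a = new Properties();
    a.setProperty("layoutVersion", "-63");
    a.setProperty("namespaceID", "12345");

    Properties b = new Properties();
    b.setProperty("layoutVersion", "-63");
    b.setProperty("namespaceID", "67890");   // differs from a

    // Symmetric difference of the two entry sets: entries present in exactly
    // one of the two Properties objects. A non-empty result means they differ.
    Set<Map.Entry<Object, Object>> diff = new HashSet<>(a.entrySet());
    Set<Map.Entry<Object, Object>> other = new HashSet<>(b.entrySet());
    Set<Map.Entry<Object, Object>> common = new HashSet<>(diff);
    common.retainAll(other);
    diff.addAll(other);
    diff.removeAll(common);

    for (Map.Entry<Object, Object> e : diff) {
      System.out.println("differs: " + e.getKey() + "=" + e.getValue());
    }
  }
}

Running this prints both namespaceID entries, which is exactly the condition under which assertPropertiesFilesSame would fail the test (unless the key is in ignoredProperties).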
Use of java.util.Map.Entry in project hadoop by apache.
From the class JobBase, the method getReport:
/**
 * Report the counters: one tab-separated name/value pair per line.
 *
 * @return the report string
 */
protected String getReport() {
  StringBuffer sb = new StringBuffer();
  Iterator iter = this.longCounters.entrySet().iterator();
  while (iter.hasNext()) {
    Entry e = (Entry) iter.next();
    sb.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
  }
  iter = this.doubleCounters.entrySet().iterator();
  while (iter.hasNext()) {
    Entry e = (Entry) iter.next();
    sb.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
  }
  return sb.toString();
}
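A typed, self-contained sketch of the same Map.Entry iteration (hypothetical class and counter names, not the Hadoop class) shows the tab-separated name/value lines the report is built from:

import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;

public class CounterReportDemo {
  public static void main(String[] args) {
    Map<String, Long> longCounters = new TreeMap<>();
    longCounters.put("errorCount", 3L);
    longCounters.put("totalCount", 42L);

    // Same idea as getReport(): one "name<TAB>value" line per counter.
    StringBuilder sb = new StringBuilder();
    Iterator<Map.Entry<String, Long>> iter = longCounters.entrySet().iterator();
    while (iter.hasNext()) {
      Map.Entry<String, Long> e = iter.next();
      sb.append(e.getKey()).append("\t").append(e.getValue()).append("\n");
    }
    System.out.print(sb);
  }
}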
Use of java.util.Map.Entry in project hadoop by apache.
From the class NodesListManager, the method serviceInit:
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.conf = conf;
  int nodeIpCacheTimeout = conf.getInt(
      YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS,
      YarnConfiguration.DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS);
  if (nodeIpCacheTimeout <= 0) {
    resolver = new DirectResolver();
  } else {
    resolver = new CachedResolver(SystemClock.getInstance(), nodeIpCacheTimeout);
    addIfService(resolver);
  }
  // Read the hosts/exclude files to restrict access to the RM
  try {
    this.includesFile = conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
        YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
    this.excludesFile = conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
        YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
    this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
    setDecomissionedNMs();
    printConfiguredHosts();
  } catch (YarnException ex) {
    disableHostsFileReader(ex);
  } catch (IOException ioe) {
    disableHostsFileReader(ioe);
  }
  final int nodeRemovalTimeout = conf.getInt(
      YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC,
      YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
  nodeRemovalCheckInterval = (Math.min(nodeRemovalTimeout / 2, 600000));
  removalTimer = new Timer("Node Removal Timer");
  removalTimer.schedule(new TimerTask() {
    @Override
    public void run() {
      long now = Time.monotonicNow();
      for (Map.Entry<NodeId, RMNode> entry :
          rmContext.getInactiveRMNodes().entrySet()) {
        NodeId nodeId = entry.getKey();
        RMNode rmNode = entry.getValue();
        if (isUntrackedNode(rmNode.getHostName())) {
          if (rmNode.getUntrackedTimeStamp() == 0) {
            rmNode.setUntrackedTimeStamp(now);
          } else if (now - rmNode.getUntrackedTimeStamp() > nodeRemovalTimeout) {
            RMNode result = rmContext.getInactiveRMNodes().remove(nodeId);
            if (result != null) {
              decrInactiveNMMetrics(rmNode);
              LOG.info("Removed " + result.getState().toString() + " node "
                  + result.getHostName() + " from inactive nodes list");
            }
          }
        } else {
          rmNode.setUntrackedTimeStamp(0);
        }
      }
    }
  }, nodeRemovalCheckInterval, nodeRemovalCheckInterval);
  super.serviceInit(conf);
}
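The removal logic boils down to: scan the inactive-node map at half the untracked-removal timeout (capped at ten minutes) and drop entries whose untracked timestamp has aged past the timeout. A self-contained sketch of that pattern, with hypothetical names and plain JDK types standing in for the RM context:

import java.util.Iterator;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;

public class ExpiryScanDemo {
  public static void main(String[] args) throws InterruptedException {
    final long removalTimeoutMs = 2000;
    // Same derivation as serviceInit(): scan at half the timeout, capped at 10 min.
    final long checkIntervalMs = Math.min(removalTimeoutMs / 2, 600000);

    // node name -> timestamp when it was first seen as untracked
    final Map<String, Long> untrackedSince = new ConcurrentHashMap<>();
    untrackedSince.put("node-1", System.currentTimeMillis());

    Timer timer = new Timer("Node Removal Timer", true);
    timer.schedule(new TimerTask() {
      @Override
      public void run() {
        long now = System.currentTimeMillis();
        Iterator<Map.Entry<String, Long>> it = untrackedSince.entrySet().iterator();
        while (it.hasNext()) {
          Map.Entry<String, Long> entry = it.next();
          if (now - entry.getValue() > removalTimeoutMs) {
            System.out.println("Removed " + entry.getKey() + " from inactive nodes list");
            it.remove();   // safe: removal goes through the entry-set iterator
          }
        }
      }
    }, checkIntervalMs, checkIntervalMs);

    Thread.sleep(4000);   // let the timer fire a few times
    timer.cancel();
  }
}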
Use of java.util.Map.Entry in project hadoop by apache.
From the class InputStriper, the method splitFor:
/**
* @param inputDir Pool used to resolve block locations.
* @param bytes Target byte count
* @param nLocs Number of block locations per split.
* @return A set of files satisfying the byte count, with locations weighted
* to the dominating proportion of input bytes.
*/
CombineFileSplit splitFor(FilePool inputDir, long bytes, int nLocs)
    throws IOException {
  final ArrayList<Path> paths = new ArrayList<Path>();
  final ArrayList<Long> start = new ArrayList<Long>();
  final ArrayList<Long> length = new ArrayList<Long>();
  final HashMap<String, Double> sb = new HashMap<String, Double>();
  do {
    paths.add(current.getPath());
    start.add(currentStart);
    final long fromFile = Math.min(bytes, current.getLen() - currentStart);
    length.add(fromFile);
    for (BlockLocation loc :
        inputDir.locationsFor(current, currentStart, fromFile)) {
      final double tedium = loc.getLength() / (1.0 * bytes);
      for (String l : loc.getHosts()) {
        Double j = sb.get(l);
        if (null == j) {
          sb.put(l, tedium);
        } else {
          sb.put(l, j.doubleValue() + tedium);
        }
      }
    }
    currentStart += fromFile;
    bytes -= fromFile;
    // Switch to a new file if
    // - the current file is uncompressed and completely used
    // - the current file is compressed
    CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(conf);
    CompressionCodec codec = compressionCodecs.getCodec(current.getPath());
    if (current.getLen() - currentStart == 0 || codec != null) {
      current = files.get(++idx % files.size());
      currentStart = 0;
    }
  } while (bytes > 0);
  final ArrayList<Entry<String, Double>> sort =
      new ArrayList<Entry<String, Double>>(sb.entrySet());
  Collections.sort(sort, hostRank);
  final String[] hosts = new String[Math.min(nLocs, sort.size())];
  for (int i = 0; i < nLocs && i < sort.size(); ++i) {
    hosts[i] = sort.get(i).getKey();
  }
  return new CombineFileSplit(paths.toArray(new Path[0]),
      toLongArray(start), toLongArray(length), hosts);
}
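The host weighting accumulates, per host, the fraction of the target byte count served by each block location, then sorts the entries by weight and keeps the top nLocs hosts. A standalone sketch of that Entry-sorting step (hypothetical names; it assumes hostRank orders entries by descending weight, as the surrounding code implies):

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HostRankDemo {
  public static void main(String[] args) {
    final long targetBytes = 128L * 1024 * 1024;
    // host -> fraction of the target bytes served from that host
    Map<String, Double> weightPerHost = new HashMap<String, Double>();
    accumulate(weightPerHost, new String[] {"host-a", "host-b"}, 96L * 1024 * 1024, targetBytes);
    accumulate(weightPerHost, new String[] {"host-b", "host-c"}, 32L * 1024 * 1024, targetBytes);

    // Sort entries by descending weight, as splitFor() does with hostRank.
    List<Map.Entry<String, Double>> sorted =
        new ArrayList<Map.Entry<String, Double>>(weightPerHost.entrySet());
    Collections.sort(sorted, new Comparator<Map.Entry<String, Double>>() {
      @Override
      public int compare(Map.Entry<String, Double> a, Map.Entry<String, Double> b) {
        return Double.compare(b.getValue(), a.getValue());
      }
    });

    int nLocs = 2;
    for (int i = 0; i < nLocs && i < sorted.size(); ++i) {
      System.out.println(sorted.get(i).getKey() + " -> " + sorted.get(i).getValue());
    }
  }

  private static void accumulate(Map<String, Double> weights, String[] hosts,
      long blockLen, long targetBytes) {
    final double share = blockLen / (double) targetBytes;
    for (String h : hosts) {
      Double prev = weights.get(h);
      weights.put(h, prev == null ? share : prev + share);
    }
  }
}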
Use of java.util.Map.Entry in project hadoop by apache.
From the class SchedulerApplicationAttempt, the method pullNewlyUpdatedContainers:
/**
 * A container is promoted if its executionType is changed from
 * OPPORTUNISTIC to GUARANTEED. It is demoted if the change is from
 * GUARANTEED to OPPORTUNISTIC.
 * @return newly promoted and demoted containers
 */
private List<Container> pullNewlyUpdatedContainers(
    Map<ContainerId, RMContainer> newlyUpdatedContainers,
    ContainerUpdateType updateTpe) {
  List<Container> updatedContainers = new ArrayList<>();
  if (oppContainerContext == null
      && (ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateTpe
          || ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateTpe)) {
    return updatedContainers;
  }
  try {
    writeLock.lock();
    Iterator<Map.Entry<ContainerId, RMContainer>> i =
        newlyUpdatedContainers.entrySet().iterator();
    while (i.hasNext()) {
      Map.Entry<ContainerId, RMContainer> entry = i.next();
      ContainerId matchedContainerId = entry.getKey();
      RMContainer tempRMContainer = entry.getValue();
      RMContainer existingRMContainer = getRMContainer(matchedContainerId);
      if (existingRMContainer != null) {
        // swap containers
        existingRMContainer = getUpdateContext().swapContainer(
            tempRMContainer, existingRMContainer, updateTpe);
        getUpdateContext().removeFromOutstandingUpdate(
            tempRMContainer.getAllocatedSchedulerKey(),
            existingRMContainer.getContainer());
        Container updatedContainer =
            updateContainerAndNMToken(existingRMContainer, updateTpe);
        updatedContainers.add(updatedContainer);
      }
      tempContainerToKill.add(tempRMContainer);
      i.remove();
    }
    // Release all temporary containers
    Iterator<RMContainer> tempIter = tempContainerToKill.iterator();
    while (tempIter.hasNext()) {
      RMContainer c = tempIter.next();
      // Mark container for release (set RRs to null, so RM does not think
      // it is a recoverable container)
      ((RMContainerImpl) c).setResourceRequests(null);
      ((AbstractYarnScheduler) rmContext.getScheduler()).completedContainer(c,
          SchedulerUtils.createAbnormalContainerStatus(
              c.getContainerId(), SchedulerUtils.UPDATED_CONTAINER),
          RMContainerEventType.KILL);
      tempIter.remove();
    }
    return updatedContainers;
  } finally {
    writeLock.unlock();
  }
}
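The core Map.Entry pattern here is draining a map while iterating it: each entry is processed and then removed through the iterator, so the source map ends up empty without a ConcurrentModificationException. A minimal sketch with hypothetical names:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class DrainMapDemo {
  public static void main(String[] args) {
    Map<String, String> pendingUpdates = new HashMap<>();
    pendingUpdates.put("container_1", "PROMOTE");
    pendingUpdates.put("container_2", "DEMOTE");

    // Drain the map: process each entry, then remove it through the iterator,
    // mirroring how pullNewlyUpdatedContainers consumes newlyUpdatedContainers.
    Iterator<Map.Entry<String, String>> i = pendingUpdates.entrySet().iterator();
    while (i.hasNext()) {
      Map.Entry<String, String> entry = i.next();
      System.out.println("applied " + entry.getValue() + " to " + entry.getKey());
      i.remove();   // safe removal during iteration
    }
    System.out.println("remaining: " + pendingUpdates.size());
  }
}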