
Example 6 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class FSImageTestUtil method assertPropertiesFilesSame.

/**
   * Assert that a set of properties files all contain the same data.
   *
   * @param propFiles the files to compare.
   * @param ignoredProperties the property names to be ignored during
   *                          comparison.
   * @throws IOException if the files cannot be opened or read
   * @throws AssertionError if the files differ
   */
public static void assertPropertiesFilesSame(File[] propFiles, Set<String> ignoredProperties) throws IOException {
    Set<Map.Entry<Object, Object>> prevProps = null;
    for (File f : propFiles) {
        Properties props;
        FileInputStream is = new FileInputStream(f);
        try {
            props = new Properties();
            props.load(is);
        } finally {
            IOUtils.closeStream(is);
        }
        if (prevProps == null) {
            prevProps = props.entrySet();
        } else {
            Set<Entry<Object, Object>> diff = Sets.symmetricDifference(prevProps, props.entrySet());
            Iterator<Entry<Object, Object>> it = diff.iterator();
            while (it.hasNext()) {
                Entry<Object, Object> entry = it.next();
                if (ignoredProperties != null && ignoredProperties.contains(entry.getKey())) {
                    continue;
                }
                fail("Properties file " + f + " differs from " + propFiles[0]);
            }
        }
    }
}
Also used : Entry(java.util.Map.Entry) Properties(java.util.Properties) RandomAccessFile(java.io.RandomAccessFile) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) FSImageFile(org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile) File(java.io.File) FileInputStream(java.io.FileInputStream)
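For context, the same entry-set comparison can be reproduced outside Hadoop's test harness. A minimal sketch, assuming Guava is on the classpath and using hypothetical file paths:

import com.google.common.collect.Sets;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;

public class PropertiesDiffDemo {

    // Load a properties file, closing the stream via try-with-resources.
    static Properties load(String path) throws IOException {
        Properties props = new Properties();
        try (InputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        return props;
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical paths; point these at the properties files to compare.
        Properties a = load("storage1/VERSION");
        Properties b = load("storage2/VERSION");

        // Entries present in exactly one of the two files, i.e. the mismatches
        // that assertPropertiesFilesSame would report.
        Set<Entry<Object, Object>> diff =
                Sets.symmetricDifference(a.entrySet(), b.entrySet());
        for (Entry<Object, Object> e : diff) {
            System.out.println("differs: " + e.getKey() + " = " + e.getValue());
        }
    }
}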

Example 7 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class JobBase method getReport.

/**
   * Build a tab-separated report of the long and double counters,
   * one key/value pair per line.
   */
protected String getReport() {
    StringBuffer sb = new StringBuffer();
    Iterator iter = this.longCounters.entrySet().iterator();
    while (iter.hasNext()) {
        Entry e = (Entry) iter.next();
        sb.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
    }
    iter = this.doubleCounters.entrySet().iterator();
    while (iter.hasNext()) {
        Entry e = (Entry) iter.next();
        sb.append(e.getKey().toString()).append("\t").append(e.getValue()).append("\n");
    }
    return sb.toString();
}
Also used : Entry(java.util.Map.Entry) Iterator(java.util.Iterator)
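The same report layout can be produced with generics and enhanced for loops instead of raw Entry casts. A standalone sketch, with hypothetical counter maps standing in for JobBase's longCounters and doubleCounters:

import java.util.Map;
import java.util.TreeMap;

public class CounterReportDemo {

    public static void main(String[] args) {
        // Hypothetical counters; JobBase populates these from job statistics.
        Map<String, Long> longCounters = new TreeMap<>();
        longCounters.put("records.read", 1000L);
        longCounters.put("records.written", 950L);

        Map<String, Double> doubleCounters = new TreeMap<>();
        doubleCounters.put("avg.latency.ms", 12.5);

        // Same key<TAB>value<NEWLINE> layout as getReport().
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Long> e : longCounters.entrySet()) {
            sb.append(e.getKey()).append('\t').append(e.getValue()).append('\n');
        }
        for (Map.Entry<String, Double> e : doubleCounters.entrySet()) {
            sb.append(e.getKey()).append('\t').append(e.getValue()).append('\n');
        }
        System.out.print(sb);
    }
}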

Example 8 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class NodesListManager method serviceInit.

@Override
protected void serviceInit(Configuration conf) throws Exception {
    this.conf = conf;
    int nodeIpCacheTimeout = conf.getInt(YarnConfiguration.RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS, YarnConfiguration.DEFAULT_RM_NODE_IP_CACHE_EXPIRY_INTERVAL_SECS);
    if (nodeIpCacheTimeout <= 0) {
        resolver = new DirectResolver();
    } else {
        resolver = new CachedResolver(SystemClock.getInstance(), nodeIpCacheTimeout);
        addIfService(resolver);
    }
    // Read the hosts/exclude files to restrict access to the RM
    try {
        this.includesFile = conf.get(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, YarnConfiguration.DEFAULT_RM_NODES_INCLUDE_FILE_PATH);
        this.excludesFile = conf.get(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH, YarnConfiguration.DEFAULT_RM_NODES_EXCLUDE_FILE_PATH);
        this.hostsReader = createHostsFileReader(this.includesFile, this.excludesFile);
        setDecomissionedNMs();
        printConfiguredHosts();
    } catch (YarnException ex) {
        disableHostsFileReader(ex);
    } catch (IOException ioe) {
        disableHostsFileReader(ioe);
    }
    final int nodeRemovalTimeout = conf.getInt(YarnConfiguration.RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC, YarnConfiguration.DEFAULT_RM_NODEMANAGER_UNTRACKED_REMOVAL_TIMEOUT_MSEC);
    nodeRemovalCheckInterval = (Math.min(nodeRemovalTimeout / 2, 600000));
    removalTimer = new Timer("Node Removal Timer");
    removalTimer.schedule(new TimerTask() {

        @Override
        public void run() {
            long now = Time.monotonicNow();
            for (Map.Entry<NodeId, RMNode> entry : rmContext.getInactiveRMNodes().entrySet()) {
                NodeId nodeId = entry.getKey();
                RMNode rmNode = entry.getValue();
                if (isUntrackedNode(rmNode.getHostName())) {
                    if (rmNode.getUntrackedTimeStamp() == 0) {
                        rmNode.setUntrackedTimeStamp(now);
                    } else if (now - rmNode.getUntrackedTimeStamp() > nodeRemovalTimeout) {
                        RMNode result = rmContext.getInactiveRMNodes().remove(nodeId);
                        if (result != null) {
                            decrInactiveNMMetrics(rmNode);
                            LOG.info("Removed " + result.getState().toString() + " node " + result.getHostName() + " from inactive nodes list");
                        }
                    }
                } else {
                    rmNode.setUntrackedTimeStamp(0);
                }
            }
        }
    }, nodeRemovalCheckInterval, nodeRemovalCheckInterval);
    super.serviceInit(conf);
}
Also used : Entry(java.util.Map.Entry) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) Timer(java.util.Timer) TimerTask(java.util.TimerTask) NodeId(org.apache.hadoop.yarn.api.records.NodeId) IOException(java.io.IOException) YarnException(org.apache.hadoop.yarn.exceptions.YarnException)
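The core pattern here is a recurring TimerTask that walks a concurrent map's entry set and evicts entries older than a timeout. A self-contained sketch of just that pattern, using a hypothetical node map, short timeouts, and plain wall-clock time in place of Time.monotonicNow():

import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;

public class ExpiryTimerDemo {

    // Hypothetical stand-in for rmContext.getInactiveRMNodes(): node name -> last-seen time.
    private static final Map<String, Long> inactiveNodes = new ConcurrentHashMap<>();

    public static void main(String[] args) throws InterruptedException {
        inactiveNodes.put("host-1", System.currentTimeMillis());

        final long removalTimeoutMs = 2000L;
        final long checkIntervalMs = Math.min(removalTimeoutMs / 2, 600_000L);

        Timer removalTimer = new Timer("Node Removal Timer");
        removalTimer.schedule(new TimerTask() {
            @Override
            public void run() {
                long now = System.currentTimeMillis();
                // Iterating a ConcurrentHashMap's entrySet is safe while other
                // threads add or remove entries concurrently.
                for (Map.Entry<String, Long> entry : inactiveNodes.entrySet()) {
                    if (now - entry.getValue() > removalTimeoutMs) {
                        inactiveNodes.remove(entry.getKey());
                        System.out.println("Removed untracked node " + entry.getKey());
                    }
                }
            }
        }, checkIntervalMs, checkIntervalMs);

        Thread.sleep(4 * checkIntervalMs);
        removalTimer.cancel();
        System.out.println("remaining inactive nodes: " + inactiveNodes.size()); // 0
    }
}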

Example 9 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class InputStriper method splitFor.

/**
   * @param inputDir Pool used to resolve block locations.
   * @param bytes Target byte count
   * @param nLocs Number of block locations per split.
   * @return A set of files satisfying the byte count, with locations weighted
   *         to the dominating proportion of input bytes.
   */
CombineFileSplit splitFor(FilePool inputDir, long bytes, int nLocs) throws IOException {
    final ArrayList<Path> paths = new ArrayList<Path>();
    final ArrayList<Long> start = new ArrayList<Long>();
    final ArrayList<Long> length = new ArrayList<Long>();
    final HashMap<String, Double> sb = new HashMap<String, Double>();
    do {
        paths.add(current.getPath());
        start.add(currentStart);
        final long fromFile = Math.min(bytes, current.getLen() - currentStart);
        length.add(fromFile);
        for (BlockLocation loc : inputDir.locationsFor(current, currentStart, fromFile)) {
            final double tedium = loc.getLength() / (1.0 * bytes);
            for (String l : loc.getHosts()) {
                Double j = sb.get(l);
                if (null == j) {
                    sb.put(l, tedium);
                } else {
                    sb.put(l, j.doubleValue() + tedium);
                }
            }
        }
        currentStart += fromFile;
        bytes -= fromFile;
        // Switch to a new file if
        //  - the current file is uncompressed and completely used
        //  - the current file is compressed
        CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(conf);
        CompressionCodec codec = compressionCodecs.getCodec(current.getPath());
        if (current.getLen() - currentStart == 0 || codec != null) {
            current = files.get(++idx % files.size());
            currentStart = 0;
        }
    } while (bytes > 0);
    final ArrayList<Entry<String, Double>> sort = new ArrayList<Entry<String, Double>>(sb.entrySet());
    Collections.sort(sort, hostRank);
    final String[] hosts = new String[Math.min(nLocs, sort.size())];
    for (int i = 0; i < nLocs && i < sort.size(); ++i) {
        hosts[i] = sort.get(i).getKey();
    }
    return new CombineFileSplit(paths.toArray(new Path[0]), toLongArray(start), toLongArray(length), hosts);
}
Also used : Path(org.apache.hadoop.fs.Path) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) CombineFileSplit(org.apache.hadoop.mapreduce.lib.input.CombineFileSplit) BlockLocation(org.apache.hadoop.fs.BlockLocation) Entry(java.util.Map.Entry) CompressionCodecFactory(org.apache.hadoop.io.compress.CompressionCodecFactory) CompressionCodec(org.apache.hadoop.io.compress.CompressionCodec)
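The closing step, ranking hosts by accumulated weight and keeping the top nLocs, is a general "sort map entries by value" idiom. A minimal sketch with hypothetical weights and a lambda comparator in place of the hostRank comparator not shown here:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

public class TopHostsDemo {

    public static void main(String[] args) {
        // Hypothetical host -> weight map, standing in for splitFor's "sb".
        Map<String, Double> hostWeights = new HashMap<>();
        hostWeights.put("host-a", 0.6);
        hostWeights.put("host-b", 0.3);
        hostWeights.put("host-c", 0.1);

        int nLocs = 2;

        // Copy the entry set into a list, sort by weight descending, and keep
        // the keys of the top nLocs entries.
        List<Entry<String, Double>> sorted = new ArrayList<>(hostWeights.entrySet());
        sorted.sort((a, b) -> Double.compare(b.getValue(), a.getValue()));

        String[] hosts = new String[Math.min(nLocs, sorted.size())];
        for (int i = 0; i < hosts.length; i++) {
            hosts[i] = sorted.get(i).getKey();
        }

        for (String h : hosts) {
            System.out.println(h); // host-a, host-b
        }
    }
}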

Example 10 with Entry

use of java.util.Map.Entry in project hadoop by apache.

the class SchedulerApplicationAttempt method pullNewlyUpdatedContainers.

/**
   * A container is promoted if its executionType is changed from
   * OPPORTUNISTIC to GUARANTEED. It is demoted if the change is from
   * GUARANTEED to OPPORTUNISTIC.
   * @return newly promoted and demoted containers
   */
private List<Container> pullNewlyUpdatedContainers(Map<ContainerId, RMContainer> newlyUpdatedContainers, ContainerUpdateType updateTpe) {
    List<Container> updatedContainers = new ArrayList<>();
    if (oppContainerContext == null && (ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateTpe || ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateTpe)) {
        return updatedContainers;
    }
    try {
        writeLock.lock();
        Iterator<Map.Entry<ContainerId, RMContainer>> i = newlyUpdatedContainers.entrySet().iterator();
        while (i.hasNext()) {
            Map.Entry<ContainerId, RMContainer> entry = i.next();
            ContainerId matchedContainerId = entry.getKey();
            RMContainer tempRMContainer = entry.getValue();
            RMContainer existingRMContainer = getRMContainer(matchedContainerId);
            if (existingRMContainer != null) {
                // swap containers
                existingRMContainer = getUpdateContext().swapContainer(tempRMContainer, existingRMContainer, updateTpe);
                getUpdateContext().removeFromOutstandingUpdate(tempRMContainer.getAllocatedSchedulerKey(), existingRMContainer.getContainer());
                Container updatedContainer = updateContainerAndNMToken(existingRMContainer, updateTpe);
                updatedContainers.add(updatedContainer);
            }
            tempContainerToKill.add(tempRMContainer);
            i.remove();
        }
        // Release all temporary containers
        Iterator<RMContainer> tempIter = tempContainerToKill.iterator();
        while (tempIter.hasNext()) {
            RMContainer c = tempIter.next();
            // Mark container for release (set RRs to null, so RM does not think
            // it is a recoverable container)
            ((RMContainerImpl) c).setResourceRequests(null);
            ((AbstractYarnScheduler) rmContext.getScheduler()).completedContainer(c, SchedulerUtils.createAbnormalContainerStatus(c.getContainerId(), SchedulerUtils.UPDATED_CONTAINER), RMContainerEventType.KILL);
            tempIter.remove();
        }
        return updatedContainers;
    } finally {
        writeLock.unlock();
    }
}
Also used : RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) Entry(java.util.Map.Entry) RMContainerImpl(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) ArrayList(java.util.ArrayList) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)
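Worth noting is the iteration style: an explicit entry-set Iterator so each entry can be removed as it is processed, which an enhanced for loop would not allow without a ConcurrentModificationException. A self-contained sketch of that pattern with a hypothetical map:

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class EntryIteratorRemovalDemo {

    public static void main(String[] args) {
        // Hypothetical pending-update map standing in for newlyUpdatedContainers.
        Map<String, String> pendingUpdates = new HashMap<>();
        pendingUpdates.put("container_1", "PROMOTE");
        pendingUpdates.put("container_2", "DEMOTE");

        // The entry-set iterator supports remove(), so entries can be dropped
        // from the map while iterating.
        Iterator<Map.Entry<String, String>> it = pendingUpdates.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, String> entry = it.next();
            System.out.println("processed " + entry.getKey() + " -> " + entry.getValue());
            it.remove();
        }

        System.out.println("remaining: " + pendingUpdates.size()); // 0
    }
}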

Aggregations

Entry (java.util.Map.Entry): 1041
HashMap (java.util.HashMap): 295
Map (java.util.Map): 288
ArrayList (java.util.ArrayList): 258
List (java.util.List): 177
Iterator (java.util.Iterator): 113
IOException (java.io.IOException): 109
Test (org.junit.Test): 77
Set (java.util.Set): 68
LinkedHashMap (java.util.LinkedHashMap): 64
HashSet (java.util.HashSet): 62
File (java.io.File): 56
Collection (java.util.Collection): 42
TreeMap (java.util.TreeMap): 36
Properties (java.util.Properties): 35
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 35
TestSuite (junit.framework.TestSuite): 33
LinkedList (java.util.LinkedList): 31
NamedIcon (jmri.jmrit.catalog.NamedIcon): 28
Collectors (java.util.stream.Collectors): 27