
Example 26 with Set

use of java.util.Set in project hadoop by apache.

the class DFSInputStream method reportCheckSumFailure.

/**
   * DFSInputStream reports checksum failure.
   * For replicated blocks, we have the following logic:
   * Case I : the client has tried multiple data nodes and at least one of
   * the attempts has succeeded. We report the other failures as corrupted
   * blocks to the namenode.
   * Case II: the client has tried all data nodes and every attempt failed.
   * We only report if the total number of replicas is 1; otherwise we do
   * not report, since the failures may be due to a handicapped client
   * (one that cannot read).
   *
   * For erasure-coded blocks, each block in corruptedBlockMap is an internal
   * block in a block group, and there is usually only one DataNode
   * corresponding to each internal block. For this case we simply report the
   * corrupted blocks to NameNode and ignore the above logic.
   *
   * @param corruptedBlocks map of corrupted blocks
   * @param dataNodeCount number of data nodes that hold the block replicas
   * @param isStriped whether the block is striped (erasure-coded)
   */
protected void reportCheckSumFailure(CorruptedBlocks corruptedBlocks, int dataNodeCount, boolean isStriped) {
    Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap = corruptedBlocks.getCorruptionMap();
    if (corruptedBlockMap.isEmpty()) {
        return;
    }
    List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());
    for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry : corruptedBlockMap.entrySet()) {
        ExtendedBlock blk = entry.getKey();
        Set<DatanodeInfo> dnSet = entry.getValue();
        if (isStriped || ((dnSet.size() < dataNodeCount) && (dnSet.size() > 0)) || ((dataNodeCount == 1) && (dnSet.size() == dataNodeCount))) {
            DatanodeInfo[] locs = dnSet.toArray(new DatanodeInfo[0]);
            reportList.add(new LocatedBlock(blk, locs));
        }
    }
    if (!reportList.isEmpty()) {
        dfsClient.reportChecksumFailure(src, reportList.toArray(new LocatedBlock[0]));
    }
    corruptedBlockMap.clear();
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) EnumSet(java.util.EnumSet) Set(java.util.Set) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AbstractMap(java.util.AbstractMap)
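
The gating condition in reportCheckSumFailure packs all three cases into one expression. As a minimal sketch (the class and method names here are made up for illustration, not part of Hadoop), the predicate can be isolated and exercised against the cases from the javadoc:

public class ChecksumReportPredicate {

    // failedNodes plays the role of dnSet.size() in the snippet above.
    static boolean shouldReport(boolean isStriped, int failedNodes, int dataNodeCount) {
        // Striped (erasure-coded) blocks are always reported.
        // Case I: some, but not all, attempts failed.
        // Case II: every attempt failed; reported only when there is a single replica.
        return isStriped
            || (failedNodes > 0 && failedNodes < dataNodeCount)
            || (dataNodeCount == 1 && failedNodes == dataNodeCount);
    }

    public static void main(String[] args) {
        System.out.println(shouldReport(false, 1, 3)); // Case I: true
        System.out.println(shouldReport(false, 3, 3)); // Case II, 3 replicas: false
        System.out.println(shouldReport(false, 1, 1)); // Case II, 1 replica: true
        System.out.println(shouldReport(true, 3, 3));  // striped: true
    }
}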

Example 27 with Set

use of java.util.Set in project hadoop by apache.

the class TestUpgradeDomainBlockPlacementPolicy method testPlacementAfterDecommission.

@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
    final long fileSize = DEFAULT_BLOCK_SIZE * 5;
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
    // Decommission some nodes and wait until decommissions have finished.
    refreshDatanodeAdminProperties2();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            boolean successful = true;
            LocatedBlocks locatedBlocks;
            try {
                locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
            } catch (IOException ioe) {
                return false;
            }
            for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
                Set<DatanodeInfo> locs = new HashSet<>();
                for (DatanodeInfo datanodeInfo : block.getLocations()) {
                    if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                        locs.add(datanodeInfo);
                    }
                }
                for (DatanodeID datanodeID : expectedDatanodeIDs) {
                    successful = successful && locs.contains(datanodeID);
                }
            }
            return successful;
        }
    }, 1000, 60000);
    // Verify block placement policy of each block.
    LocatedBlocks locatedBlocks;
    locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
    for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        BlockPlacementStatus status = cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy().verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
        assertTrue(status.isPlacementPolicySatisfied());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashSet(java.util.HashSet) Set(java.util.Set) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockPlacementStatus(org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Test(org.junit.Test)
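
On Java 8 and later, the anonymous Supplier can be collapsed into a lambda, since Guava's Supplier has a single abstract method. A sketch of the same polling call, relying on the test's existing cluster, path, fileSize, and expectedDatanodeIDs fields (behavior unchanged; poll every 1000 ms, time out after 60000 ms):

GenericTestUtils.waitFor(() -> {
    try {
        LocatedBlocks blocks = cluster.getFileSystem().getClient()
                .getLocatedBlocks(path.toString(), 0, fileSize);
        for (LocatedBlock block : blocks.getLocatedBlocks()) {
            // Collect the replicas still in NORMAL admin state.
            Set<DatanodeInfo> live = new HashSet<>();
            for (DatanodeInfo dn : block.getLocations()) {
                if (dn.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                    live.add(dn);
                }
            }
            // Every expected datanode must still host the block.
            for (DatanodeID expected : expectedDatanodeIDs) {
                if (!live.contains(expected)) {
                    return false;
                }
            }
        }
        return true;
    } catch (IOException ioe) {
        return false;
    }
}, 1000, 60000);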

Example 28 with Set

use of java.util.Set in project hadoop by apache.

the class RumenToSLSConverter method generateSLSNodeFile.

private static void generateSLSNodeFile(String outputFile) throws IOException {
    try (Writer output = new OutputStreamWriter(new FileOutputStream(outputFile), "UTF-8")) {
        ObjectMapper mapper = new ObjectMapper();
        ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
        for (Map.Entry<String, Set<String>> entry : rackNodeMap.entrySet()) {
            Map<String, Object> rack = new LinkedHashMap<>();
            rack.put("rack", entry.getKey());
            List<Map<String, String>> nodes = new ArrayList<>();
            for (String name : entry.getValue()) {
                Map<String, String> node = new LinkedHashMap<>();
                node.put("node", name);
                nodes.add(node);
            }
            rack.put("nodes", nodes);
            output.write(writer.writeValueAsString(rack) + EOL);
        }
    }
}
Also used : TreeSet(java.util.TreeSet) Set(java.util.Set) FileOutputStream(java.io.FileOutputStream) ArrayList(java.util.ArrayList) ObjectWriter(com.fasterxml.jackson.databind.ObjectWriter) OutputStreamWriter(java.io.OutputStreamWriter) ArrayList(java.util.ArrayList) List(java.util.List) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) TreeMap(java.util.TreeMap) OutputStreamWriter(java.io.OutputStreamWriter) ObjectWriter(com.fasterxml.jackson.databind.ObjectWriter) Writer(java.io.Writer) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) LinkedHashMap(java.util.LinkedHashMap)
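
Each iteration writes one pretty-printed JSON object per rack. A standalone sketch that reproduces the serialization (the rack and node names are hypothetical; output formatting follows Jackson's default pretty printer):

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;

public class RackJsonDemo {
    public static void main(String[] args) throws Exception {
        Map<String, Object> rack = new LinkedHashMap<>();
        rack.put("rack", "rack1"); // hypothetical rack name
        List<Map<String, String>> nodes = new ArrayList<>();
        for (String name : new String[] { "node1", "node2" }) { // hypothetical nodes
            Map<String, String> node = new LinkedHashMap<>();
            node.put("node", name);
            nodes.add(node);
        }
        rack.put("nodes", nodes);
        System.out.println(new ObjectMapper()
                .writerWithDefaultPrettyPrinter().writeValueAsString(rack));
        // Prints roughly:
        // {
        //   "rack" : "rack1",
        //   "nodes" : [ {
        //     "node" : "node1"
        //   }, {
        //     "node" : "node2"
        //   } ]
        // }
    }
}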

Example 29 with Set

use of java.util.Set in project hadoop by apache.

the class ResourceSet method addResources.

public Map<LocalResourceVisibility, Collection<LocalResourceRequest>> addResources(Map<String, LocalResource> localResourceMap) throws URISyntaxException {
    if (localResourceMap == null || localResourceMap.isEmpty()) {
        return null;
    }
    Map<LocalResourceRequest, Set<String>> allResources = new HashMap<>();
    List<LocalResourceRequest> publicList = new ArrayList<>();
    List<LocalResourceRequest> privateList = new ArrayList<>();
    List<LocalResourceRequest> appList = new ArrayList<>();
    for (Map.Entry<String, LocalResource> rsrc : localResourceMap.entrySet()) {
        LocalResource resource = rsrc.getValue();
        LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue());
        allResources.putIfAbsent(req, new HashSet<>());
        allResources.get(req).add(rsrc.getKey());
        storeSharedCacheUploadPolicy(req, resource.getShouldBeUploadedToSharedCache());
        switch(resource.getVisibility()) {
            case PUBLIC:
                publicList.add(req);
                break;
            case PRIVATE:
                privateList.add(req);
                break;
            case APPLICATION:
                appList.add(req);
                break;
            default:
                break;
        }
    }
    Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req = new LinkedHashMap<>();
    if (!publicList.isEmpty()) {
        publicRsrcs.addAll(publicList);
        req.put(LocalResourceVisibility.PUBLIC, publicList);
    }
    if (!privateList.isEmpty()) {
        privateRsrcs.addAll(privateList);
        req.put(LocalResourceVisibility.PRIVATE, privateList);
    }
    if (!appList.isEmpty()) {
        appRsrcs.addAll(appList);
        req.put(LocalResourceVisibility.APPLICATION, appList);
    }
    if (!allResources.isEmpty()) {
        this.pendingResources.putAll(allResources);
    }
    return req;
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ArrayList(java.util.ArrayList) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) LinkedHashMap(java.util.LinkedHashMap) LocalResourceVisibility(org.apache.hadoop.yarn.api.records.LocalResourceVisibility) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map)
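
The putIfAbsent-then-get pair in the loop above performs two map lookups per resource. Since java.util.Map.computeIfAbsent (Java 8) is available here, the grouping step could be written in one lookup; a sketch of the equivalent accumulation, using the same allResources map:

// Equivalent grouping of resource keys per request, one lookup instead of two.
for (Map.Entry<String, LocalResource> rsrc : localResourceMap.entrySet()) {
    LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue());
    allResources.computeIfAbsent(req, k -> new HashSet<>()).add(rsrc.getKey());
}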

Example 30 with Set

use of java.util.Set in project hadoop by apache.

the class RMWebServices method getLabelsToNodes.

@GET
@Path("/label-mappings")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public LabelsToNodesInfo getLabelsToNodes(@QueryParam("labels") Set<String> labels) throws IOException {
    init();
    LabelsToNodesInfo lts = new LabelsToNodesInfo();
    Map<NodeLabelInfo, NodeIDsInfo> ltsMap = lts.getLabelsToNodes();
    Map<NodeLabel, Set<NodeId>> labelsToNodeId = null;
    if (labels == null || labels.isEmpty()) {
        labelsToNodeId = rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes();
    } else {
        labelsToNodeId = rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes(labels);
    }
    for (Entry<NodeLabel, Set<NodeId>> entry : labelsToNodeId.entrySet()) {
        List<String> nodeIdStrList = new ArrayList<String>();
        for (NodeId nodeId : entry.getValue()) {
            nodeIdStrList.add(nodeId.toString());
        }
        ltsMap.put(new NodeLabelInfo(entry.getKey()), new NodeIDsInfo(nodeIdStrList));
    }
    return lts;
}
Also used : LabelsToNodesInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) EnumSet(java.util.EnumSet) Set(java.util.Set) HashSet(java.util.HashSet) NodeLabelInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo) ArrayList(java.util.ArrayList) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)
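
A minimal client-side sketch of calling this endpoint over HTTP. The host, port, and /ws/v1/cluster prefix are assumptions about a default ResourceManager deployment, and the "gpu" label is made up; only the /label-mappings path and the labels query parameter come from the resource above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class LabelsToNodesClient {
    public static void main(String[] args) throws Exception {
        // Omitting ?labels=... returns mappings for all labels.
        URL url = new URL(
                "http://localhost:8088/ws/v1/cluster/label-mappings?labels=gpu");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // JSON LabelsToNodesInfo payload
            }
        }
    }
}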

Aggregations

Set (java.util.Set): 6789 usages
HashSet (java.util.HashSet): 4372 usages
HashMap (java.util.HashMap): 2090 usages
Map (java.util.Map): 1865 usages
Iterator (java.util.Iterator): 1774 usages
ArrayList (java.util.ArrayList): 1113 usages
List (java.util.List): 980 usages
Test (org.junit.Test): 920 usages
TreeSet (java.util.TreeSet): 536 usages
IOException (java.io.IOException): 501 usages
SSOException (com.iplanet.sso.SSOException): 467 usages
LinkedHashSet (java.util.LinkedHashSet): 418 usages
SMSException (com.sun.identity.sm.SMSException): 347 usages
IdRepoException (com.sun.identity.idm.IdRepoException): 268 usages
Collection (java.util.Collection): 259 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 256 usages
File (java.io.File): 245 usages
SSOToken (com.iplanet.sso.SSOToken): 226 usages
Collectors (java.util.stream.Collectors): 219 usages
Test (org.testng.annotations.Test): 209 usages