Use of java.util.Set in the Apache Hadoop project.
Class DFSInputStream, method reportCheckSumFailure.
/**
 * DFSInputStream reports checksum failures.
 * For replicated blocks, we have the following logic:
 * Case I : the client has tried multiple data nodes and at least one of the
 * attempts has succeeded. We report the other failures as corrupted blocks
 * to the namenode.
 * Case II: the client has tried all data nodes and every attempt failed. We
 * only report if the total number of replicas is 1. We do not report
 * otherwise, since the failures may be due to the client itself being unable
 * to read.
 *
 * For erasure-coded blocks, each block in corruptedBlockMap is an internal
 * block in a block group, and there is usually only one DataNode
 * corresponding to each internal block. For this case we simply report the
 * corrupted blocks to the NameNode and ignore the above logic.
 *
 * @param corruptedBlocks map of corrupted blocks
 * @param dataNodeCount number of data nodes that hold the block replicas
 */
protected void reportCheckSumFailure(CorruptedBlocks corruptedBlocks,
    int dataNodeCount, boolean isStriped) {
  Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap =
      corruptedBlocks.getCorruptionMap();
  if (corruptedBlockMap.isEmpty()) {
    return;
  }
  List<LocatedBlock> reportList = new ArrayList<>(corruptedBlockMap.size());
  for (Map.Entry<ExtendedBlock, Set<DatanodeInfo>> entry :
      corruptedBlockMap.entrySet()) {
    ExtendedBlock blk = entry.getKey();
    Set<DatanodeInfo> dnSet = entry.getValue();
    if (isStriped || ((dnSet.size() < dataNodeCount) && (dnSet.size() > 0))
        || ((dataNodeCount == 1) && (dnSet.size() == dataNodeCount))) {
      DatanodeInfo[] locs = new DatanodeInfo[dnSet.size()];
      int i = 0;
      for (DatanodeInfo dn : dnSet) {
        locs[i++] = dn;
      }
      reportList.add(new LocatedBlock(blk, locs));
    }
  }
  if (reportList.size() > 0) {
    dfsClient.reportChecksumFailure(src,
        reportList.toArray(new LocatedBlock[reportList.size()]));
  }
  corruptedBlockMap.clear();
}
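The condition inside the loop is exactly the case analysis from the Javadoc. A minimal stand-alone sketch of that decision rule (a hypothetical helper written here for illustration, not part of DFSInputStream) looks like this:

// Illustrative restatement of the reporting rule used above.
// failedNodes   = number of data nodes on which the checksum check failed
// dataNodeCount = number of data nodes holding replicas of the block
// isStriped     = true for an internal block of an erasure-coded block group
static boolean shouldReportCorruption(int failedNodes, int dataNodeCount,
    boolean isStriped) {
  if (isStriped) {
    return true;   // erasure-coded: always report the internal block
  }
  if (failedNodes > 0 && failedNodes < dataNodeCount) {
    return true;   // Case I: at least one other replica was read successfully
  }
  // Case II: every replica failed; report only when there is a single replica,
  // since otherwise the client itself may be unable to read.
  return dataNodeCount == 1 && failedNodes == dataNodeCount;
}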
Use of java.util.Set in the Apache Hadoop project.
Class TestUpgradeDomainBlockPlacementPolicy, method testPlacementAfterDecommission.
@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
  final long fileSize = DEFAULT_BLOCK_SIZE * 5;
  final String testFile = "/testfile";
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize,
      REPLICATION_FACTOR, 1000L);
  // Decommission some nodes and wait until the decommissions have finished.
  refreshDatanodeAdminProperties2();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {

    @Override
    public Boolean get() {
      boolean successful = true;
      LocatedBlocks locatedBlocks;
      try {
        locatedBlocks = cluster.getFileSystem().getClient()
            .getLocatedBlocks(path.toString(), 0, fileSize);
      } catch (IOException ioe) {
        return false;
      }
      for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        Set<DatanodeInfo> locs = new HashSet<>();
        for (DatanodeInfo datanodeInfo : block.getLocations()) {
          if (datanodeInfo.getAdminState() ==
              DatanodeInfo.AdminStates.NORMAL) {
            locs.add(datanodeInfo);
          }
        }
        for (DatanodeID datanodeID : expectedDatanodeIDs) {
          successful = successful && locs.contains(datanodeID);
        }
      }
      return successful;
    }
  }, 1000, 60000);
  // Verify the block placement policy of each block.
  LocatedBlocks locatedBlocks = cluster.getFileSystem().getClient()
      .getLocatedBlocks(path.toString(), 0, fileSize);
  for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
    BlockPlacementStatus status = cluster.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy()
        .verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
    assertTrue(status.isPlacementPolicySatisfied());
  }
}
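The anonymous Supplier passed to GenericTestUtils.waitFor is simply a polling predicate that is re-evaluated until it returns true or the timeout expires. A stripped-down, stand-alone version of the same polling pattern (a hypothetical helper, not Hadoop's implementation) could look like this:

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

final class PollingUtil {

  // Re-evaluates the check every checkMillis until it returns true,
  // or fails with TimeoutException once timeoutMillis has elapsed.
  static void waitFor(Supplier<Boolean> check, long checkMillis,
      long timeoutMillis) throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!Boolean.TRUE.equals(check.get())) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException(
            "Condition not met within " + timeoutMillis + " ms");
      }
      Thread.sleep(checkMillis);
    }
  }
}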
Use of java.util.Set in the Apache Hadoop project.
Class RumenToSLSConverter, method generateSLSNodeFile.
@SuppressWarnings("unchecked")
private static void generateSLSNodeFile(String outputFile) throws IOException {
  try (Writer output =
      new OutputStreamWriter(new FileOutputStream(outputFile), "UTF-8")) {
    ObjectMapper mapper = new ObjectMapper();
    ObjectWriter writer = mapper.writerWithDefaultPrettyPrinter();
    for (Map.Entry<String, Set<String>> entry : rackNodeMap.entrySet()) {
      Map rack = new LinkedHashMap();
      rack.put("rack", entry.getKey());
      List nodes = new ArrayList();
      for (String name : entry.getValue()) {
        Map node = new LinkedHashMap();
        node.put("node", name);
        nodes.add(node);
      }
      rack.put("nodes", nodes);
      output.write(writer.writeValueAsString(rack) + EOL);
    }
  }
}
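Each iteration of the loop serializes one rack entry as pretty-printed JSON. The stand-alone sketch below reproduces that shape for a single made-up rack; it assumes the com.fasterxml Jackson ObjectMapper, and the rack and node names are placeholders:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class SlsNodeEntrySketch {
  public static void main(String[] args) throws Exception {
    ObjectWriter writer = new ObjectMapper().writerWithDefaultPrettyPrinter();
    Map<String, Object> rack = new LinkedHashMap<>();
    rack.put("rack", "/rack1");
    List<Map<String, String>> nodes = new ArrayList<>();
    for (String name : new String[] { "node1", "node2" }) {
      Map<String, String> node = new LinkedHashMap<>();
      node.put("node", name);
      nodes.add(node);
    }
    rack.put("nodes", nodes);
    // Prints one rack entry of the form (exact whitespace depends on Jackson):
    // { "rack" : "/rack1", "nodes" : [ { "node" : "node1" }, { "node" : "node2" } ] }
    System.out.println(writer.writeValueAsString(rack));
  }
}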
Use of java.util.Set in the Apache Hadoop project.
Class ResourceSet, method addResources.
public Map<LocalResourceVisibility, Collection<LocalResourceRequest>> addResources(
    Map<String, LocalResource> localResourceMap) throws URISyntaxException {
  if (localResourceMap == null || localResourceMap.isEmpty()) {
    return null;
  }
  Map<LocalResourceRequest, Set<String>> allResources = new HashMap<>();
  List<LocalResourceRequest> publicList = new ArrayList<>();
  List<LocalResourceRequest> privateList = new ArrayList<>();
  List<LocalResourceRequest> appList = new ArrayList<>();
  for (Map.Entry<String, LocalResource> rsrc : localResourceMap.entrySet()) {
    LocalResource resource = rsrc.getValue();
    LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue());
    allResources.putIfAbsent(req, new HashSet<>());
    allResources.get(req).add(rsrc.getKey());
    storeSharedCacheUploadPolicy(req,
        resource.getShouldBeUploadedToSharedCache());
    switch (resource.getVisibility()) {
      case PUBLIC:
        publicList.add(req);
        break;
      case PRIVATE:
        privateList.add(req);
        break;
      case APPLICATION:
        appList.add(req);
        break;
      default:
        break;
    }
  }
  Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
      new LinkedHashMap<>();
  if (!publicList.isEmpty()) {
    publicRsrcs.addAll(publicList);
    req.put(LocalResourceVisibility.PUBLIC, publicList);
  }
  if (!privateList.isEmpty()) {
    privateRsrcs.addAll(privateList);
    req.put(LocalResourceVisibility.PRIVATE, privateList);
  }
  if (!appList.isEmpty()) {
    appRsrcs.addAll(appList);
    req.put(LocalResourceVisibility.APPLICATION, appList);
  }
  if (!allResources.isEmpty()) {
    this.pendingResources.putAll(allResources);
  }
  return req;
}
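The allResources map is a reverse index from each LocalResourceRequest to the set of symlink names that refer to it; the putIfAbsent/get pair is the usual "group by" idiom and can also be written with computeIfAbsent. A generic sketch of that grouping step, with plain strings standing in for the YARN types:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class GroupByValueSketch {
  public static void main(String[] args) {
    // Symlink name -> resource it points at (placeholder values).
    Map<String, String> localResourceMap = Map.of(
        "libA.jar", "hdfs:///apps/lib.jar",
        "libB.jar", "hdfs:///apps/lib.jar",
        "conf.xml", "hdfs:///apps/conf.xml");
    Map<String, Set<String>> allResources = new HashMap<>();
    for (Map.Entry<String, String> rsrc : localResourceMap.entrySet()) {
      // Equivalent to putIfAbsent(key, new HashSet<>()) followed by get(key).add(...)
      allResources.computeIfAbsent(rsrc.getValue(), k -> new HashSet<>())
          .add(rsrc.getKey());
    }
    // e.g. {hdfs:///apps/lib.jar=[libA.jar, libB.jar], hdfs:///apps/conf.xml=[conf.xml]}
    // (iteration order of a HashMap is not guaranteed)
    System.out.println(allResources);
  }
}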
Use of java.util.Set in the Apache Hadoop project.
Class RMWebServices, method getLabelsToNodes.
@GET
@Path("/label-mappings")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public LabelsToNodesInfo getLabelsToNodes(
    @QueryParam("labels") Set<String> labels) throws IOException {
  init();
  LabelsToNodesInfo lts = new LabelsToNodesInfo();
  Map<NodeLabelInfo, NodeIDsInfo> ltsMap = lts.getLabelsToNodes();
  Map<NodeLabel, Set<NodeId>> labelsToNodeId = null;
  if (labels == null || labels.size() == 0) {
    labelsToNodeId =
        rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes();
  } else {
    labelsToNodeId =
        rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes(labels);
  }
  for (Entry<NodeLabel, Set<NodeId>> entry : labelsToNodeId.entrySet()) {
    List<String> nodeIdStrList = new ArrayList<String>();
    for (NodeId nodeId : entry.getValue()) {
      nodeIdStrList.add(nodeId.toString());
    }
    ltsMap.put(new NodeLabelInfo(entry.getKey()),
        new NodeIDsInfo(nodeIdStrList));
  }
  return lts;
}
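Since the method is a JAX-RS GET endpoint, it can be exercised over HTTP. A hypothetical client-side call with Java 11's HttpClient is sketched below; the host, the port (8088 is the ResourceManager web UI default), the /ws/v1/cluster base path, and the "gpu" label are assumptions about the deployment, not part of the snippet above:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class LabelMappingsClientSketch {
  public static void main(String[] args) throws Exception {
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(
            URI.create("http://rm-host:8088/ws/v1/cluster/label-mappings?labels=gpu"))
        .header("Accept", "application/json") // selects the JSON variant of @Produces
        .GET()
        .build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    System.out.println(response.body()); // JSON rendering of LabelsToNodesInfo
  }
}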