
Example 1 with DatanodeInfoWithStorage

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage in project hadoop by apache.

From the class NamenodeFsck, method getReplicaInfo:

/**
   * Display info of each replica for replication block.
   * For striped block group, display info of each internal block.
   */
private String getReplicaInfo(BlockInfo storedBlock) {
    if (!(showLocations || showRacks || showReplicaDetails || showUpgradeDomains)) {
        return "";
    }
    final boolean isComplete = storedBlock.isComplete();
    Iterator<DatanodeStorageInfo> storagesItr;
    StringBuilder sb = new StringBuilder(" [");
    final boolean isStriped = storedBlock.isStriped();
    Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
    if (isComplete) {
        if (isStriped) {
            long blockId = storedBlock.getBlockId();
            Iterable<StorageAndBlockIndex> sis = ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
            for (StorageAndBlockIndex si : sis) {
                storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
            }
        }
        storagesItr = storedBlock.getStorageInfos();
    } else {
        storagesItr = storedBlock.getUnderConstructionFeature().getExpectedStorageLocationsIterator();
    }
    while (storagesItr.hasNext()) {
        DatanodeStorageInfo storage = storagesItr.next();
        if (isStriped && isComplete) {
            long index = storage2Id.get(storage);
            sb.append("blk_" + index + ":");
        }
        DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
        if (showRacks) {
            sb.append(NodeBase.getPath(dnDesc));
        } else {
            sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage.getStorageType()));
        }
        if (showUpgradeDomains) {
            String upgradeDomain = (dnDesc.getUpgradeDomain() != null) ? dnDesc.getUpgradeDomain() : UNDEFINED;
            sb.append("(ud=" + upgradeDomain + ")");
        }
        if (showReplicaDetails) {
            Collection<DatanodeDescriptor> corruptReplicas = blockManager.getCorruptReplicas(storedBlock);
            sb.append("(");
            if (dnDesc.isDecommissioned()) {
                sb.append("DECOMMISSIONED)");
            } else if (dnDesc.isDecommissionInProgress()) {
                sb.append("DECOMMISSIONING)");
            } else if (this.showMaintenanceState && dnDesc.isEnteringMaintenance()) {
                sb.append("ENTERING MAINTENANCE)");
            } else if (this.showMaintenanceState && dnDesc.isInMaintenance()) {
                sb.append("IN MAINTENANCE)");
            } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
                sb.append("CORRUPT)");
            } else if (blockManager.isExcess(dnDesc, storedBlock)) {
                sb.append("EXCESS)");
            } else if (dnDesc.isStale(this.staleInterval)) {
                sb.append("STALE_NODE)");
            } else if (storage.areBlockContentsStale()) {
                sb.append("STALE_BLOCK_CONTENT)");
            } else {
                sb.append("LIVE)");
            }
        }
        if (storagesItr.hasNext()) {
            sb.append(", ");
        }
    }
    sb.append(']');
    return sb.toString();
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) HashMap(java.util.HashMap) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) StorageAndBlockIndex(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex)
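
In the fsck code above, DatanodeInfoWithStorage objects are built on the server side from a DatanodeDescriptor plus a storage ID and storage type. Client code usually meets the class from the other direction, as the concrete type behind LocatedBlock.getLocations(), which is the pattern Examples 2 to 4 rely on. Below is a minimal sketch of that client-side pattern; the helper class name ReplicaLocationFormatter and the output format are invented for illustration only.

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

/** Hypothetical helper: summarizes the replica locations of one LocatedBlock. */
public final class ReplicaLocationFormatter {

    private ReplicaLocationFormatter() {
    }

    /** Returns something like "127.0.0.1:9866[DISK, id=DS-1] 127.0.0.2:9866[SSD, id=DS-2]". */
    public static String describe(LocatedBlock lb) {
        StringBuilder sb = new StringBuilder();
        for (DatanodeInfo dn : lb.getLocations()) {
            if (sb.length() > 0) {
                sb.append(' ');
            }
            sb.append(dn.getXferAddr());
            // Locations handed to clients are normally DatanodeInfoWithStorage,
            // which adds a storage ID and storage type to the plain DatanodeInfo.
            if (dn instanceof DatanodeInfoWithStorage) {
                DatanodeInfoWithStorage dnWithStorage = (DatanodeInfoWithStorage) dn;
                StorageType type = dnWithStorage.getStorageType();
                sb.append('[').append(type).append(", id=")
                        .append(dnWithStorage.getStorageID()).append(']');
            }
        }
        return sb.toString();
    }
}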

Example 2 with DatanodeInfoWithStorage

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage in project SSM by Intel-bigdata.

From the class TestMoverExecutor, method moveCrossNodes:

@Test
// DataXceiver.replaceBlock doing this job
public void moveCrossNodes() throws Exception {
    Configuration conf = smartContext.getConf();
    URI namenode = cluster.getURI();
    if (namenode == null) {
        throw new Exception("Cannot get namenode url.");
    }
    generateFile("One-block file");
    FileMovePlan plan = new FileMovePlan(namenode, fileName);
    // Schedule move of one replica to another node
    NameNodeConnector nnc = new NameNodeConnector(namenode, conf);
    HashSet<DatanodeInfo> fileNodes = new HashSet<>();
    ExtendedBlock block = null;
    for (LocatedBlock lb : getLocatedBlocks(dfsClient, fileName, plan)) {
        block = lb.getBlock();
        fileNodes.addAll(Arrays.asList(lb.getLocations()));
    }
    final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
    nnc.close();
    for (DatanodeStorageReport report : reports) {
        DatanodeInfo targetDatanode = report.getDatanodeInfo();
        if (!fileNodes.contains(targetDatanode)) {
            if (block == null) {
                continue;
            }
            StorageGroup source = new StorageGroup(fileNodes.iterator().next(), StorageType.DISK.toString());
            StorageGroup target = new StorageGroup(targetDatanode, StorageType.SSD.toString());
            addPlan(plan, source, target, block.getBlockId());
            break;
        }
    }
    // Do mover executor
    MoverStatus status = new MoverStatus();
    MoverExecutor moverExecutor = new MoverExecutor(status, conf, 10, 500);
    int failedMoves = moverExecutor.executeMove(plan);
    Assert.assertEquals(0, failedMoves);
    // Check storage after move
    // Thread.sleep(100000);
    int ssdNum = 0;
    int hddNum = 0;
    for (LocatedBlock lb : getLocatedBlocks(dfsClient, fileName)) {
        for (DatanodeInfo datanodeInfo : lb.getLocations()) {
            Assert.assertTrue(datanodeInfo instanceof DatanodeInfoWithStorage);
            StorageType storageType = ((DatanodeInfoWithStorage) datanodeInfo).getStorageType();
            if (storageType.equals(StorageType.SSD)) {
                ssdNum++;
            } else if (storageType.equals(StorageType.DISK)) {
                hddNum++;
            }
        }
    }
// Assert.assertEquals(1, ssdNum);
// Assert.assertEquals(2, hddNum);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) DatanodeStorageReport(org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan) IOException(java.io.IOException) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) HashSet(java.util.HashSet) Test(org.junit.Test)
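
The test above calls a getLocatedBlocks(dfsClient, fileName, plan) helper that is not shown in this snippet. The sketch below is only a guess at its core, based on the equivalent dfsClient.getLocatedBlocks(...) call in Example 4; the real TestMoverExecutor helper may do more (for instance, record block information in the FileMovePlan), and the wrapper class name LocatedBlockFetcher is invented here.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

final class LocatedBlockFetcher {

    private LocatedBlockFetcher() {
    }

    /** Sketch only: fetch every located block of a file through the DFSClient. */
    static List<LocatedBlock> getLocatedBlocks(DFSClient dfsClient, String fileName)
            throws IOException {
        long length = dfsClient.getFileInfo(fileName).getLen();
        return dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
    }
}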

Example 3 with DatanodeInfoWithStorage

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage in project SSM by Intel-bigdata.

From the class TestMoverExecutor, method moveInSameNode:

@Test
public void moveInSameNode() throws Exception {
    Configuration conf = smartContext.getConf();
    URI namenode = cluster.getURI();
    String blockContent = "This is a block with 50B.";
    StringBuilder stringBuilder = new StringBuilder();
    for (int i = 0; i < 50; i++) {
        stringBuilder.append(blockContent);
    }
    String content = stringBuilder.toString();
    generateFile(content);
    FileMovePlan plan = new FileMovePlan(namenode, fileName);
    // Schedule move in the same node
    for (LocatedBlock lb : getLocatedBlocks(dfsClient, fileName, plan)) {
        ExtendedBlock block = lb.getBlock();
        for (DatanodeInfo datanodeInfo : lb.getLocations()) {
            StorageGroup source = new StorageGroup(datanodeInfo, StorageType.DISK.toString());
            StorageGroup target = new StorageGroup(datanodeInfo, StorageType.SSD.toString());
            addPlan(plan, source, target, block.getBlockId());
        }
    }
    // Do move executor
    MoverStatus status = new MoverStatus();
    MoverExecutor moverExecutor = new MoverExecutor(status, conf, 10, 3);
    int failedMoves = moverExecutor.executeMove(plan);
    Assert.assertEquals(0, failedMoves);
    cluster.triggerBlockReports();
    boolean success = true;
    for (int i = 0; i < 3; i++) {
        success = true;
        // Check storage after move
        for (LocatedBlock lb : getLocatedBlocks(dfsClient, fileName)) {
            for (DatanodeInfo datanodeInfo : lb.getLocations()) {
                StorageType realType = ((DatanodeInfoWithStorage) datanodeInfo).getStorageType();
                success = realType == StorageType.SSD && success;
            }
        }
        if (success) {
            break;
        }
        Thread.sleep(500);
    }
    if (!success) {
        Assert.fail("Not the expected storage type SSD.");
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) URI(java.net.URI) FileMovePlan(org.smartdata.model.action.FileMovePlan) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) Test(org.junit.Test)

Example 4 with DatanodeInfoWithStorage

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage in project SSM by Intel-bigdata.

From the class CheckStorageAction, method execute:

@Override
protected void execute() throws Exception {
    if (fileName == null) {
        throw new IllegalArgumentException("File parameter is missing! ");
    }
    HdfsFileStatus fileStatus = dfsClient.getFileInfo(fileName);
    if (fileStatus == null) {
        throw new ActionException("File does not exist.");
    }
    if (fileStatus.isDir()) {
        appendResult("This is a directory which has no storage result!");
        // Append to log for the convenience of UI implementation
        appendLog("This is a directory which has no storage result!");
        return;
    }
    long length = fileStatus.getLen();
    List<LocatedBlock> locatedBlocks = dfsClient.getLocatedBlocks(fileName, 0, length).getLocatedBlocks();
    if (locatedBlocks.size() == 0) {
        appendResult("File '" + fileName + "' has no blocks.");
        appendLog("File '" + fileName + "' has no blocks.");
        return;
    }
    for (LocatedBlock locatedBlock : locatedBlocks) {
        StringBuilder blockInfo = new StringBuilder();
        blockInfo.append("File offset = ").append(locatedBlock.getStartOffset()).append(", ");
        blockInfo.append("Block locations = {");
        for (DatanodeInfo datanodeInfo : locatedBlock.getLocations()) {
            blockInfo.append(datanodeInfo.getName());
            if (datanodeInfo instanceof DatanodeInfoWithStorage) {
                blockInfo.append("[").append(((DatanodeInfoWithStorage) datanodeInfo).getStorageType()).append("]");
            }
            blockInfo.append(" ");
        }
        blockInfo.append("}");
        appendResult(blockInfo.toString());
        appendLog(blockInfo.toString());
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) ActionException(org.smartdata.action.ActionException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
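
Example 3 casts every location to DatanodeInfoWithStorage unconditionally, while CheckStorageAction guards the cast with instanceof and falls back to printing just the datanode name. A small hypothetical utility (StorageTypes is an invented name) that makes the defensive variant of the pattern reusable:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;

final class StorageTypes {

    private StorageTypes() {
    }

    /** Returns the storage type of a replica location, or null if it is not exposed. */
    static StorageType storageTypeOf(DatanodeInfo datanodeInfo) {
        // Only DatanodeInfoWithStorage carries per-replica storage information;
        // a plain DatanodeInfo does not, so callers must handle the null case.
        return (datanodeInfo instanceof DatanodeInfoWithStorage)
                ? ((DatanodeInfoWithStorage) datanodeInfo).getStorageType()
                : null;
    }
}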

Aggregations

DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage): 4
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
URI (java.net.URI): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2
Test (org.junit.Test): 2
FileMovePlan (org.smartdata.model.action.FileMovePlan): 2
IOException (java.io.IOException): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 1
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 1
StorageAndBlockIndex (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex): 1
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 1
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 1
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 1
ActionException (org.smartdata.action.ActionException): 1