Example 1 with StorageAndBlockIndex

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex in project hadoop by apache.

From the class NamenodeFsck, method getReplicaInfo:

/**
   * Display info of each replica of a replicated block.
   * For a striped block group, display info of each internal block.
   */
private String getReplicaInfo(BlockInfo storedBlock) {
    if (!(showLocations || showRacks || showReplicaDetails || showUpgradeDomains)) {
        return "";
    }
    final boolean isComplete = storedBlock.isComplete();
    Iterator<DatanodeStorageInfo> storagesItr;
    StringBuilder sb = new StringBuilder(" [");
    final boolean isStriped = storedBlock.isStriped();
    Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
    if (isComplete) {
        if (isStriped) {
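            // For a striped group, internal block i has ID (group ID + i),
            // so each storage can be mapped to the internal block it holds.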
            long blockId = storedBlock.getBlockId();
            Iterable<StorageAndBlockIndex> sis = ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
            for (StorageAndBlockIndex si : sis) {
                storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
            }
        }
        storagesItr = storedBlock.getStorageInfos();
    } else {
        storagesItr = storedBlock.getUnderConstructionFeature().getExpectedStorageLocationsIterator();
    }
    while (storagesItr.hasNext()) {
        DatanodeStorageInfo storage = storagesItr.next();
        if (isStriped && isComplete) {
            long index = storage2Id.get(storage);
            sb.append("blk_" + index + ":");
        }
        DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
        if (showRacks) {
            sb.append(NodeBase.getPath(dnDesc));
        } else {
            sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage.getStorageType()));
        }
        if (showUpgradeDomains) {
            String upgradeDomain = (dnDesc.getUpgradeDomain() != null) ? dnDesc.getUpgradeDomain() : UNDEFINED;
            sb.append("(ud=" + upgradeDomain + ")");
        }
        if (showReplicaDetails) {
            Collection<DatanodeDescriptor> corruptReplicas = blockManager.getCorruptReplicas(storedBlock);
            sb.append("(");
            if (dnDesc.isDecommissioned()) {
                sb.append("DECOMMISSIONED)");
            } else if (dnDesc.isDecommissionInProgress()) {
                sb.append("DECOMMISSIONING)");
            } else if (this.showMaintenanceState && dnDesc.isEnteringMaintenance()) {
                sb.append("ENTERING MAINTENANCE)");
            } else if (this.showMaintenanceState && dnDesc.isInMaintenance()) {
                sb.append("IN MAINTENANCE)");
            } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
                sb.append("CORRUPT)");
            } else if (blockManager.isExcess(dnDesc, storedBlock)) {
                sb.append("EXCESS)");
            } else if (dnDesc.isStale(this.staleInterval)) {
                sb.append("STALE_NODE)");
            } else if (storage.areBlockContentsStale()) {
                sb.append("STALE_BLOCK_CONTENT)");
            } else {
                sb.append("LIVE)");
            }
        }
        if (storagesItr.hasNext()) {
            sb.append(", ");
        }
    }
    sb.append(']');
    return sb.toString();
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), HashMap (java.util.HashMap), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage), StorageAndBlockIndex (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex)
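
The striped branch above labels each storage with an internal block ID computed as the block group ID plus that replica's index within the group (blockId + si.getBlockIndex()). Below is a minimal standalone sketch of that labeling scheme; the group ID value and the plain String keys standing in for DatanodeStorageInfo are made up for illustration:

import java.util.LinkedHashMap;
import java.util.Map;

public class StripedBlockIdSketch {

    public static void main(String[] args) {
        // Hypothetical block group ID, chosen only for illustration.
        long groupId = 1000L;
        // 9 internal blocks, e.g. RS(6,3): 6 data + 3 parity.
        int internalBlocks = 9;
        // Analogue of the storage2Id map in getReplicaInfo(), keyed by a
        // placeholder storage name instead of DatanodeStorageInfo.
        Map<String, Long> storage2Id = new LinkedHashMap<>();
        for (int i = 0; i < internalBlocks; i++) {
            // Internal block i of the group gets ID (groupId + i).
            storage2Id.put("storage-" + i, groupId + i);
        }
        // Prints "storage-0 -> blk_1000" through "storage-8 -> blk_1008".
        storage2Id.forEach((storage, id) ->
                System.out.println(storage + " -> blk_" + id));
    }
}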

Example 2 with StorageAndBlockIndex

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex in project hadoop by apache.

From the class BlockManager, method countReplicasForStripedBlock:

/**
   * For a striped block, it is possible that it contains the full number of
   * internal blocks (i.e., 9 by default) but with duplicated replicas of the
   * same internal block. E.g., for the following list of internal blocks,
   * b0, b0, b1, b2, b3, b4, b5, b6, b7,
   * we have 9 internal blocks but actually miss b8.
   * We should use this method to detect the above scenario and schedule
   * necessary reconstruction.
   */
private void countReplicasForStripedBlock(NumberReplicas counters, BlockInfoStriped block, Collection<DatanodeDescriptor> nodesCorrupt, boolean inStartupSafeMode) {
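    // One bit per internal block index; a set bit means a live replica with
    // that index has already been counted.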
    BitSet bitSet = new BitSet(block.getTotalBlockNum());
    for (StorageAndBlockIndex si : block.getStorageAndIndexInfos()) {
        StoredReplicaState state = checkReplicaOnStorage(counters, block, si.getStorage(), nodesCorrupt, inStartupSafeMode);
        if (state == StoredReplicaState.LIVE) {
            if (!bitSet.get(si.getBlockIndex())) {
                bitSet.set(si.getBlockIndex());
            } else {
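                // A repeated index adds no erasure-coding coverage:
                // reclassify this replica from LIVE to REDUNDANT.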
                counters.subtract(StoredReplicaState.LIVE, 1);
                counters.add(StoredReplicaState.REDUNDANT, 1);
            }
        }
    }
}
Also used: StoredReplicaState (org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.StoredReplicaState), StorageAndBlockIndex (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex), BitSet (java.util.BitSet)
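
The BitSet bookkeeping above is the core of the duplicate detection: the first live replica seen for each internal block index counts as LIVE, and any repeat of an already-seen index is moved from LIVE to REDUNDANT. Here is a self-contained sketch of the same accounting, fed with the made-up index list from the javadoc (b0 appears twice, b8 is missing):

import java.util.BitSet;

public class DuplicateIndexSketch {

    public static void main(String[] args) {
        // Reported internal block indices for a 9-block group; index 0 is
        // duplicated and index 8 is absent, mirroring the javadoc example.
        int[] reported = {0, 0, 1, 2, 3, 4, 5, 6, 7};
        int totalBlocks = 9;

        BitSet seen = new BitSet(totalBlocks);
        int live = 0;
        int redundant = 0;
        for (int idx : reported) {
            if (!seen.get(idx)) {
                // First replica of this internal block: it counts as live.
                seen.set(idx);
                live++;
            } else {
                // Duplicate of an already-seen internal block: redundant.
                redundant++;
            }
        }
        int missing = totalBlocks - seen.cardinality();
        // Prints "live=8 redundant=1 missing=1".
        System.out.println("live=" + live + " redundant=" + redundant
                + " missing=" + missing);
    }
}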

Aggregations

StorageAndBlockIndex (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex): 2 usages
BitSet (java.util.BitSet): 1 usage
HashMap (java.util.HashMap): 1 usage
DatanodeInfoWithStorage (org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage): 1 usage
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 1 usage
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 1 usage
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 1 usage
StoredReplicaState (org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas.StoredReplicaState): 1 usage