
Example 31 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in the Apache hadoop project.

From the class JsonUtilClient, method toLocatedBlock.

/** Convert a Json map to LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
    if (m == null) {
        return null;
    }
    // The block, its replica locations, and any cached replicas are nested maps/lists.
    final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
    final DatanodeInfo[] locations = toDatanodeInfoArray(getList(m, "locations"));
    // JSON numbers may deserialize as Integer or Long, so widen through Number.
    final long startOffset = ((Number) m.get("startOffset")).longValue();
    final boolean isCorrupt = (Boolean) m.get("isCorrupt");
    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(getList(m, "cachedLocations"));
    final StorageType[] storageTypes = toStorageTypeArray(getList(m, "storageTypes"));
    // Storage IDs are not carried in the JSON form, hence the null third argument.
    final LocatedBlock locatedblock = new LocatedBlock(b, locations, null, storageTypes, startOffset, isCorrupt, cachedLocations);
    locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
    return locatedblock;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Map (java.util.Map)
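
The same null-tolerant map-to-object pattern is easy to exercise outside Hadoop. The sketch below is a minimal standalone illustration, assuming a hypothetical BlockInfo holder type (not part of the Hadoop API): a null input map yields null, just as in toLocatedBlock, and numeric fields are widened through Number because a JSON parser may hand back Integer or Long.

import java.util.List;
import java.util.Map;

// Minimal sketch of the null-tolerant JSON-map conversion pattern; BlockInfo is hypothetical.
final class JsonMapSketch {

    static final class BlockInfo {
        final long startOffset;
        final boolean corrupt;
        final List<?> locations;

        BlockInfo(long startOffset, boolean corrupt, List<?> locations) {
            this.startOffset = startOffset;
            this.corrupt = corrupt;
            this.locations = locations;
        }
    }

    // Convert a parsed JSON map to a BlockInfo; a null map yields null, as in toLocatedBlock.
    static BlockInfo toBlockInfo(Map<?, ?> m) {
        if (m == null) {
            return null;
        }
        // JSON numbers may arrive as Integer or Long, so widen through Number.
        final long startOffset = ((Number) m.get("startOffset")).longValue();
        final boolean corrupt = (Boolean) m.get("isCorrupt");
        final List<?> locations = (List<?>) m.get("locations");
        return new BlockInfo(startOffset, corrupt, locations);
    }
}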

Example 32 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in the Apache hadoop project.

From the class PBHelperClient, method convertLocatedBlockProto.

public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
    if (proto == null)
        return null;
    List<DatanodeInfoProto> locs = proto.getLocsList();
    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
    for (int i = 0; i < locs.size(); i++) {
        targets[i] = convert(locs.get(i));
    }
    final StorageType[] storageTypes = convertStorageTypes(proto.getStorageTypesList(), locs.size());
    final int storageIDsCount = proto.getStorageIDsCount();
    final String[] storageIDs;
    if (storageIDsCount == 0) {
        // An absent storage-ID list is represented as null rather than an empty array.
        storageIDs = null;
    } else {
        Preconditions.checkState(storageIDsCount == locs.size());
        storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
    }
    // Block indices are only present for striped (erasure-coded) blocks.
    byte[] indices = null;
    if (proto.hasBlockIndices()) {
        indices = proto.getBlockIndices().toByteArray();
    }
    // Set values from the isCached list, re-using references from loc
    List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
    List<Boolean> isCachedList = proto.getIsCachedList();
    for (int i = 0; i < isCachedList.size(); i++) {
        if (isCachedList.get(i)) {
            cachedLocs.add(targets[i]);
        }
    }
    final LocatedBlock lb;
    // The presence of block indices distinguishes a striped block from a replicated one.
    if (indices == null) {
        lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    } else {
        lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs, storageTypes, indices, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
        List<TokenProto> tokenProtos = proto.getBlockTokensList();
        Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
        ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
    }
    lb.setBlockToken(convert(proto.getBlockToken()));
    return lb;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), TokenProto (org.apache.hadoop.security.proto.SecurityProtos.TokenProto), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Token (org.apache.hadoop.security.token.Token), ByteString (com.google.protobuf.ByteString), DatanodeInfoProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)
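
One detail worth isolating from convertLocatedBlockProto is how the cached-locations array is built: a parallel list of booleans selects entries from the already-converted targets array, re-using the same DatanodeInfo references instead of converting them twice. A minimal sketch of that pattern, with String standing in for DatanodeInfo (an assumption made for brevity):

import java.util.ArrayList;
import java.util.List;

// Sketch of the parallel-list filter used for cachedLocs above; String stands in for DatanodeInfo.
final class CachedLocsSketch {

    // Select targets[i] wherever isCached.get(i) is true, re-using the existing references.
    static String[] selectCached(String[] targets, List<Boolean> isCached) {
        List<String> cached = new ArrayList<>(targets.length);
        for (int i = 0; i < isCached.size(); i++) {
            if (isCached.get(i)) {
                cached.add(targets[i]); // no copy: same reference as in targets
            }
        }
        return cached.toArray(new String[0]);
    }

    public static void main(String[] args) {
        String[] targets = {"dn1", "dn2", "dn3"};
        List<Boolean> isCached = List.of(true, false, true);
        // Prints [dn1, dn3]
        System.out.println(java.util.Arrays.toString(selectCached(targets, isCached)));
    }
}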

Example 33 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in the Apache hadoop project.

From the class Receiver, method opTransferBlock.

/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
    final OpTransferBlockProto proto = OpTransferBlockProto.parseFrom(vintPrefixed(in));
    final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
    TraceScope traceScope = continueTraceSpan(proto.getHeader(), proto.getClass().getSimpleName());
    try {
        transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()), PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), targets, PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
    } finally {
        // continueTraceSpan may return null (e.g. when tracing is disabled), hence the guard.
        if (traceScope != null)
            traceScope.close();
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), TraceScope (org.apache.htrace.core.TraceScope), OpTransferBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockProto)
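
The proto here is framed with a length prefix that vintPrefixed() strips before parseFrom runs. Assuming the prefix is a protobuf-style base-128 varint (which is how Hadoop's data-transfer protocol frames these messages), a standalone sketch of that framing looks like this; readVarint32 and readFrame are illustrative names, not Hadoop APIs:

import java.io.DataInputStream;
import java.io.IOException;

// Sketch of varint-prefixed framing (assumption: protobuf's base-128 varint encoding).
final class VarintFrameSketch {

    // Read a protobuf-style unsigned varint32: 7 payload bits per byte, high bit = continuation.
    static int readVarint32(DataInputStream in) throws IOException {
        int result = 0;
        for (int shift = 0; shift < 32; shift += 7) {
            final int b = in.readUnsignedByte();
            result |= (b & 0x7f) << shift;
            if ((b & 0x80) == 0) {
                return result;
            }
        }
        throw new IOException("varint32 too long");
    }

    // Read one length-prefixed frame: the varint length, then exactly that many payload bytes.
    static byte[] readFrame(DataInputStream in) throws IOException {
        byte[] payload = new byte[readVarint32(in)];
        in.readFully(payload);
        return payload;
    }
}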

Example 34 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in the Apache hadoop project.

From the class DFSTestUtil, method waitForDecommission.

/**
   * Wait up to 20s for the given DN (IP:port) to be decommissioned.
   */
public static void waitForDecommission(FileSystem fs, String name) throws IOException, InterruptedException, TimeoutException {
    DatanodeInfo dn = null;
    int count = 0;
    final int ATTEMPTS = 20;
    do {
        Thread.sleep(1000);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        for (DatanodeInfo info : dfs.getDataNodeStats()) {
            if (name.equals(info.getXferAddr())) {
                dn = info;
            }
        }
        count++;
    } while ((dn == null || dn.isDecommissionInProgress() || !dn.isDecommissioned()) && count < ATTEMPTS);
    // Re-check the condition itself rather than the attempt counter, so a node that
    // finishes decommissioning on the final attempt is not misreported as a timeout.
    if (dn == null || !dn.isDecommissioned()) {
        throw new TimeoutException("Timed out waiting for datanode " + name + " to decommission.");
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), TimeoutException (java.util.concurrent.TimeoutException)
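
waitForDecommission is an instance of a generic poll-until-true loop: sleep, test, count attempts, and only report a timeout when the condition itself is still false. A minimal reusable sketch of that pattern (waitFor is an illustrative name, not a Hadoop helper):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

// Sketch of the poll-until-true pattern behind waitForDecommission; names are illustrative.
final class PollSketch {

    // Poll the condition once per intervalMs, up to attempts times, then time out.
    static void waitFor(BooleanSupplier condition, int attempts, long intervalMs)
            throws InterruptedException, TimeoutException {
        for (int i = 0; i < attempts; i++) {
            Thread.sleep(intervalMs);
            if (condition.getAsBoolean()) {
                return; // condition met, even on the final attempt
            }
        }
        throw new TimeoutException("condition not met after " + attempts + " attempts");
    }
}

Checking the condition rather than the attempt counter avoids the edge case fixed above, where success on the last attempt would otherwise be reported as a timeout.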

Example 35 with DatanodeInfo

Use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in the Apache hadoop project.

From the class TestMaintenanceState, method testTransitionFromDecommissionedAndExpired.

/**
   * Transition from decommissioned state to maintenance state.
   * After the maintenance state expires, it is transitioned to NORMAL.
   */
@Test(timeout = 360000)
public void testTransitionFromDecommissionedAndExpired() throws IOException {
    LOG.info("Starting testTransitionFromDecommissionedAndExpired");
    final Path file = new Path("/testTransitionFromDecommissionedAndExpired.dat");
    startCluster(1, 4);
    final FileSystem fileSys = getCluster().getFileSystem(0);
    writeFile(fileSys, file, 3, 1);
    final DatanodeInfo nodeOutofService = takeNodeOutofService(0, null, 0, null, AdminStates.DECOMMISSIONED);
    takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Long.MAX_VALUE, null, AdminStates.IN_MAINTENANCE);
    // Adjust the expiration.
    takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Time.now() + EXPIRATION_IN_MS, null, AdminStates.NORMAL);
    cleanupFile(fileSys, file);
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
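
Note that takeNodeOutofService takes the maintenance expiration as an absolute wall-clock time, which is why the test passes Time.now() + EXPIRATION_IN_MS rather than a duration. A tiny sketch of that convention (illustrative names, not Hadoop APIs):

// Sketch: maintenance expiration is an absolute deadline, not a relative duration.
final class ExpiryDeadlineSketch {

    // Mirrors Time.now() + EXPIRATION_IN_MS in the test above.
    static long deadline(long expirationInMs) {
        return System.currentTimeMillis() + expirationInMs;
    }

    // The deadline has passed once the wall clock reaches it.
    static boolean expired(long deadlineMs) {
        return System.currentTimeMillis() >= deadlineMs;
    }
}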

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214 uses
Test (org.junit.Test): 103 uses
Path (org.apache.hadoop.fs.Path): 91 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73 uses
IOException (java.io.IOException): 47 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 44 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43 uses
ArrayList (java.util.ArrayList): 39 uses
Configuration (org.apache.hadoop.conf.Configuration): 38 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25 uses
InetSocketAddress (java.net.InetSocketAddress): 20 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20 uses
StorageType (org.apache.hadoop.fs.StorageType): 18 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14 uses
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14 uses