use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
the class JsonUtilClient method toLocatedBlock.
/** Convert a Json map to LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
  if (m == null) {
    return null;
  }
  final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
  final DatanodeInfo[] locations = toDatanodeInfoArray(getList(m, "locations"));
  final long startOffset = ((Number) m.get("startOffset")).longValue();
  final boolean isCorrupt = (Boolean) m.get("isCorrupt");
  final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(getList(m, "cachedLocations"));
  final StorageType[] storageTypes = toStorageTypeArray(getList(m, "storageTypes"));
  final LocatedBlock locatedblock = new LocatedBlock(
      b, locations, null, storageTypes, startOffset, isCorrupt, cachedLocations);
  locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
  return locatedblock;
}
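Two details in toLocatedBlock are worth calling out: the null guard lets a caller pass an absent JSON field straight through, and the ((Number) ...).longValue() cast covers the fact that a JSON parser may hand back an Integer or a Long depending on the value's size. A minimal, Hadoop-free sketch of the same map-to-object pattern (BlockView and its fields are hypothetical stand-ins, not Hadoop types):

import java.util.HashMap;
import java.util.Map;

public class MapToObjectSketch {

  /** Hypothetical value type standing in for LocatedBlock. */
  static class BlockView {
    final long startOffset;
    final boolean corrupt;

    BlockView(long startOffset, boolean corrupt) {
      this.startOffset = startOffset;
      this.corrupt = corrupt;
    }

    @Override
    public String toString() {
      return "BlockView{startOffset=" + startOffset + ", corrupt=" + corrupt + "}";
    }
  }

  // Mirrors the toLocatedBlock pattern: tolerate a missing map, and widen
  // whatever Number the JSON layer produced (Integer, Long, ...) to long.
  static BlockView toBlockView(Map<?, ?> m) {
    if (m == null) {
      return null;
    }
    final long startOffset = ((Number) m.get("startOffset")).longValue();
    final boolean corrupt = (Boolean) m.get("isCorrupt");
    return new BlockView(startOffset, corrupt);
  }

  public static void main(String[] args) {
    Map<String, Object> json = new HashMap<>();
    json.put("startOffset", 0);      // parsed as Integer; still widens cleanly to long
    json.put("isCorrupt", false);
    System.out.println(toBlockView(json));   // BlockView{startOffset=0, corrupt=false}
    System.out.println(toBlockView(null));   // null, same guard as the real method
  }
}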
use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
the class PBHelperClient method convertLocatedBlockProto.
public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
  if (proto == null) {
    return null;
  }
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = convert(locs.get(i));
  }
  final StorageType[] storageTypes =
      convertStorageTypes(proto.getStorageTypesList(), locs.size());
  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }
  byte[] indices = null;
  if (proto.hasBlockIndices()) {
    indices = proto.getBlockIndices().toByteArray();
  }
  // Set values from the isCached list, re-using references from locs
  List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }
  final LocatedBlock lb;
  if (indices == null) {
    lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs,
        storageTypes, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
  } else {
    lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs,
        storageTypes, indices, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
    Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));
  return lb;
}
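The cachedLocs loop is a small reusable idiom: a parallel boolean list (isCached) selects entries out of the already-converted targets array, so the cached view shares object references with targets instead of converting each DatanodeInfoProto a second time. A self-contained sketch of just that selection step, with illustrative names:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MaskSelectSketch {
  // Select the elements of 'converted' whose index is flagged true in 'mask',
  // re-using the same object references instead of converting them again.
  static <T> T[] select(T[] converted, List<Boolean> mask, T[] emptyArray) {
    List<T> picked = new ArrayList<>(converted.length);
    for (int i = 0; i < mask.size(); i++) {
      if (mask.get(i)) {
        picked.add(converted[i]);
      }
    }
    return picked.toArray(emptyArray);
  }

  public static void main(String[] args) {
    String[] targets = {"dn1", "dn2", "dn3"};
    List<Boolean> isCached = Arrays.asList(true, false, true);
    System.out.println(Arrays.toString(select(targets, isCached, new String[0])));
    // prints [dn1, dn3]
  }
}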
use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
the class Receiver method opTransferBlock.
/** Receive {@link Op#TRANSFER_BLOCK} */
private void opTransferBlock(DataInputStream in) throws IOException {
  final OpTransferBlockProto proto = OpTransferBlockProto.parseFrom(vintPrefixed(in));
  final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
  TraceScope traceScope = continueTraceSpan(proto.getHeader(),
      proto.getClass().getSimpleName());
  try {
    transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
        proto.getHeader().getClientName(), targets,
        PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
}
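The finally block closes the trace scope only when continueTraceSpan actually returned one. Since Java 7 the same guard can be written with try-with-resources, which skips close() for a null resource; a minimal sketch of that alternative (Scope and maybeStartScope are hypothetical stand-ins, not Hadoop APIs):

public class NullableScopeSketch {
  // Stand-in for a tracing scope; Hadoop's TraceScope is likewise closeable.
  static class Scope implements AutoCloseable {
    @Override public void close() { System.out.println("scope closed"); }
  }

  // Hypothetical helper: may legitimately return null when tracing is off.
  static Scope maybeStartScope(boolean tracingEnabled) {
    return tracingEnabled ? new Scope() : null;
  }

  static void handleOp(boolean tracingEnabled) {
    // try-with-resources calls close() only if the resource is non-null, so the
    // explicit "if (scope != null)" guard from the finally block disappears.
    try (Scope scope = maybeStartScope(tracingEnabled)) {
      System.out.println("doing the transfer work");
    }
  }

  public static void main(String[] args) {
    handleOp(true);
    handleOp(false);
  }
}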
use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
the class DFSTestUtil method waitForDecommission.
/*
 * Wait up to 20s for the given DN (IP:port) to be decommissioned
 */
public static void waitForDecommission(FileSystem fs, String name)
    throws IOException, InterruptedException, TimeoutException {
  DatanodeInfo dn = null;
  int count = 0;
  final int ATTEMPTS = 20;
  do {
    Thread.sleep(1000);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    for (DatanodeInfo info : dfs.getDataNodeStats()) {
      if (name.equals(info.getXferAddr())) {
        dn = info;
      }
    }
    count++;
  } while ((dn == null || dn.isDecommissionInProgress() || !dn.isDecommissioned())
      && count < ATTEMPTS);
  if (count == ATTEMPTS) {
    throw new TimeoutException("Timed out waiting for datanode " + name + " to decommission.");
  }
}
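The sleep/poll/attempt-limit shape above recurs throughout the HDFS tests (the Hadoop test tree also offers GenericTestUtils.waitFor for the same purpose). A small, self-contained generalization of the loop, purely illustrative:

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public class PollUntilSketch {
  // Poll 'condition' once per 'intervalMs' until it holds or 'attempts' runs out.
  static void pollUntil(BooleanSupplier condition, int attempts, long intervalMs)
      throws InterruptedException, TimeoutException {
    for (int i = 0; i < attempts; i++) {
      Thread.sleep(intervalMs);
      if (condition.getAsBoolean()) {
        return;
      }
    }
    throw new TimeoutException("Condition not met after " + attempts + " attempts");
  }

  public static void main(String[] args) throws Exception {
    long deadline = System.currentTimeMillis() + 3_000;
    // Example: wait for a (simulated) decommission to finish, checking once a second.
    pollUntil(() -> System.currentTimeMillis() >= deadline, 20, 1_000);
    System.out.println("condition reached");
  }
}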
use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.
the class TestMaintenanceState method testTransitionFromDecommissionedAndExpired.
/**
 * Transition from decommissioned state to maintenance state.
 * After the maintenance state expires, it is transitioned to NORMAL.
 */
@Test(timeout = 360000)
public void testTransitionFromDecommissionedAndExpired() throws IOException {
  LOG.info("Starting testTransitionFromDecommissionedAndExpired");
  final Path file = new Path("/testTransitionFromDecommissionedAndExpired.dat");
  startCluster(1, 4);
  final FileSystem fileSys = getCluster().getFileSystem(0);
  writeFile(fileSys, file, 3, 1);
  final DatanodeInfo nodeOutofService = takeNodeOutofService(0, null, 0, null,
      AdminStates.DECOMMISSIONED);
  takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Long.MAX_VALUE, null,
      AdminStates.IN_MAINTENANCE);
  // Adjust the expiration.
  takeNodeOutofService(0, nodeOutofService.getDatanodeUuid(), Time.now() + EXPIRATION_IN_MS,
      null, AdminStates.NORMAL);
  cleanupFile(fileSys, file);
}
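takeNodeOutofService is a helper local to TestMaintenanceState; outside that class, the admin-state transitions it asserts can be observed directly on DatanodeInfo. A hedged sketch of such a check against a running cluster (the dfs handle and the DataNode UUID are assumed to come from the caller):

import java.io.IOException;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

public class AdminStateCheckSketch {
  // Returns true if the DataNode with the given UUID currently reports the
  // expected admin state (NORMAL, DECOMMISSIONED, IN_MAINTENANCE, ...).
  static boolean isInAdminState(DistributedFileSystem dfs, String datanodeUuid,
      AdminStates expected) throws IOException {
    for (DatanodeInfo info : dfs.getDataNodeStats()) {
      if (datanodeUuid.equals(info.getDatanodeUuid())) {
        return info.getAdminState() == expected;
      }
    }
    return false;
  }
}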