Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by Apache.
The class DFSStripedInputStream, method refreshLocatedBlock.
/**
* The super method {@link DFSInputStream#refreshLocatedBlock} refreshes
* cached LocatedBlock by executing {@link DFSInputStream#getBlockAt} again.
* This method extends the logic by first remembering the index of the
* internal block, and re-parsing the refreshed block group with the same
* index.
*/
@Override
protected LocatedBlock refreshLocatedBlock(LocatedBlock block)
    throws IOException {
  int idx = StripedBlockUtil.getBlockIndex(block.getBlock().getLocalBlock());
  LocatedBlock lb = getBlockGroupAt(block.getStartOffset());
  // If indexing information is returned, iterate through the index array
  // to find the entry for position idx in the group
  LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
  int i = 0;
  for (; i < lsb.getBlockIndices().length; i++) {
    if (lsb.getBlockIndices()[i] == idx) {
      break;
    }
  }
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset="
        + block.getStartOffset() + ". Obtained block " + lb + ", idx=" + idx);
  }
  return StripedBlockUtil.constructInternalBlock(lsb, i, cellSize,
      dataBlkNum, idx);
}
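The loop above relies on LocatedStripedBlock exposing a blockIndices array that runs parallel to its locations: entry i names the internal block stored at location i. A minimal, self-contained sketch of that lookup, using a plain byte array as a stand-in for the Hadoop type (an illustrative assumption, not the real API):

  // Illustrative only: 'indices' stands in for the array returned by
  // LocatedStripedBlock#getBlockIndices(); entries may appear in any order.
  static int findStorageSlot(byte[] indices, int internalBlockIdx) {
    for (int i = 0; i < indices.length; i++) {
      if (indices[i] == internalBlockIdx) {
        return i; // location slot holding the wanted internal block
      }
    }
    return indices.length; // not found; mirrors the loop above falling through
  }

In refreshLocatedBlock the loop position i is then handed to StripedBlockUtil.constructInternalBlock together with the remembered internal block index idx, so the refreshed internal block keeps the same place in the stripe.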
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by Apache.
The class ClientNamenodeProtocolServerSideTranslatorPB, method addBlock.
@Override
public AddBlockResponseProto addBlock(RpcController controller,
    AddBlockRequestProto req) throws ServiceException {
  try {
    List<DatanodeInfoProto> excl = req.getExcludeNodesList();
    List<String> favor = req.getFavoredNodesList();
    EnumSet<AddBlockFlag> flags =
        PBHelperClient.convertAddBlockFlags(req.getFlagsList());
    LocatedBlock result = server.addBlock(
        req.getSrc(), req.getClientName(),
        req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null,
        (excl == null || excl.size() == 0) ? null
            : PBHelperClient.convert(
                excl.toArray(new DatanodeInfoProto[excl.size()])),
        req.getFileId(),
        (favor == null || favor.size() == 0) ? null
            : favor.toArray(new String[favor.size()]),
        flags);
    return AddBlockResponseProto.newBuilder()
        .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
  } catch (IOException e) {
    throw new ServiceException(e);
  }
}
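Note the recurring convention in this translator: an absent or empty repeated field in the protobuf request is passed to the underlying server call as null rather than as an empty array. A small, self-contained sketch of that conversion (the helper and class names are made up for illustration):

  import java.util.Arrays;
  import java.util.Collections;
  import java.util.List;

  public final class NullableArraySketch {
    // Empty proto lists become null, mirroring the excludeNodes/favoredNodes
    // handling in addBlock above.
    static String[] toNullableArray(List<String> values) {
      return (values == null || values.isEmpty())
          ? null
          : values.toArray(new String[0]);
    }

    public static void main(String[] args) {
      System.out.println(toNullableArray(Collections.<String>emptyList()));      // null
      System.out.println(toNullableArray(Arrays.asList("dn1", "dn2")).length);   // 2
    }
  }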
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by Apache.
The class DatanodeProtocolServerSideTranslatorPB, method reportBadBlocks.
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
    ReportBadBlocksRequestProto request) throws ServiceException {
  List<LocatedBlockProto> lbps = request.getBlocksList();
  LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
  for (int i = 0; i < lbps.size(); i++) {
    blocks[i] = PBHelperClient.convertLocatedBlockProto(lbps.get(i));
  }
  try {
    impl.reportBadBlocks(blocks);
  } catch (IOException e) {
    throw new ServiceException(e);
  }
  return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
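VOID_REPORT_BAD_BLOCK_RESPONSE is a pre-built response that the translator returns on every call; because the response message carries no fields, a single immutable instance can be shared instead of building a new object per request. A stand-alone illustration of that idiom, with a plain Java class playing the role of the generated protobuf message (an assumption for the sketch, not the real generated type):

  // Stand-in for an empty, immutable protobuf response message.
  final class VoidResponse {
    static final VoidResponse INSTANCE = new VoidResponse();
    private VoidResponse() { }
  }

  class BadBlockTranslatorSketch {
    // Every successful call returns the same shared instance, mirroring
    // VOID_REPORT_BAD_BLOCK_RESPONSE in the method above.
    VoidResponse reportBadBlocks() {
      return VoidResponse.INSTANCE;
    }
  }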
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by Apache.
The class TestDecommissionWithStriped, method assertBlockIndexAndTokenPosition.
/**
* Verify block index and token values. Must update block indices and block
* tokens after sorting.
*/
private void assertBlockIndexAndTokenPosition(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (int i = 0; i < lbs.size(); i++) {
    LocatedBlock lb = lbs.get(i);
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
    HashMap<DatanodeInfo, Byte> locToIndex = locToIndexList.get(i);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        locToTokenList.get(i);
    DatanodeInfo[] di = lb.getLocations();
    for (int j = 0; j < di.length; j++) {
      Assert.assertEquals("Block index value mismatches after sorting",
          (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
      Assert.assertEquals("Block token value mismatches after sorting",
          locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
    }
  }
}
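As the javadoc says, the expected maps have to be captured before the locations are re-sorted; keying them by DatanodeInfo makes the later comparison independent of ordering. A hedged sketch of how one such map could be built from a LocatedStripedBlock before sorting, using only the getters already shown in these snippets (the helper itself is illustrative, not part of the test):

  // Illustrative helper: snapshot each location's block index before the
  // locations array is reordered.
  private static HashMap<DatanodeInfo, Byte> snapshotIndices(
      LocatedStripedBlock blk) {
    HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
    DatanodeInfo[] locs = blk.getLocations();
    byte[] indices = blk.getBlockIndices();
    for (int j = 0; j < locs.length; j++) {
      locToIndex.put(locs[j], indices[j]);
    }
    return locToIndex;
  }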
Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by Apache.
The class TestDecommissionWithStriped, method testDecommissionWithURBlockForSameBlockGroup.
@Test(timeout = 120000)
public void testDecommissionWithURBlockForSameBlockGroup() throws Exception {
  LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
  final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
  int writeBytes = cellSize * dataBlocks * 2;
  writeStripedFile(dfs, ecFile, writeBytes);
  Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
  final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
  LocatedBlock lb = dfs.getClient()
      .getLocatedBlocks(ecFile.toString(), 0).get(0);
  DatanodeInfo[] dnLocs = lb.getLocations();
  assertEquals(dataBlocks + parityBlocks, dnLocs.length);
  int decommNodeIndex = dataBlocks - 1;
  int stopNodeIndex = 1;
  // add the nodes which will be decommissioning
  decommisionNodes.add(dnLocs[decommNodeIndex]);
  // stop excess dns to avoid immediate reconstruction.
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  List<DataNodeProperties> stoppedDns = new ArrayList<>();
  for (DatanodeInfo liveDn : info) {
    boolean usedNode = false;
    for (DatanodeInfo datanodeInfo : dnLocs) {
      if (liveDn.getXferAddr().equals(datanodeInfo.getXferAddr())) {
        usedNode = true;
        break;
      }
    }
    if (!usedNode) {
      DataNode dn = cluster.getDataNode(liveDn.getIpcPort());
      stoppedDns.add(cluster.stopDataNode(liveDn.getXferAddr()));
      cluster.setDataNodeDead(dn.getDatanodeId());
      LOG.info("stop datanode " + dn.getDatanodeId().getHostName());
    }
  }
  DataNode dn = cluster.getDataNode(dnLocs[stopNodeIndex].getIpcPort());
  cluster.stopDataNode(dnLocs[stopNodeIndex].getXferAddr());
  cluster.setDataNodeDead(dn.getDatanodeId());
  numDNs = numDNs - 1;
  // Decommission node in a new thread. Verify that node is decommissioned.
  final CountDownLatch decomStarted = new CountDownLatch(0);
  Thread decomTh = new Thread() {
    public void run() {
      try {
        decomStarted.countDown();
        decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
      } catch (Exception e) {
        LOG.error("Exception while decommissioning", e);
        Assert.fail("Shouldn't throw exception!");
      }
    }
  };
  int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
  int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
  decomTh.start();
  decomStarted.await(5, TimeUnit.SECONDS);
  // grace period to trigger decommissioning call
  Thread.sleep(3000);
  // start datanode so that decommissioning live node will be finished
  for (DataNodeProperties dnp : stoppedDns) {
    cluster.restartDataNode(dnp);
    LOG.info("Restarts stopped datanode:{} to trigger block reconstruction",
        dnp.datanode);
  }
  cluster.waitActive();
  LOG.info("Waiting to finish decommissioning node:{}", decommisionNodes);
  // waiting 20secs to finish decommission
  decomTh.join(20000);
  LOG.info("Finished decommissioning node:{}", decommisionNodes);
  assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
  assertEquals(liveDecomissioned + decommisionNodes.size(),
      fsn.getNumDecomLiveDataNodes());
  // Ensure decommissioned datanode is not automatically shutdown
  DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
  assertEquals("All datanodes must be alive", numDNs,
      client.datanodeReport(DatanodeReportType.LIVE).length);
  assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
  StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
      null, blockGroupSize);
  cleanupFile(dfs, ecFile);
}
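One detail worth noting in the test above: decomStarted is created with a count of 0, so await(5, TimeUnit.SECONDS) returns immediately whether or not the decommission thread has run, and the following Thread.sleep(3000) is what actually gives the decommission call time to start. A minimal, standalone demonstration of that CountDownLatch behaviour:

  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.TimeUnit;

  public class LatchDemo {
    public static void main(String[] args) throws InterruptedException {
      // A latch created with count 0 is already open: await() never blocks.
      CountDownLatch alreadyOpen = new CountDownLatch(0);
      System.out.println(alreadyOpen.await(5, TimeUnit.SECONDS)); // prints true at once

      // A latch with count 1 opens only after countDown() is called.
      CountDownLatch gate = new CountDownLatch(1);
      new Thread(gate::countDown).start();
      gate.await(); // blocks until the other thread counts down
      System.out.println("gate opened");
    }
  }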