
Example 56 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class DFSStripedInputStream, method refreshLocatedBlock.

/**
   * The super method {@link DFSInputStream#refreshLocatedBlock} refreshes
   * cached LocatedBlock by executing {@link DFSInputStream#getBlockAt} again.
   * This method extends the logic by first remembering the index of the
   * internal block, and re-parsing the refreshed block group with the same
   * index.
   */
@Override
protected LocatedBlock refreshLocatedBlock(LocatedBlock block) throws IOException {
    int idx = StripedBlockUtil.getBlockIndex(block.getBlock().getLocalBlock());
    LocatedBlock lb = getBlockGroupAt(block.getStartOffset());
    // If indexing information is returned, iterate through the index array
    // to find the entry for position idx in the group
    LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
    int i = 0;
    for (; i < lsb.getBlockIndices().length; i++) {
        if (lsb.getBlockIndices()[i] == idx) {
            break;
        }
    }
    if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("refreshLocatedBlock for striped blocks, offset=" + block.getStartOffset() + ". Obtained block " + lb + ", idx=" + idx);
    }
    return StripedBlockUtil.constructInternalBlock(lsb, i, cellSize, dataBlkNum, idx);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
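
The index lookup above can be isolated into a small standalone sketch. Assuming only a plain byte[] of block indices, such as LocatedStripedBlock#getBlockIndices returns, a hypothetical helper (findPosition is not part of Hadoop) would look like this; unlike the inline loop, it makes the not-found case explicit:

static int findPosition(byte[] blockIndices, int idx) {
    // Scan the block-index array of the refreshed block group for the
    // remembered internal-block index; mirrors the loop in the method above.
    for (int i = 0; i < blockIndices.length; i++) {
        if (blockIndices[i] == idx) {
            return i;
        }
    }
    // Not found: the internal block is missing from the refreshed group.
    return -1;
}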

Example 57 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class ClientNamenodeProtocolServerSideTranslatorPB, method addBlock.

@Override
public AddBlockResponseProto addBlock(RpcController controller, AddBlockRequestProto req) throws ServiceException {
    try {
        List<DatanodeInfoProto> excl = req.getExcludeNodesList();
        List<String> favor = req.getFavoredNodesList();
        EnumSet<AddBlockFlag> flags = PBHelperClient.convertAddBlockFlags(req.getFlagsList());
        LocatedBlock result = server.addBlock(req.getSrc(), req.getClientName(),
                req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null,
                (excl == null || excl.size() == 0) ? null
                        : PBHelperClient.convert(excl.toArray(new DatanodeInfoProto[excl.size()])),
                req.getFileId(),
                (favor == null || favor.size() == 0) ? null
                        : favor.toArray(new String[favor.size()]),
                flags);
        return AddBlockResponseProto.newBuilder().setBlock(PBHelperClient.convertLocatedBlock(result)).build();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
}
Also used : ServiceException(com.google.protobuf.ServiceException) AddBlockFlag(org.apache.hadoop.hdfs.AddBlockFlag) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) DatanodeInfoProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto)
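
The two ternaries in the addBlock call encode the same convention: protobuf repeated fields are never null, but the NameNode API expects null when no values were supplied. A hypothetical generic helper (emptyToNull is not part of PBHelperClient) could capture that convention in one place:

static <T> T[] emptyToNull(java.util.List<T> list, T[] array) {
    // Protobuf returns an empty list for an unset repeated field; the
    // server-side API distinguishes "none given" with null instead.
    return (list == null || list.isEmpty()) ? null : list.toArray(array);
}

// e.g. String[] favored = emptyToNull(req.getFavoredNodesList(), new String[0]);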

Example 58 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class DatanodeProtocolServerSideTranslatorPB, method reportBadBlocks.

@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller, ReportBadBlocksRequestProto request) throws ServiceException {
    List<LocatedBlockProto> lbps = request.getBlocksList();
    LocatedBlock[] blocks = new LocatedBlock[lbps.size()];
    for (int i = 0; i < lbps.size(); i++) {
        blocks[i] = PBHelperClient.convertLocatedBlockProto(lbps.get(i));
    }
    try {
        impl.reportBadBlocks(blocks);
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    return VOID_REPORT_BAD_BLOCK_RESPONSE;
}
Also used : ServiceException(com.google.protobuf.ServiceException) LocatedBlockProto(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
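
The index loop converting LocatedBlockProto entries could equally be written with the Java 8 stream API; the behavior is identical, and PBHelperClient.convertLocatedBlockProto is the same call used above:

LocatedBlock[] blocks = request.getBlocksList().stream()
        .map(PBHelperClient::convertLocatedBlockProto)
        .toArray(LocatedBlock[]::new);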

Example 59 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestDecommissionWithStriped, method assertBlockIndexAndTokenPosition.

/**
   * Verify block index and token values. Must update block indices and block
   * tokens after sorting.
   */
private void assertBlockIndexAndTokenPosition(List<LocatedBlock> lbs, List<HashMap<DatanodeInfo, Byte>> locToIndexList, List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
    for (int i = 0; i < lbs.size(); i++) {
        LocatedBlock lb = lbs.get(i);
        LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
        HashMap<DatanodeInfo, Byte> locToIndex = locToIndexList.get(i);
        HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = locToTokenList.get(i);
        DatanodeInfo[] di = lb.getLocations();
        for (int j = 0; j < di.length; j++) {
            Assert.assertEquals("Block index value mismatches after sorting", (byte) locToIndex.get(di[j]), stripedBlk.getBlockIndices()[j]);
            Assert.assertEquals("Block token value mismatches after sorting", locToToken.get(di[j]), stripedBlk.getBlockTokens()[j]);
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Token(org.apache.hadoop.security.token.Token)
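
For the assertion to be meaningful, the location-to-index and location-to-token maps have to be captured before the client sorts the block locations. A hypothetical snapshot step, using only getters that already appear in the test above, might look like:

HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = new HashMap<>();
DatanodeInfo[] locs = stripedBlk.getLocations();
for (int j = 0; j < locs.length; j++) {
    // Key both maps by datanode so the later check is order-independent.
    locToIndex.put(locs[j], stripedBlk.getBlockIndices()[j]);
    locToToken.put(locs[j], stripedBlk.getBlockTokens()[j]);
}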

Example 60 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestDecommissionWithStriped, method testDecommissionWithURBlockForSameBlockGroup.

@Test(timeout = 120000)
public void testDecommissionWithURBlockForSameBlockGroup() throws Exception {
    LOG.info("Starting test testDecommissionWithURBlocksForSameBlockGroup");
    final Path ecFile = new Path(ecDir, "testDecommissionWithCorruptBlocks");
    int writeBytes = cellSize * dataBlocks * 2;
    writeStripedFile(dfs, ecFile, writeBytes);
    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
    final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0).get(0);
    DatanodeInfo[] dnLocs = lb.getLocations();
    assertEquals(dataBlocks + parityBlocks, dnLocs.length);
    int decommNodeIndex = dataBlocks - 1;
    int stopNodeIndex = 1;
    // add the nodes which will be decommissioning
    decommisionNodes.add(dnLocs[decommNodeIndex]);
    // stop excess dns to avoid immediate reconstruction.
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    List<DataNodeProperties> stoppedDns = new ArrayList<>();
    for (DatanodeInfo liveDn : info) {
        boolean usedNode = false;
        for (DatanodeInfo datanodeInfo : dnLocs) {
            if (liveDn.getXferAddr().equals(datanodeInfo.getXferAddr())) {
                usedNode = true;
                break;
            }
        }
        if (!usedNode) {
            DataNode dn = cluster.getDataNode(liveDn.getIpcPort());
            stoppedDns.add(cluster.stopDataNode(liveDn.getXferAddr()));
            cluster.setDataNodeDead(dn.getDatanodeId());
            LOG.info("stop datanode " + dn.getDatanodeId().getHostName());
        }
    }
    DataNode dn = cluster.getDataNode(dnLocs[stopNodeIndex].getIpcPort());
    cluster.stopDataNode(dnLocs[stopNodeIndex].getXferAddr());
    cluster.setDataNodeDead(dn.getDatanodeId());
    numDNs = numDNs - 1;
    // Decommission node in a new thread. Verify that node is decommissioned.
    // Latch of 1 so the await() below actually blocks until the thread starts.
    final CountDownLatch decomStarted = new CountDownLatch(1);
    Thread decomTh = new Thread() {

        @Override
        public void run() {
            try {
                decomStarted.countDown();
                decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
            } catch (Exception e) {
                LOG.error("Exception while decommissioning", e);
                Assert.fail("Shouldn't throw exception!");
            }
        }
    };
    int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
    int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
    decomTh.start();
    decomStarted.await(5, TimeUnit.SECONDS);
    // grace period to trigger decommissioning call
    Thread.sleep(3000);
    // start datanode so that decommissioning live node will be finished
    for (DataNodeProperties dnp : stoppedDns) {
        cluster.restartDataNode(dnp);
        LOG.info("Restarts stopped datanode:{} to trigger block reconstruction", dnp.datanode);
    }
    cluster.waitActive();
    LOG.info("Waiting to finish decommissioning node:{}", decommisionNodes);
    // waiting 20secs to finish decommission
    decomTh.join(20000);
    LOG.info("Finished decommissioning node:{}", decommisionNodes);
    assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
    assertEquals(liveDecomissioned + decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
    // Ensure decommissioned datanode is not automatically shutdown
    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
    assertEquals("All datanodes must be alive", numDNs, client.datanodeReport(DatanodeReportType.LIVE).length);
    assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes, null, blockGroupSize);
    cleanupFile(dfs, ecFile);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) CountDownLatch(java.util.concurrent.CountDownLatch) IOException(java.io.IOException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Test(org.junit.Test)
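
The nested loop that decides whether a live datanode hosts one of the block's replicas could be factored into a small helper; isBlockLocation is hypothetical, not a Hadoop API:

static boolean isBlockLocation(DatanodeInfo liveDn, DatanodeInfo[] dnLocs) {
    // A datanode counts as "used" if its transfer address matches any
    // location of the block group under test.
    for (DatanodeInfo loc : dnLocs) {
        if (liveDn.getXferAddr().equals(loc.getXferAddr())) {
            return true;
        }
    }
    return false;
}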

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 206 uses
Test (org.junit.Test): 94 uses
Path (org.apache.hadoop.fs.Path): 86 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 78 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 52 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 51 uses
Configuration (org.apache.hadoop.conf.Configuration): 43 uses
IOException (java.io.IOException): 36 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25 uses
ArrayList (java.util.ArrayList): 24 uses
StorageType (org.apache.hadoop.fs.StorageType): 24 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 16 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 15 uses
InetSocketAddress (java.net.InetSocketAddress): 11 uses
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 10 uses