
Example 26 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestDFSStripedInputStream, the method testStatefulRead:

private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket) throws Exception {
    final int numBlocks = 2;
    final int fileSize = numBlocks * blockGroupSize;
    if (cellMisalignPacket) {
        conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT + 1);
        tearDown();
        setup();
    }
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, fileSize);
    assert lbs.getLocatedBlocks().size() == numBlocks;
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        assert lb instanceof LocatedStripedBlock;
        LocatedStripedBlock bg = (LocatedStripedBlock) (lb);
        for (int i = 0; i < dataBlocks; i++) {
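            // Inject one simulated internal block per data index; the i-th
            // internal block's ID is the block group's ID plus i.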
            Block blk = new Block(bg.getBlock().getBlockId() + i, stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
            blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
            cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
        }
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    byte[] expected = new byte[fileSize];
    for (LocatedBlock bg : lbs.getLocatedBlocks()) {
        // A variation of DFSTestUtil#fillExpectedBuf for striped blocks.
        for (int i = 0; i < stripesPerBlock; i++) {
            for (int j = 0; j < dataBlocks; j++) {
                for (int k = 0; k < cellSize; k++) {
                    int posInBlk = i * cellSize + k;
                    int posInFile = (int) bg.getStartOffset() + i * cellSize * dataBlocks + j * cellSize + k;
                    expected[posInFile] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
                }
            }
        }
    }
    if (useByteBuffer) {
        ByteBuffer readBuffer = ByteBuffer.allocate(fileSize);
        int done = 0;
        while (done < fileSize) {
            int ret = in.read(readBuffer);
            assertTrue(ret > 0);
            done += ret;
        }
        assertArrayEquals(expected, readBuffer.array());
    } else {
        byte[] readBuffer = new byte[fileSize];
        int done = 0;
        while (done < fileSize) {
            int ret = in.read(readBuffer, done, fileSize - done);
            assertTrue(ret > 0);
            done += ret;
        }
        assertArrayEquals(expected, readBuffer);
    }
    fs.delete(filePath, true);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) ByteBuffer(java.nio.ByteBuffer)
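
The expected-buffer loop above encodes the striped layout: byte k of stripe i in internal data block j lands at file offset startOffset + i * cellSize * dataBlocks + j * cellSize + k. Below is a minimal standalone sketch of that mapping, assuming illustrative cellSize and dataBlocks values rather than the test's actual erasure coding policy.

public class StripedOffsetSketch {

    // Illustrative values; in the test they come from the EC policy (e.g. RS(6,3)).
    static final int CELL_SIZE = 64 * 1024;
    static final int DATA_BLOCKS = 6;

    // File offset of byte k within stripe i of internal data block j.
    static long posInFile(long blockGroupStartOffset, int i, int j, int k) {
        return blockGroupStartOffset + (long) i * CELL_SIZE * DATA_BLOCKS + (long) j * CELL_SIZE + k;
    }

    public static void main(String[] args) {
        // First byte of stripe 1 in internal block 1: one full stripe row
        // (6 cells) plus one more cell.
        System.out.println(posInFile(0, 1, 1, 0)); // 458752 = 7 * 65536
    }
}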

Example 27 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestDFSStripedInputStream, the method testRefreshBlock:

/**
   * Test {@link DFSStripedInputStream#getBlockAt(long)}.
   */
@Test
public void testRefreshBlock() throws Exception {
    final int numBlocks = 4;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize * numBlocks);
    final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    List<LocatedBlock> lbList = lbs.getLocatedBlocks();
    for (LocatedBlock aLbList : lbList) {
        LocatedStripedBlock lsb = (LocatedStripedBlock) aLbList;
        LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb, cellSize, dataBlocks, parityBlocks);
        for (int j = 0; j < dataBlocks; j++) {
            LocatedBlock refreshed = in.refreshLocatedBlock(blks[j]);
            assertEquals(blks[j].getBlock(), refreshed.getBlock());
            assertEquals(blks[j].getStartOffset(), refreshed.getStartOffset());
            assertArrayEquals(blks[j].getLocations(), refreshed.getLocations());
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
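
StripedBlockUtil.parseStripedBlockGroup expands a LocatedStripedBlock into its internal blocks. The ID scheme it relies on is visible in Example 26's injection loop: the i-th internal block's ID is the block group's ID plus i, data blocks first, then parity. A small sketch of that arithmetic, using an illustrative group ID (the convention here is an assumption based on the tests above, not the library implementation itself):

public class InternalBlockIdSketch {

    public static void main(String[] args) {
        // Illustrative striped block group ID; real group IDs in HDFS are
        // negative, with the low bits left free for the internal index.
        long blockGroupId = -9223372036854775792L;
        int dataBlocks = 6;
        int parityBlocks = 3;
        for (int idx = 0; idx < dataBlocks + parityBlocks; idx++) {
            System.out.println("internal block " + idx + " -> id " + (blockGroupId + idx));
        }
    }
}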

Example 28 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestReadStripedFileWithDecoding, the method corruptBlocks:

private void corruptBlocks(Path srcPath, int dataBlkDelNum, int parityBlkDelNum, boolean deleteBlockFile) throws IOException {
    int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
    LocatedBlocks locatedBlocks = getLocatedBlocks(srcPath);
    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, dataBlocks, dataBlkDelNum);
    Assert.assertNotNull(delDataBlkIndices);
    int[] delParityBlkIndices = StripedFileTestUtil.randomArray(dataBlocks, dataBlocks + parityBlocks, parityBlkDelNum);
    Assert.assertNotNull(delParityBlkIndices);
    int[] delBlkIndices = new int[recoverBlkNum];
    System.arraycopy(delDataBlkIndices, 0, delBlkIndices, 0, delDataBlkIndices.length);
    System.arraycopy(delParityBlkIndices, 0, delBlkIndices, delDataBlkIndices.length, delParityBlkIndices.length);
    ExtendedBlock[] delBlocks = new ExtendedBlock[recoverBlkNum];
    for (int i = 0; i < recoverBlkNum; i++) {
        delBlocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlocks, delBlkIndices[i]);
        if (deleteBlockFile) {
            // delete the block file
            cluster.corruptBlockOnDataNodesByDeletingBlockFile(delBlocks[i]);
        } else {
            // corrupt the block file
            cluster.corruptBlockOnDataNodes(delBlocks[i]);
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
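
Note that callers of corruptBlocks must keep dataBlkDelNum + parityBlkDelNum within the parity tolerance: an RS(d, p) group can reconstruct at most p missing internal blocks. A hypothetical invocation from inside the same test class, assuming an RS(6,3) policy (the path and argument values are illustrative):

// Delete the block files of 2 data blocks and 1 parity block;
// 3 <= parityBlocks, so the striped file stays decodable.
Path srcPath = new Path("/striped/testfile");
corruptBlocks(srcPath, 2, 1, true);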

Example 29 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestReadStripedFileWithDecoding, the method testInvalidateBlock:

@Test
public void testInvalidateBlock() throws IOException {
    final Path file = new Path("/invalidate");
    final int length = 10;
    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
    DFSTestUtil.writeFile(fs, file, bytes);
    int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
    Assert.assertNotEquals(-1, dnIndex);
    LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient().getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb, cellSize, dataBlocks, parityBlocks);
    final Block b = blks[0].getBlock().getLocalBlock();
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    // Disable heartbeats from the DataNode so that the invalidated block record
    // is kept on the NameNode until the heartbeat expires and the NN marks the
    // DN as dead.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    try {
        // delete the file
        fs.delete(file, true);
        // check the block is added to invalidateBlocks
        final FSNamesystem fsn = cluster.getNamesystem();
        final BlockManager bm = fsn.getBlockManager();
        DatanodeDescriptor dnd = NameNodeAdapter.getDatanode(fsn, dn.getDatanodeId());
        Assert.assertTrue(bm.containsInvalidateBlock(blks[0].getLocations()[0], b) || dnd.containsInvalidateBlock(b));
    } finally {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)

Example 30 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestReconstructStripedFile, the method assertFileBlocksReconstruction:

/**
   * Test the file blocks reconstruction.
   * 1. Check that the replica is reconstructed on the target datanode,
   *    and verify the block replica length, generationStamp and content.
   * 2. Read the file and verify content.
   */
private void assertFileBlocksReconstruction(String fileName, int fileLen, ReconstructionType type, int toRecoverBlockNum) throws Exception {
    if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
        Assert.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
    }
    assertTrue("File length must be positive.", fileLen > 0);
    Path file = new Path(fileName);
    final byte[] data = new byte[fileLen];
    Arrays.fill(data, (byte) 1);
    DFSTestUtil.writeFile(fs, file, data);
    StripedFileTestUtil.waitBlockGroupsReported(fs, fileName);
    LocatedBlocks locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    assertEquals(locatedBlocks.getFileLength(), fileLen);
    LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    DatanodeInfo[] storageInfos = lastBlock.getLocations();
    byte[] indices = lastBlock.getBlockIndices();
    BitSet bitset = new BitSet(dnNum);
    for (DatanodeInfo storageInfo : storageInfos) {
        bitset.set(dnMap.get(storageInfo));
    }
    int[] dead = generateDeadDnIndices(type, toRecoverBlockNum, indices);
    LOG.info("Note: indices == " + Arrays.toString(indices) + ". Generate errors on datanodes: " + Arrays.toString(dead));
    DatanodeInfo[] dataDNs = new DatanodeInfo[toRecoverBlockNum];
    int[] deadDnIndices = new int[toRecoverBlockNum];
    ExtendedBlock[] blocks = new ExtendedBlock[toRecoverBlockNum];
    File[] replicas = new File[toRecoverBlockNum];
    long[] replicaLengths = new long[toRecoverBlockNum];
    File[] metadatas = new File[toRecoverBlockNum];
    byte[][] replicaContents = new byte[toRecoverBlockNum][];
    Map<ExtendedBlock, DataNode> errorMap = new HashMap<>(dead.length);
    for (int i = 0; i < toRecoverBlockNum; i++) {
        dataDNs[i] = storageInfos[dead[i]];
        deadDnIndices[i] = dnMap.get(dataDNs[i]);
        // Check the block replica file on the datanode before it is taken down.
        blocks[i] = StripedBlockUtil.constructInternalBlock(lastBlock.getBlock(), cellSize, dataBlkNum, indices[dead[i]]);
        errorMap.put(blocks[i], cluster.getDataNodes().get(deadDnIndices[i]));
        replicas[i] = cluster.getBlockFile(deadDnIndices[i], blocks[i]);
        replicaLengths[i] = replicas[i].length();
        metadatas[i] = cluster.getBlockMetadataFile(deadDnIndices[i], blocks[i]);
        // The replica length on the datanode should match the expected internal block length.
        assertEquals(replicaLengths[i], StripedBlockUtil.getInternalBlockLength(lastBlock.getBlockSize(), cellSize, dataBlkNum, indices[dead[i]]));
        assertTrue(metadatas[i].getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        LOG.info("replica " + i + " locates in file: " + replicas[i]);
        replicaContents[i] = DFSTestUtil.readFileAsBytes(replicas[i]);
    }
    int lastGroupDataLen = fileLen % (dataBlkNum * blockSize);
    int lastGroupNumBlk = lastGroupDataLen == 0 ? dataBlkNum : Math.min(dataBlkNum, ((lastGroupDataLen - 1) / cellSize + 1));
    int groupSize = lastGroupNumBlk + parityBlkNum;
    // Shut down datanodes or generate corruption.
    int stoppedDN = generateErrors(errorMap, type);
    // Check the locatedBlocks of the file again
    locatedBlocks = StripedFileTestUtil.getLocatedBlocks(file, fs);
    lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
    storageInfos = lastBlock.getLocations();
    assertEquals(storageInfos.length, groupSize - stoppedDN);
    int[] targetDNs = new int[dnNum - groupSize];
    int n = 0;
    for (int i = 0; i < dnNum; i++) {
        if (!bitset.get(i)) {
            // This datanode does not hold a replica of the block group.
            targetDNs[n++] = i;
        }
    }
    StripedFileTestUtil.waitForReconstructionFinished(file, fs, groupSize);
    targetDNs = sortTargetsByReplicas(blocks, targetDNs);
    // Check the replica on the new target node.
    for (int i = 0; i < toRecoverBlockNum; i++) {
        File replicaAfterReconstruction = cluster.getBlockFile(targetDNs[i], blocks[i]);
        LOG.info("replica after reconstruction " + replicaAfterReconstruction);
        File metadataAfterReconstruction = cluster.getBlockMetadataFile(targetDNs[i], blocks[i]);
        assertEquals(replicaLengths[i], replicaAfterReconstruction.length());
        LOG.info("replica before " + replicas[i]);
        assertTrue(metadataAfterReconstruction.getName().endsWith(blocks[i].getGenerationStamp() + ".meta"));
        byte[] replicaContentAfterReconstruction = DFSTestUtil.readFileAsBytes(replicaAfterReconstruction);
        Assert.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashMap(java.util.HashMap) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BitSet(java.util.BitSet) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) File(java.io.File)
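
The last-group arithmetic in the middle of assertFileBlocksReconstruction is easy to misread, so here is the same computation as a standalone sketch with a worked example. The RS(6,3) numbers are illustrative assumptions, not the test's actual configuration.

public class LastGroupSketch {

    // Illustrative RS(6,3) layout values.
    static final int DATA_BLK_NUM = 6;
    static final int PARITY_BLK_NUM = 3;
    static final int CELL_SIZE = 64 * 1024;
    static final int BLOCK_SIZE = 4 * CELL_SIZE; // 4 stripes per internal block

    public static void main(String[] args) {
        // A file of one full block group plus three cells and 100 bytes.
        int fileLen = DATA_BLK_NUM * BLOCK_SIZE + 3 * CELL_SIZE + 100;
        int lastGroupDataLen = fileLen % (DATA_BLK_NUM * BLOCK_SIZE);
        // A partial group only occupies as many internal data blocks as its
        // data touches; a group ending exactly on a boundary uses all of them.
        int lastGroupNumBlk = lastGroupDataLen == 0
            ? DATA_BLK_NUM
            : Math.min(DATA_BLK_NUM, (lastGroupDataLen - 1) / CELL_SIZE + 1);
        int groupSize = lastGroupNumBlk + PARITY_BLK_NUM;
        System.out.println(lastGroupDataLen); // 196708 (3 cells + 100 bytes)
        System.out.println(lastGroupNumBlk);  // 4: that data touches 4 data blocks
        System.out.println(groupSize);        // 7 internal blocks in the last group
    }
}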

Aggregations

LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 43
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 26
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 20
Test (org.junit.Test): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 17
Path (org.apache.hadoop.fs.Path): 10
Block (org.apache.hadoop.hdfs.protocol.Block): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7
Token (org.apache.hadoop.security.token.Token): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 6
HashMap (java.util.HashMap): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 5
IOException (java.io.IOException): 4
ArrayList (java.util.ArrayList): 4
BitSet (java.util.BitSet): 4
StorageType (org.apache.hadoop.fs.StorageType): 4
File (java.io.File): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3