
Example 36 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

The class TestAddStripedBlocks, method testGetLocatedStripedBlocks.

@Test
public void testGetLocatedStripedBlocks() throws Exception {
    final Path file = new Path("/file1");
    // create a striped file and flush some data so its last block is under construction
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        writeAndFlushStripedOutputStream((DFSStripedOutputStream) out.getWrappedStream(), DFS_BYTES_PER_CHECKSUM_DEFAULT);
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        BlockInfoStriped lastBlk = (BlockInfoStriped) fileNode.getLastBlock();
        DatanodeInfo[] expectedDNs = DatanodeStorageInfo.toDatanodeInfos(lastBlk.getUnderConstructionFeature().getExpectedStorageLocations());
        byte[] indices = lastBlk.getUnderConstructionFeature().getBlockIndices();
        LocatedBlocks blks = dfs.getClient().getLocatedBlocks(file.toString(), 0L);
        Assert.assertEquals(1, blks.locatedBlockCount());
        LocatedBlock lblk = blks.get(0);
        Assert.assertTrue(lblk instanceof LocatedStripedBlock);
        DatanodeInfo[] datanodes = lblk.getLocations();
        byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
        Assert.assertEquals(groupSize, datanodes.length);
        Assert.assertEquals(groupSize, blockIndices.length);
        Assert.assertArrayEquals(indices, blockIndices);
        Assert.assertArrayEquals(expectedDNs, datanodes);
    } finally {
        IOUtils.cleanup(null, out);
    }
}
Also used: Path(org.apache.hadoop.fs.Path) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
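
As a companion to the assertions above, here is a minimal sketch (not taken from the Hadoop tests) of pairing each internal block index with its datanode on the client side. It reuses only calls shown in the example; the "/striped/file" path is a hypothetical erasure-coded file, and dfs is assumed to be an open DistributedFileSystem.

// Hedged sketch: walk a file's located blocks and pair each internal
// block index with the datanode that stores it.
LocatedBlocks blocks = dfs.getClient().getLocatedBlocks("/striped/file", 0L);
for (LocatedBlock lb : blocks.getLocatedBlocks()) {
    if (lb instanceof LocatedStripedBlock) {
        LocatedStripedBlock sb = (LocatedStripedBlock) lb;
        DatanodeInfo[] locs = sb.getLocations();
        byte[] idx = sb.getBlockIndices();
        // idx[i] is the index of the internal block stored on locs[i];
        // the two arrays are parallel, as the assertions above rely on.
        for (int i = 0; i < locs.length; i++) {
            System.out.println("internal block " + idx[i] + " -> " + locs[i]);
        }
    }
}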

Example 37 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

The class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedStripedBlock.

@Test
public void testProcessOverReplicatedStripedBlock() throws Exception {
    // create a file with exactly one block group, placed on the first GROUP_SIZE DNs
    long fileLen = dataBlocks * blockSize;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    for (int i = 0; i < groupSize; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // let an internal block be over-replicated with 2 redundant replicas.
    blk.setBlockId(groupId);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // let an internal block be over-replicated with 1 redundant replica.
    blk.setBlockId(groupId + dataBlocks);
    cluster.injectBlocks(numDNs - 1, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize);
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
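
The injection loop above relies on the striped block ID convention: the block group ID is the base, and internal block i (data blocks first, then parity) lives at ID groupId + i. A small sketch of that arithmetic; bg is the block group fetched in the test, and the 6 data + 3 parity geometry is an assumed example.

// Sketch of the internal-block-ID convention the test exploits:
// internal block i of a group lives at ID (groupId + i).
long groupId = bg.getBlock().getBlockId();   // base ID of the block group
final int dataBlocks = 6, parityBlocks = 3;  // assumed RS-6-3 geometry
for (int i = 0; i < dataBlocks + parityBlocks; i++) {
    long internalId = groupId + i;           // cf. blk.setBlockId(groupId + i) above
    String kind = (i < dataBlocks) ? "data" : "parity";
    System.out.println(kind + " block #" + i + " has ID " + internalId);
}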

Example 38 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

The class StripedFileTestUtil, method waitForReconstructionFinished.

/**
   * Wait for the reconstruction to be finished when the file has
   * corrupted blocks.
   */
public static LocatedBlocks waitForReconstructionFinished(Path file, DistributedFileSystem fs, int groupSize) throws Exception {
    LOG.info("Waiting for reconstruction to be finished for the file:" + file + ", groupSize:" + groupSize);
    final int attempts = 60;
    for (int i = 0; i < attempts; i++) {
        LocatedBlocks locatedBlocks = getLocatedBlocks(file, fs);
        LocatedStripedBlock lastBlock = (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
        DatanodeInfo[] storageInfos = lastBlock.getLocations();
        if (storageInfos.length >= groupSize) {
            return locatedBlocks;
        }
        Thread.sleep(1000);
    }
    throw new IOException("Timed out waiting for EC block reconstruction.");
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) IOException(java.io.IOException)
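
A hedged usage sketch for this helper: after simulating a datanode failure, a test can block on it before asserting on the recovered layout. The "/ec/file1" path is hypothetical, and cluster, fs, and groupSize are assumed to come from the test's setup, as in the other examples on this page.

// Hypothetical usage; the path, cluster, fs, and groupSize are assumptions.
Path ecFile = new Path("/ec/file1");
cluster.stopDataNode(0);    // lose one internal block's replica
LocatedBlocks recovered =
    StripedFileTestUtil.waitForReconstructionFinished(ecFile, fs, groupSize);
StripedFileTestUtil.verifyLocatedStripedBlocks(recovered, groupSize);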

Example 39 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

The class TestDFSStripedInputStream, method testPread.

@Test
public void testPread() throws Exception {
    final int numBlocks = 2;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize * numBlocks);
    int fileLen = blockGroupSize * numBlocks;
    byte[] expected = new byte[fileLen];
    assertEquals(numBlocks, lbs.getLocatedBlocks().size());
    for (int bgIdx = 0; bgIdx < numBlocks; bgIdx++) {
        LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(bgIdx));
        for (int i = 0; i < dataBlocks; i++) {
            Block blk = new Block(bg.getBlock().getBlockId() + i, stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
            cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
        }
        /**
       * A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks
       */
        for (int i = 0; i < stripesPerBlock; i++) {
            for (int j = 0; j < dataBlocks; j++) {
                for (int k = 0; k < cellSize; k++) {
                    int posInBlk = i * cellSize + k;
                    int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
                    expected[bgIdx * blockGroupSize + posInFile] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
                }
            }
        }
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    int[] startOffsets = { 0, 1, cellSize - 102, cellSize, cellSize + 102, cellSize * dataBlocks, cellSize * dataBlocks + 102, blockGroupSize - 102, blockGroupSize, blockGroupSize + 102, fileLen - 1 };
    for (int startOffset : startOffsets) {
        startOffset = Math.max(0, Math.min(startOffset, fileLen - 1));
        int remaining = fileLen - startOffset;
        byte[] buf = new byte[fileLen];
        int ret = in.read(startOffset, buf, 0, fileLen);
        assertEquals(remaining, ret);
        for (int i = 0; i < remaining; i++) {
            Assert.assertEquals("Byte at " + (startOffset + i) + " should be the " + "same", expected[startOffset + i], buf[i]);
        }
    }
    in.close();
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
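
The triple loop in the test encodes the round-robin cell layout of striped files: cell k of stripe i on data block j sits at file offset i * cellSize * dataBlocks + j * cellSize + k. A worked check of that arithmetic, with cellSize and dataBlocks set to assumed values rather than whatever the test class configures:

// Worked example of the cell-layout arithmetic; 64 KiB cells and 6 data
// blocks are assumed values, not necessarily the test's configuration.
int cellSize = 64 * 1024;
int dataBlocks = 6;
int i = 2, j = 3, k = 100;       // stripe 2, data block 3, byte 100
int posInBlk = i * cellSize + k; // 131172: offset inside the internal block
int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
System.out.println(posInBlk + " -> " + posInFile);  // prints "131172 -> 983140"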

Example 40 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

The class TestDFSStripedInputStream, method testPreadWithDNFailure.

@Test
public void testPreadWithDNFailure() throws Exception {
    final int numBlocks = 4;
    final int failedDNIdx = dataBlocks - 1;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize);
    assert lbs.get(0) instanceof LocatedStripedBlock;
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
        Block blk = new Block(bg.getBlock().getBlockId() + i, stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
        cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    int readSize = blockGroupSize;
    byte[] readBuffer = new byte[readSize];
    byte[] expected = new byte[readSize];
    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
    for (int i = 0; i < stripesPerBlock; i++) {
        for (int j = 0; j < dataBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
                expected[posInFile] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
    }
    ErasureCoderOptions coderOptions = new ErasureCoderOptions(dataBlocks, parityBlocks);
    RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(), coderOptions);
    // Update the expected content for decoded data
    int[] missingBlkIdx = new int[parityBlocks];
    for (int i = 0; i < missingBlkIdx.length; i++) {
        if (i == 0) {
            missingBlkIdx[i] = failedDNIdx;
        } else {
            missingBlkIdx[i] = dataBlocks + i;
        }
    }
    cluster.stopDataNode(failedDNIdx);
    for (int i = 0; i < stripesPerBlock; i++) {
        byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
        byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
        for (int j = 0; j < dataBlocks; j++) {
            int posInBuf = i * cellSize * dataBlocks + j * cellSize;
            if (j != failedDNIdx) {
                System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
            }
        }
        for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
        for (int m : missingBlkIdx) {
            decodeInputs[m] = null;
        }
        rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
        int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
        System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
    }
    int delta = 10;
    int done = 0;
    // read a small delta, shouldn't trigger decode
    // |cell_0 |
    // |10     |
    done += in.read(0, readBuffer, 0, delta);
    assertEquals(delta, done);
    assertArrayEquals(Arrays.copyOf(expected, done), Arrays.copyOf(readBuffer, done));
    // both head and trail cells are partial
    // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
    // |256K - 10|missing|256K|256K|256K - 10|not in range|
    done += in.read(delta, readBuffer, delta, cellSize * (dataBlocks - 1) - 2 * delta);
    assertEquals(cellSize * (dataBlocks - 1) - delta, done);
    assertArrayEquals(Arrays.copyOf(expected, done), Arrays.copyOf(readBuffer, done));
    // read the rest
    done += in.read(done, readBuffer, done, readSize - done);
    assertEquals(readSize, done);
    assertArrayEquals(expected, readBuffer);
}
Also used: LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ErasureCoderOptions(org.apache.hadoop.io.erasurecode.ErasureCoderOptions) RawErasureDecoder(org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
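
For reference, a minimal sketch of the decode contract the test exercises: erased positions are marked by null entries in the inputs array plus an erasedIndexes array, and the decoder fills the output buffers. The (6, 3) geometry, cell size, and the "rs" codec name are assumptions (the test uses ecPolicy.getCodecName()); conf is assumed to be the test's Configuration.

// Hedged sketch of RawErasureDecoder usage with one erased data block.
ErasureCoderOptions opts = new ErasureCoderOptions(6, 3);  // assumed RS-6-3
RawErasureDecoder decoder = CodecUtil.createRawDecoder(conf, "rs", opts);
int cellSize = 64 * 1024;                     // assumed cell size
byte[][] inputs = new byte[6 + 3][cellSize];  // one cell per internal block
// ... fill the surviving cells with real data here ...
int[] erased = { 5 };                         // say data block 5 was lost
inputs[5] = null;                             // a null input marks an erasure
byte[][] outputs = new byte[erased.length][cellSize];
decoder.decode(inputs, erased, outputs);      // outputs[0] holds the recovered cell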

Aggregations

LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 43 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 26 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 20 uses
Test (org.junit.Test): 20 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 17 uses
Path (org.apache.hadoop.fs.Path): 10 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 9 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 uses
Token (org.apache.hadoop.security.token.Token): 7 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 6 uses
HashMap (java.util.HashMap): 5 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 5 uses
IOException (java.io.IOException): 4 uses
ArrayList (java.util.ArrayList): 4 uses
BitSet (java.util.BitSet): 4 uses
StorageType (org.apache.hadoop.fs.StorageType): 4 uses
File (java.io.File): 3 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 uses