Search in sources:

Example 71 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

The class TestDFSStripedInputStream, method testPreadWithDNFailure.

/**
 * Reads a striped file via positional read (pread) while the DataNode hosting
 * the last data block is stopped, so the cell it held must be reconstructed
 * from parity. Exercises three reads: a small one that needs no decode, a
 * mid-range one spanning the missing cell, and one covering the remainder.
 */
@Test
public void testPreadWithDNFailure() throws Exception {
    final int numBlocks = 4;
    // DN (and internal block) to take down: the last data block of the group.
    final int failedDNIdx = dataBlocks - 1;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize);
    assert lbs.get(0) instanceof LocatedStripedBlock;
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    // Inject one internal block (data and parity) into each simulated DN;
    // internal block IDs are consecutive, starting at the group's block ID.
    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
        Block blk = new Block(bg.getBlock().getBlockId() + i, stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
        // NOTE(review): the generation stamp appears to already be set by the
        // constructor above — confirm whether this call is redundant.
        blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
        cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    int readSize = blockGroupSize;
    byte[] readBuffer = new byte[readSize];
    byte[] expected = new byte[readSize];
    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
    // Build the expected file contents from SimulatedFSDataset's
    // deterministic per-block byte generator, cell by cell.
    for (int i = 0; i < stripesPerBlock; i++) {
        for (int j = 0; j < dataBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
                expected[posInFile] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
    }
    ErasureCoderOptions coderOptions = new ErasureCoderOptions(dataBlocks, parityBlocks);
    RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(), coderOptions);
    // Update the expected content for decoded data
    // Erasure set handed to the decoder: the failed data block plus the last
    // (parityBlocks - 1) parity blocks; only parity block 0 survives as input.
    int[] missingBlkIdx = new int[parityBlocks];
    for (int i = 0; i < missingBlkIdx.length; i++) {
        if (i == 0) {
            missingBlkIdx[i] = failedDNIdx;
        } else {
            missingBlkIdx[i] = dataBlocks + i;
        }
    }
    cluster.stopDataNode(failedDNIdx);
    // Recompute the expected bytes of the failed cell in every stripe by
    // decoding from the surviving data cells plus the first parity cell.
    for (int i = 0; i < stripesPerBlock; i++) {
        byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
        byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
        // Surviving data cells for this stripe, copied from 'expected'.
        for (int j = 0; j < dataBlocks; j++) {
            int posInBuf = i * cellSize * dataBlocks + j * cellSize;
            if (j != failedDNIdx) {
                System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
            }
        }
        // Parity cells for this stripe, regenerated from the simulated data.
        for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
        // Null out erased positions as required by the decoder input contract.
        for (int m : missingBlkIdx) {
            decodeInputs[m] = null;
        }
        rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
        // Output slot 0 corresponds to the failed data block (missingBlkIdx[0]).
        int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
        System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
    }
    int delta = 10;
    int done = 0;
    // read a small delta, shouldn't trigger decode
    // |cell_0 |
    // |10     |
    done += in.read(0, readBuffer, 0, delta);
    assertEquals(delta, done);
    assertArrayEquals(Arrays.copyOf(expected, done), Arrays.copyOf(readBuffer, done));
    // both head and trail cells are partial
    // |c_0      |c_1    |c_2 |c_3 |c_4      |c_5         |
    // |256K - 10|missing|256K|256K|256K - 10|not in range|
    done += in.read(delta, readBuffer, delta, cellSize * (dataBlocks - 1) - 2 * delta);
    assertEquals(cellSize * (dataBlocks - 1) - delta, done);
    assertArrayEquals(Arrays.copyOf(expected, done), Arrays.copyOf(readBuffer, done));
    // read the rest
    done += in.read(done, readBuffer, done, readSize - done);
    assertEquals(readSize, done);
    assertArrayEquals(expected, readBuffer);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ErasureCoderOptions(org.apache.hadoop.io.erasurecode.ErasureCoderOptions) RawErasureDecoder(org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) Test(org.junit.Test)

Example 72 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

The class TestDFSStripedInputStream, method testStatefulReadWithDNFailure.

/**
 * Reads a striped file via stateful (sequential) read while the DataNode
 * hosting the last data block is stopped, forcing the client to reconstruct
 * the missing cell from parity. Covers a small decode-free read, a mid-range
 * read across the missing cell (tolerating short reads), and the remainder.
 */
@Test
public void testStatefulReadWithDNFailure() throws Exception {
    final int numBlocks = 4;
    // DN (and internal block) to take down: the last data block of the group.
    final int failedDNIdx = dataBlocks - 1;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize);
    assert lbs.get(0) instanceof LocatedStripedBlock;
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    // Inject one internal block (data and parity) into each simulated DN;
    // internal block IDs are consecutive, starting at the group's block ID.
    for (int i = 0; i < dataBlocks + parityBlocks; i++) {
        Block blk = new Block(bg.getBlock().getBlockId() + i, stripesPerBlock * cellSize, bg.getBlock().getGenerationStamp());
        blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
        cluster.injectBlocks(i, Arrays.asList(blk), bg.getBlock().getBlockPoolId());
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    int readSize = blockGroupSize;
    byte[] readBuffer = new byte[readSize];
    byte[] expected = new byte[readSize];
    /** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
    for (int i = 0; i < stripesPerBlock; i++) {
        for (int j = 0; j < dataBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                int posInFile = i * cellSize * dataBlocks + j * cellSize + k;
                expected[posInFile] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
    }
    ErasureCoderOptions coderOptions = new ErasureCoderOptions(dataBlocks, parityBlocks);
    RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf, ecPolicy.getCodecName(), coderOptions);
    // Update the expected content for decoded data: treat the failed data
    // block plus the last (parityBlocks - 1) parity blocks as erasures.
    int[] missingBlkIdx = new int[parityBlocks];
    for (int i = 0; i < missingBlkIdx.length; i++) {
        if (i == 0) {
            missingBlkIdx[i] = failedDNIdx;
        } else {
            missingBlkIdx[i] = dataBlocks + i;
        }
    }
    cluster.stopDataNode(failedDNIdx);
    // Recompute the expected bytes of the failed cell in every stripe by
    // decoding from the surviving data cells plus the first parity cell.
    for (int i = 0; i < stripesPerBlock; i++) {
        byte[][] decodeInputs = new byte[dataBlocks + parityBlocks][cellSize];
        byte[][] decodeOutputs = new byte[missingBlkIdx.length][cellSize];
        for (int j = 0; j < dataBlocks; j++) {
            int posInBuf = i * cellSize * dataBlocks + j * cellSize;
            if (j != failedDNIdx) {
                System.arraycopy(expected, posInBuf, decodeInputs[j], 0, cellSize);
            }
        }
        for (int j = dataBlocks; j < dataBlocks + parityBlocks; j++) {
            for (int k = 0; k < cellSize; k++) {
                int posInBlk = i * cellSize + k;
                decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(new Block(bg.getBlock().getBlockId() + j), posInBlk);
            }
        }
        // Null out erased positions as required by the decoder input contract.
        for (int m : missingBlkIdx) {
            decodeInputs[m] = null;
        }
        rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
        int posInBuf = i * cellSize * dataBlocks + failedDNIdx * cellSize;
        System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, cellSize);
    }
    int delta = 10;
    int done = 0;
    // read a small delta, shouldn't trigger decode
    // |cell_0 |
    // |10     |
    done += in.read(readBuffer, 0, delta);
    assertEquals(delta, done);
    // |256K - 10|missing|256K|256K|256K - 10|not in range|
    // FIX: the original loop always read into fixed offset 'delta' with a
    // fixed length, clobbering already-read bytes whenever the read was split
    // across calls, and compared 'done' (which already includes the first
    // delta bytes) against the request length. Track the absolute position.
    final int midPhaseEnd = cellSize * (dataBlocks - 1) - delta;
    while (done < midPhaseEnd) {
        int ret = in.read(readBuffer, done, midPhaseEnd - done);
        assertTrue(ret > 0);
        done += ret;
    }
    assertEquals(cellSize * (dataBlocks - 1) - delta, done);
    // read the rest
    // FIX: the original compared 'done' against the remaining size (not the
    // total) and never shrank the request length, so it could exit before
    // the buffer was full or request past the end of the buffer.
    while (done < readSize) {
        int ret = in.read(readBuffer, done, readSize - done);
        assertTrue(ret > 0);
        done += ret;
    }
    assertEquals(readSize, done);
    assertArrayEquals(expected, readBuffer);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ErasureCoderOptions(org.apache.hadoop.io.erasurecode.ErasureCoderOptions) RawErasureDecoder(org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) Test(org.junit.Test)

Example 73 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

The class TestDFSStripedInputStream, method testStatefulRead.

/**
 * Verifies stateful (sequential) reads over a two-block-group striped file,
 * through either the byte-array or the ByteBuffer read path.
 *
 * @param useByteBuffer      read via {@code read(ByteBuffer)} when true,
 *                           via {@code read(byte[], int, int)} otherwise
 * @param cellMisalignPacket restart the cluster with the file buffer size
 *                           bumped by one so packets do not line up with
 *                           cell boundaries
 */
private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket) throws Exception {
    final int numBlocks = 2;
    final int fileSize = numBlocks * blockGroupSize;
    if (cellMisalignPacket) {
        conf.setInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT + 1);
        tearDown();
        setup();
    }
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks located = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, fileSize);
    assert located.getLocatedBlocks().size() == numBlocks;
    // Hand each simulated DN one internal data block per block group;
    // internal block IDs are consecutive, starting at the group's block ID.
    for (LocatedBlock lb : located.getLocatedBlocks()) {
        assert lb instanceof LocatedStripedBlock;
        LocatedStripedBlock group = (LocatedStripedBlock) (lb);
        for (int dn = 0; dn < dataBlocks; dn++) {
            Block internal = new Block(group.getBlock().getBlockId() + dn, stripesPerBlock * cellSize, group.getBlock().getGenerationStamp());
            internal.setGenerationStamp(group.getBlock().getGenerationStamp());
            cluster.injectBlocks(dn, Arrays.asList(internal), group.getBlock().getBlockPoolId());
        }
    }
    DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    byte[] expected = new byte[fileSize];
    /**
     * A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks:
     * mirror SimulatedFSDataset's deterministic content, cell by cell.
     */
    for (LocatedBlock group : located.getLocatedBlocks()) {
        for (int stripe = 0; stripe < stripesPerBlock; stripe++) {
            for (int cell = 0; cell < dataBlocks; cell++) {
                for (int off = 0; off < cellSize; off++) {
                    int posInBlk = stripe * cellSize + off;
                    int posInFile = (int) group.getStartOffset() + stripe * cellSize * dataBlocks + cell * cellSize + off;
                    expected[posInFile] = SimulatedFSDataset.simulatedByte(new Block(group.getBlock().getBlockId() + cell), posInBlk);
                }
            }
        }
    }
    if (useByteBuffer) {
        ByteBuffer readBuffer = ByteBuffer.allocate(fileSize);
        // Keep reading until the whole file is consumed; short reads are fine.
        for (int done = 0; done < fileSize; ) {
            int ret = in.read(readBuffer);
            assertTrue(ret > 0);
            done += ret;
        }
        assertArrayEquals(expected, readBuffer.array());
    } else {
        byte[] readBuffer = new byte[fileSize];
        // Keep reading until the whole file is consumed; short reads are fine.
        for (int done = 0; done < fileSize; ) {
            int ret = in.read(readBuffer, done, fileSize - done);
            assertTrue(ret > 0);
            done += ret;
        }
        assertArrayEquals(expected, readBuffer);
    }
    fs.delete(filePath, true);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ByteBuffer(java.nio.ByteBuffer)

Example 74 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

The class TestDFSStripedInputStream, method testRefreshBlock.

/**
 * Test {@link DFSStripedInputStream#getBlockAt(long)}: refreshing each
 * internal data block of every block group must yield an equivalent block,
 * start offset, and location list.
 */
@Test
public void testRefreshBlock() throws Exception {
    final int numBlocks = 4;
    DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks, stripesPerBlock, false, ecPolicy);
    LocatedBlocks located = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, blockGroupSize * numBlocks);
    final DFSStripedInputStream in = new DFSStripedInputStream(fs.getClient(), filePath.toString(), false, ecPolicy, null);
    for (LocatedBlock groupBlock : located.getLocatedBlocks()) {
        LocatedStripedBlock stripedGroup = (LocatedStripedBlock) groupBlock;
        // Split the block group into its internal (per-DN) blocks.
        LocatedBlock[] internalBlocks = StripedBlockUtil.parseStripedBlockGroup(stripedGroup, cellSize, dataBlocks, parityBlocks);
        for (int idx = 0; idx < dataBlocks; idx++) {
            LocatedBlock original = internalBlocks[idx];
            LocatedBlock refreshed = in.refreshLocatedBlock(original);
            assertEquals(original.getBlock(), refreshed.getBlock());
            assertEquals(original.getStartOffset(), refreshed.getStartOffset());
            assertArrayEquals(original.getLocations(), refreshed.getLocations());
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)

Example 75 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

The class TestDFSUtil, method testLocatedBlocks2Locations.

/**
 * Test conversion of LocatedBlock to BlockLocation: one healthy and one
 * corrupt block must map to two locations with exactly one flagged corrupt,
 * and an empty {@code LocatedBlocks} must map to an empty array.
 */
@Test
public void testLocatedBlocks2Locations() {
    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] ds = new DatanodeInfo[1];
    ds[0] = d;
    // ok
    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
    LocatedBlock l1 = new LocatedBlock(b1, ds);
    l1.setStartOffset(0);
    l1.setCorrupt(false);
    // corrupt
    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
    LocatedBlock l2 = new LocatedBlock(b2, ds);
    l2.setStartOffset(0);
    l2.setCorrupt(true);
    List<LocatedBlock> ls = Arrays.asList(l1, l2);
    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true, null, null);
    BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
    // FIX: use assertEquals instead of assertTrue(msg, x == y) so a failure
    // reports the expected and actual values automatically.
    assertEquals("unexpected number of blocks", 2, bs.length);
    int corruptCount = 0;
    for (BlockLocation b : bs) {
        if (b.isCorrupt()) {
            corruptCount++;
        }
    }
    assertEquals("unexpected number of corrupt blocks", 1, corruptCount);
    // test an empty location
    bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
    assertEquals(0, bs.length);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) Test(org.junit.Test)

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)118 Test (org.junit.Test)67 Path (org.apache.hadoop.fs.Path)65 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)52 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)33 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)32 Configuration (org.apache.hadoop.conf.Configuration)29 IOException (java.io.IOException)20 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)20 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)20 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)18 FileSystem (org.apache.hadoop.fs.FileSystem)17 LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)17 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)13 Block (org.apache.hadoop.hdfs.protocol.Block)11 InetSocketAddress (java.net.InetSocketAddress)10 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)10 FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream)9 HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)7 BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager)7