
Example 31 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestStripedBlockUtil, method testDivideByteRangeIntoStripes:

/**
   * Test dividing a byte range into aligned stripes and verify the aligned
   * ranges can be translated back to the byte range.
   */
@Test
public void testDivideByteRangeIntoStripes() {
    ByteBuffer assembled = ByteBuffer.allocate(stripesPerBlock * stripeSize);
    for (int bgSize : blockGroupSizes) {
        LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
        byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
        for (int brStart : byteRangeStartOffsets) {
            for (int brSize : byteRangeSizes) {
                if (brStart + brSize > bgSize) {
                    continue;
                }
                AlignedStripe[] stripes = divideByteRangeIntoStripes(ecPolicy, cellSize, blockGroup, brStart, brStart + brSize - 1, assembled);
                for (AlignedStripe stripe : stripes) {
                    for (int i = 0; i < dataBlocks; i++) {
                        StripingChunk chunk = stripe.chunks[i];
                        if (chunk == null || chunk.state != StripingChunk.REQUESTED) {
                            continue;
                        }
                        int done = 0;
                        int len;
                        // Fill each slice of this chunk from the corresponding
                        // internal block buffer, starting at the stripe's offset
                        // within the block.
                        for (ByteBuffer slice : chunk.getChunkBuffer().getSlices()) {
                            len = slice.remaining();
                            slice.put(internalBlkBufs[i], (int) stripe.getOffsetInBlock() + done, len);
                            done += len;
                        }
                    }
                }
                // Check the reassembled bytes against the expected pattern.
                for (int i = 0; i < brSize; i++) {
                    if (hashIntToByte(brStart + i) != assembled.get(i)) {
                        System.out.println("Oops");
                    }
                    assertEquals("Byte at " + (brStart + i) + " should be the same", hashIntToByte(brStart + i), assembled.get(i));
                }
            }
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) ByteBuffer(java.nio.ByteBuffer) Test(org.junit.Test)
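
As a companion to the test above, the sketch below shows the round-trip idea it relies on: every byte of the source range is derived deterministically from its offset, so the buffer reassembled from the aligned stripes can be checked position by position. The byteAt helper is a stand-in assumption, not the actual hashIntToByte used in TestStripedBlockUtil, and the range values are arbitrary.

import java.nio.ByteBuffer;

public class StripeRoundTripSketch {

    // Hypothetical deterministic byte generator keyed on the logical offset.
    static byte byteAt(int offset) {
        return (byte) ((offset * 31 + 7) & 0xFF);
    }

    public static void main(String[] args) {
        // byte-range start and length inside the block group (illustrative values)
        int brStart = 128;
        int brSize = 1024;

        // Pretend "assembled" was filled by divideByteRangeIntoStripes plus the
        // slice copies shown in the test.
        ByteBuffer assembled = ByteBuffer.allocate(brSize);
        for (int i = 0; i < brSize; i++) {
            assembled.put(i, byteAt(brStart + i));
        }

        // The verification loop mirrors the assertion in the test.
        for (int i = 0; i < brSize; i++) {
            if (byteAt(brStart + i) != assembled.get(i)) {
                throw new AssertionError("Byte at " + (brStart + i) + " differs");
            }
        }
        System.out.println("Range [" + brStart + ", " + (brStart + brSize) + ") verified");
    }
}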

Example 32 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestStripedBlockUtil, method createDummyLocatedBlock:

private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
    final long blockGroupID = -1048576;
    DatanodeInfo[] locs = new DatanodeInfo[groupSize];
    String[] storageIDs = new String[groupSize];
    StorageType[] storageTypes = new StorageType[groupSize];
    byte[] indices = new byte[groupSize];
    for (int i = 0; i < groupSize; i++) {
        indices[i] = (byte) ((i + 2) % dataBlocks);
        // The location port always equals the logical index of the block,
        // for easier verification.
        locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
        storageIDs[i] = locs[i].getDatanodeUuid();
        storageTypes[i] = StorageType.DISK;
    }
    return new LocatedStripedBlock(new ExtendedBlock("pool", blockGroupID, bgSize, 1001), locs, storageIDs, storageTypes, indices, 0, false, null);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock)
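
The dummy block group above deliberately rotates the logical indices relative to the location order. Below is a standalone sketch of just that layout, assuming a 6+3 group purely for illustration; it does not touch any Hadoop classes.

public class DummyIndexLayoutSketch {
    public static void main(String[] args) {
        // assumed erasure-coding layout: 6 data + 3 parity internal blocks
        int dataBlocks = 6;
        int parityBlocks = 3;
        int groupSize = dataBlocks + parityBlocks;

        // same rotation as createDummyLocatedBlock: location i carries
        // internal-block index (i + 2) % dataBlocks
        byte[] indices = new byte[groupSize];
        for (int i = 0; i < groupSize; i++) {
            indices[i] = (byte) ((i + 2) % dataBlocks);
            System.out.println("location " + i + " -> internal block index " + indices[i]);
        }
    }
}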

Example 33 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class DFSStripedInputStream, method readOneStripe:

/**
   * Read a new stripe covering the current position, and store the data in the
   * {@link #curStripeBuf}.
   */
private void readOneStripe(CorruptedBlocks corruptedBlocks) throws IOException {
    resetCurStripeBuffer();
    // compute stripe range based on pos
    final long offsetInBlockGroup = getOffsetInBlockGroup();
    final long stripeLen = cellSize * dataBlkNum;
    final int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
    final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
    final int stripeLimit = (int) Math.min(currentLocatedBlock.getBlockSize() - (stripeIndex * stripeLen), stripeLen);
    StripeRange stripeRange = new StripeRange(offsetInBlockGroup, stripeLimit - stripeBufOffset);
    LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
    AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize, blockGroup, offsetInBlockGroup, offsetInBlockGroup + stripeRange.getLength() - 1, curStripeBuf);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(blockGroup, cellSize, dataBlkNum, parityBlkNum);
    // read the whole stripe
    for (AlignedStripe stripe : stripes) {
        // Parse group to get chosen DN location
        StripeReader sreader = new StatefulStripeReader(stripe, ecPolicy, blks, blockReaders, corruptedBlocks, decoder, this);
        sreader.readStripe();
    }
    curStripeBuf.position(stripeBufOffset);
    curStripeBuf.limit(stripeLimit);
    curStripeRange = stripeRange;
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) StripeRange(org.apache.hadoop.hdfs.util.StripedBlockUtil.StripeRange) AlignedStripe(org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
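
The arithmetic at the top of readOneStripe can be exercised on its own. The sketch below reproduces the stripe-index and offset computation with assumed values (64 KB cells, 6 data blocks, a 10-stripe block group); it is an illustration of the math, not code taken from DFSStripedInputStream.

public class StripeOffsetSketch {
    public static void main(String[] args) {
        long cellSize = 64 * 1024;                         // assumed cell size
        int dataBlkNum = 6;                                // assumed number of data blocks
        long blockGroupSize = cellSize * dataBlkNum * 10;  // assumed block-group size

        long offsetInBlockGroup = 700_000;                 // current read position in the group
        long stripeLen = cellSize * dataBlkNum;            // bytes covered by one full stripe

        int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
        int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
        int stripeLimit = (int) Math.min(blockGroupSize - stripeIndex * stripeLen, stripeLen);
        long rangeLen = stripeLimit - stripeBufOffset;     // bytes read into curStripeBuf

        System.out.printf("stripe %d, offset in stripe %d, stripe limit %d, read length %d%n",
                stripeIndex, stripeBufOffset, stripeLimit, rangeLen);
    }
}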

Example 34 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class DFSStripedInputStream, method fetchBlockByteRange:

/**
   * Real implementation of pread.
   */
@Override
protected void fetchBlockByteRange(LocatedBlock block, long start, long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks) throws IOException {
    // Refresh the striped block group
    LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
    AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(ecPolicy, cellSize, blockGroup, start, end, buf);
    final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(blockGroup, cellSize, dataBlkNum, parityBlkNum);
    final BlockReaderInfo[] preaderInfos = new BlockReaderInfo[groupSize];
    try {
        for (AlignedStripe stripe : stripes) {
            // Parse group to get chosen DN location
            StripeReader preader = new PositionStripeReader(stripe, ecPolicy, blks, preaderInfos, corruptedBlocks, decoder, this);
            try {
                preader.readStripe();
            } finally {
                preader.close();
            }
        }
        buf.position(buf.position() + (int) (end - start + 1));
    } finally {
        for (BlockReaderInfo preaderInfo : preaderInfos) {
            closeReader(preaderInfo);
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) AlignedStripe(org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockReaderInfo(org.apache.hadoop.hdfs.StripeReader.BlockReaderInfo)
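
One detail worth noting in the pread path is the buffer contract: the [start, end] range is inclusive, and on success the method advances the buffer's position by exactly (end - start + 1) bytes. The sketch below illustrates that contract with assumed offsets; the actual fill step is stubbed out as a comment.

import java.nio.ByteBuffer;

public class PreadBufferContractSketch {
    public static void main(String[] args) {
        long start = 4096;
        long end = 8191;                     // inclusive end offset
        int rangeLen = (int) (end - start + 1);

        ByteBuffer buf = ByteBuffer.allocate(rangeLen);
        int before = buf.position();

        // ... fetchBlockByteRange(block, start, end, buf, corruptedBlocks) would
        // fill the buffer here via the per-stripe readers ...
        buf.position(before + rangeLen);     // the method itself advances the position

        System.out.println("bytes covered by the pread: " + (buf.position() - before));
    }
}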

Example 35 with LocatedStripedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.

From the class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedAndCorruptStripedBlock:

@Test
public void testProcessOverReplicatedAndCorruptStripedBlock() throws Exception {
    long fileLen = dataBlocks * blockSize;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    BlockInfoStriped blockInfo = new BlockInfoStriped(blk, StripedFileTestUtil.getDefaultECPolicy());
    for (int i = 0; i < groupSize; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // let an internal block be corrupt
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    List<String> storages = Arrays.asList(bg.getStorageIDs());
    cluster.getNamesystem().writeLock();
    try {
        bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(), infos.get(0), storages.get(0), "TEST");
    } finally {
        cluster.getNamesystem().writeUnlock();
    }
    assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());
    // let an internal block be over-replicated with 2 redundant blocks.
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist except b0
    // the redundant internal blocks will not be deleted before the corrupted
    // block gets reconstructed, but since we set
    // DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY to 0, the reconstruction will
    // not happen
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(0));
    for (int i = 1; i < groupSize; i++) {
        assertTrue(set.get(i));
    }
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BitSet(java.util.BitSet) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
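
The injection loop above leans on the internal-block ID convention for striped groups: the i-th internal block uses the group ID plus its block index, which is why blk.setBlockId(groupId + i) addresses one internal block per datanode. A small standalone sketch of that numbering, with the group ID taken from the dummy block in Example 32 and an assumed 6+3 group:

public class StripedBlockIdSketch {
    public static void main(String[] args) {
        long groupId = -1048576L;   // example group ID (HDFS block-group IDs are negative)
        int groupSize = 9;          // assumed 6 data + 3 parity internal blocks

        for (int i = 0; i < groupSize; i++) {
            long internalBlockId = groupId + i;
            System.out.println("internal block " + i + " -> block ID " + internalBlockId);
        }
    }
}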

Aggregations

LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 43
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 26
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 20
Test (org.junit.Test): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 17
Path (org.apache.hadoop.fs.Path): 10
Block (org.apache.hadoop.hdfs.protocol.Block): 9
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7
Token (org.apache.hadoop.security.token.Token): 7
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 6
HashMap (java.util.HashMap): 5
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 5
IOException (java.io.IOException): 4
ArrayList (java.util.ArrayList): 4
BitSet (java.util.BitSet): 4
StorageType (org.apache.hadoop.fs.StorageType): 4
File (java.io.File): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3