Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class TestStripedBlockUtil, method testDivideByteRangeIntoStripes:
/**
 * Test dividing a byte range into aligned stripes and verify the aligned
 * ranges can be translated back to the byte range.
 */
@Test
public void testDivideByteRangeIntoStripes() {
  ByteBuffer assembled = ByteBuffer.allocate(stripesPerBlock * stripeSize);
  for (int bgSize : blockGroupSizes) {
    LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
    byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);
    for (int brStart : byteRangeStartOffsets) {
      for (int brSize : byteRangeSizes) {
        if (brStart + brSize > bgSize) {
          continue;
        }
        AlignedStripe[] stripes = divideByteRangeIntoStripes(ecPolicy,
            cellSize, blockGroup, brStart, brStart + brSize - 1, assembled);
        for (AlignedStripe stripe : stripes) {
          for (int i = 0; i < dataBlocks; i++) {
            StripingChunk chunk = stripe.chunks[i];
            if (chunk == null || chunk.state != StripingChunk.REQUESTED) {
              continue;
            }
            int done = 0;
            int len;
            for (ByteBuffer slice : chunk.getChunkBuffer().getSlices()) {
              len = slice.remaining();
              slice.put(internalBlkBufs[i],
                  (int) stripe.getOffsetInBlock() + done, len);
              done += len;
            }
          }
        }
        for (int i = 0; i < brSize; i++) {
          if (hashIntToByte(brStart + i) != assembled.get(i)) {
            System.out.println("Oops");
          }
          assertEquals("Byte at " + (brStart + i) + " should be the same",
              hashIntToByte(brStart + i), assembled.get(i));
        }
      }
    }
  }
}
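The round-trip check works because of the striped layout itself: the block group is carved into cellSize-byte cells that rotate round-robin across the data blocks. A minimal, self-contained sketch of that mapping (all values below are illustrative, not the test's actual fields):

// Sketch: locate the internal data block holding a given block-group offset.
// Standard HDFS striped layout; cellSize/dataBlocks/offset are illustrative.
int cellSize = 64 * 1024;
int dataBlocks = 6;
long offset = 200_000L;                          // offset inside the block group
long cellIdxInBg = offset / cellSize;            // which cell of the group
int blkIdx = (int) (cellIdxInBg % dataBlocks);   // internal data block index
long offsetInBlk = (cellIdxInBg / dataBlocks) * cellSize + offset % cellSize;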
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class TestStripedBlockUtil, method createDummyLocatedBlock:
private LocatedStripedBlock createDummyLocatedBlock(int bgSize) {
  final long blockGroupID = -1048576;
  DatanodeInfo[] locs = new DatanodeInfo[groupSize];
  String[] storageIDs = new String[groupSize];
  StorageType[] storageTypes = new StorageType[groupSize];
  byte[] indices = new byte[groupSize];
  for (int i = 0; i < groupSize; i++) {
    indices[i] = (byte) ((i + 2) % dataBlocks);
    // The location port is always equal to the logical index of the block,
    // for easier verification
    locs[i] = DFSTestUtil.getLocalDatanodeInfo(indices[i]);
    storageIDs[i] = locs[i].getDatanodeUuid();
    storageTypes[i] = StorageType.DISK;
  }
  return new LocatedStripedBlock(new ExtendedBlock("pool", blockGroupID,
      bgSize, 1001), locs, storageIDs, storageTypes, indices, 0, false, null);
}
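A group built this way can be split back into per-index internal blocks with StripedBlockUtil.parseStripedBlockGroup, the same helper DFSStripedInputStream uses below. A hedged sketch, assuming the test's cellSize, dataBlocks and parityBlocks fields:

// Sketch: parse the dummy group back into per-index internal blocks.
// Assumes the surrounding test fields cellSize, dataBlocks, parityBlocks.
LocatedStripedBlock bg = createDummyLocatedBlock(bgSize);
LocatedBlock[] internals = StripedBlockUtil.parseStripedBlockGroup(
    bg, cellSize, dataBlocks, parityBlocks);
for (int i = 0; i < internals.length; i++) {
  if (internals[i] != null) {
    // each internal block ID is the group ID plus its logical index
    assertEquals(bg.getBlock().getBlockId() + i,
        internals[i].getBlock().getBlockId());
  }
}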
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class DFSStripedInputStream, method readOneStripe:
/**
 * Read a new stripe covering the current position, and store the data in the
 * {@link #curStripeBuf}.
 */
private void readOneStripe(CorruptedBlocks corruptedBlocks)
    throws IOException {
  resetCurStripeBuffer();
  // compute stripe range based on pos
  final long offsetInBlockGroup = getOffsetInBlockGroup();
  final long stripeLen = cellSize * dataBlkNum;
  final int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
  final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
  final int stripeLimit = (int) Math.min(
      currentLocatedBlock.getBlockSize() - (stripeIndex * stripeLen),
      stripeLen);
  StripeRange stripeRange =
      new StripeRange(offsetInBlockGroup, stripeLimit - stripeBufOffset);
  LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
  AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy,
      cellSize, blockGroup, offsetInBlockGroup,
      offsetInBlockGroup + stripeRange.getLength() - 1, curStripeBuf);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
      blockGroup, cellSize, dataBlkNum, parityBlkNum);
  // read the whole stripe
  for (AlignedStripe stripe : stripes) {
    // Parse group to get chosen DN location
    StripeReader sreader = new StatefulStripeReader(stripe, ecPolicy, blks,
        blockReaders, corruptedBlocks, decoder, this);
    sreader.readStripe();
  }
  curStripeBuf.position(stripeBufOffset);
  curStripeBuf.limit(stripeLimit);
  curStripeRange = stripeRange;
}
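The arithmetic at the top of readOneStripe determines which window of curStripeBuf a subsequent stateful read() can serve. A standalone sketch of that math, with assumed values:

// Sketch of the stripe-window math in readOneStripe; values are assumptions.
long cellSize = 64 * 1024, dataBlkNum = 6;
long blockGroupSize = 3 * cellSize * dataBlkNum;     // assume 3 full stripes
long offsetInBlockGroup = 500_000L;                  // current stream position
long stripeLen = cellSize * dataBlkNum;              // one full data stripe
int stripeIndex = (int) (offsetInBlockGroup / stripeLen);
int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
int stripeLimit = (int) Math.min(
    blockGroupSize - stripeIndex * stripeLen, stripeLen);
// read() then serves bytes from curStripeBuf[stripeBufOffset, stripeLimit)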
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class DFSStripedInputStream, method fetchBlockByteRange:
/**
 * Real implementation of pread.
 */
@Override
protected void fetchBlockByteRange(LocatedBlock block, long start, long end,
    ByteBuffer buf, CorruptedBlocks corruptedBlocks) throws IOException {
  // Refresh the striped block group
  LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());
  AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
      ecPolicy, cellSize, blockGroup, start, end, buf);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
      blockGroup, cellSize, dataBlkNum, parityBlkNum);
  final BlockReaderInfo[] preaderInfos = new BlockReaderInfo[groupSize];
  try {
    for (AlignedStripe stripe : stripes) {
      // Parse group to get chosen DN location
      StripeReader preader = new PositionStripeReader(stripe, ecPolicy, blks,
          preaderInfos, corruptedBlocks, decoder, this);
      try {
        preader.readStripe();
      } finally {
        preader.close();
      }
    }
    buf.position(buf.position() + (int) (end - start + 1));
  } finally {
    for (BlockReaderInfo preaderInfo : preaderInfos) {
      closeReader(preaderInfo);
    }
  }
}
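fetchBlockByteRange is the striped backend of positional read; the public entry point is the ordinary pread API. A hedged usage sketch, where fs and path are assumed to name an erasure-coded file:

// Sketch: a positional read on an EC file ends up in fetchBlockByteRange.
// Assumes fs (a DistributedFileSystem) and path name an erasure-coded file.
try (FSDataInputStream in = fs.open(path)) {
  byte[] buf = new byte[4096];
  int n = in.read(1024L, buf, 0, buf.length); // pread: stream position unchanged
}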
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class TestAddOverReplicatedStripedBlocks, method testProcessOverReplicatedAndCorruptStripedBlock:
@Test
public void testProcessOverReplicatedAndCorruptStripedBlock()
    throws Exception {
  long fileLen = dataBlocks * blockSize;
  DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock,
      false);
  LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(
      filePath.toString(), 0, fileLen);
  LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
  long gs = bg.getBlock().getGenerationStamp();
  String bpid = bg.getBlock().getBlockPoolId();
  long groupId = bg.getBlock().getBlockId();
  Block blk = new Block(groupId, blockSize, gs);
  BlockInfoStriped blockInfo = new BlockInfoStriped(blk,
      StripedFileTestUtil.getDefaultECPolicy());
  for (int i = 0; i < groupSize; i++) {
    blk.setBlockId(groupId + i);
    cluster.injectBlocks(i, Arrays.asList(blk), bpid);
  }
  cluster.triggerBlockReports();
  // let an internal block be corrupt
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
  List<String> storages = Arrays.asList(bg.getStorageIDs());
  cluster.getNamesystem().writeLock();
  try {
    bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
        infos.get(0), storages.get(0), "TEST");
  } finally {
    cluster.getNamesystem().writeUnlock();
  }
  assertEquals(1,
      bm.countNodes(bm.getStoredBlock(blockInfo)).corruptReplicas());
  // let an internal block be over-replicated with 2 redundant blocks
  blk.setBlockId(groupId + 2);
  cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
  cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
  // update blocksMap
  cluster.triggerBlockReports();
  // verify that all internal blocks exist except b0;
  // the redundant internal blocks will not be deleted before the corrupted
  // block gets reconstructed, but since we set
  // DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY to 0, the reconstruction will
  // not happen
  lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0,
      fileLen);
  bg = (LocatedStripedBlock) (lbs.get(0));
  assertEquals(groupSize + 1, bg.getBlockIndices().length);
  assertEquals(groupSize + 1, bg.getLocations().length);
  BitSet set = new BitSet(groupSize);
  for (byte index : bg.getBlockIndices()) {
    set.set(index);
  }
  Assert.assertFalse(set.get(0));
  for (int i = 1; i < groupSize; i++) {
    assertTrue(set.get(i));
  }
}
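The groupId + i arithmetic above works because striped block group IDs reserve their low bits for the internal block index (a 4-bit field in the HDFS erasure-coding ID layout). A minimal sketch of recovering the index from an internal block ID:

// Sketch: striped block group IDs reserve the low 4 bits for the internal
// block index, which is why blk.setBlockId(groupId + i) names block i.
long groupId = -1048576;               // low 4 bits are zero
long internalId = groupId + 2;         // internal block #2 of the group
int index = (int) (internalId & 0xF);  // recovers 2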