Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class StripedFileTestUtil, method verifyLocatedStripedBlocks.
/**
 * Verify that the blocks in a striped block group are on different nodes,
 * and that every internal block exists.
 */
public static void verifyLocatedStripedBlocks(LocatedBlocks lbs, int groupSize) {
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    assert lb instanceof LocatedStripedBlock;
    HashSet<DatanodeInfo> locs = new HashSet<>();
    Collections.addAll(locs, lb.getLocations());
    assertEquals(groupSize, lb.getLocations().length);
    assertEquals(groupSize, locs.size());

    // verify that every internal block exists
    byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
    assertEquals(groupSize, blockIndices.length);
    HashSet<Integer> found = new HashSet<>();
    for (int index : blockIndices) {
      assert index >= 0;
      found.add(index);
    }
    assertEquals(groupSize, found.size());
  }
}
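A minimal sketch of how a test might invoke this helper; the file path and the RS-6-3 policy dimensions are assumptions for illustration, not taken from the snippet above:

  // Hypothetical usage: fetch the block locations of an EC file and check
  // that each block group is fully placed on distinct DataNodes.
  LocatedBlocks lbs = dfs.getClient()
      .getLocatedBlocks("/ec/file", 0L, Long.MAX_VALUE); // assumed path
  int groupSize = 6 + 3; // assumed RS-6-3: 6 data + 3 parity units
  StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize);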
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class StripedFileTestUtil, method checkData.
static void checkData(DistributedFileSystem dfs, Path srcPath, int length,
    List<DatanodeInfo> killedList, List<Long> oldGSList, int blkGroupSize)
    throws IOException {
  StripedFileTestUtil.verifyLength(dfs, srcPath, length);
  List<List<LocatedBlock>> blockGroupList = new ArrayList<>();
  LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(srcPath.toString(), 0L,
      Long.MAX_VALUE);
  int expectedNumGroup = 0;
  if (length > 0) {
    expectedNumGroup = (length - 1) / blkGroupSize + 1;
  }
  assertEquals(expectedNumGroup, lbs.getLocatedBlocks().size());

  final ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(srcPath);
  final int cellSize = ecPolicy.getCellSize();
  final int dataBlkNum = ecPolicy.getNumDataUnits();
  final int parityBlkNum = ecPolicy.getNumParityUnits();

  int index = 0;
  for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
    Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);

    final long gs = firstBlock.getBlock().getGenerationStamp();
    final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
    final String s = "gs=" + gs + ", oldGS=" + oldGS;
    LOG.info(s);
    Assert.assertTrue(s, gs >= oldGS);

    LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
        (LocatedStripedBlock) firstBlock, cellSize, dataBlkNum, parityBlkNum);
    blockGroupList.add(Arrays.asList(blocks));
  }

  // test each block group
  for (int group = 0; group < blockGroupList.size(); group++) {
    final boolean isLastGroup = group == blockGroupList.size() - 1;
    final int groupSize = !isLastGroup ? blkGroupSize
        : length - (blockGroupList.size() - 1) * blkGroupSize;
    final int numCellInGroup = (groupSize - 1) / cellSize + 1;
    final int lastCellIndex = (numCellInGroup - 1) % dataBlkNum;
    final int lastCellSize = groupSize - (numCellInGroup - 1) * cellSize;

    // get the data of this block
    List<LocatedBlock> blockList = blockGroupList.get(group);
    byte[][] dataBlockBytes = new byte[dataBlkNum][];
    byte[][] parityBlockBytes = new byte[parityBlkNum][];
    Set<Integer> checkSet = new HashSet<>();

    // for each block, use BlockReader to read data
    for (int i = 0; i < blockList.size(); i++) {
      final int j = i >= dataBlkNum ? 0 : i;
      final int numCellInBlock = (numCellInGroup - 1) / dataBlkNum
          + (j <= lastCellIndex ? 1 : 0);
      final int blockSize = numCellInBlock * cellSize
          + (isLastGroup && j == lastCellIndex ? lastCellSize - cellSize : 0);

      final byte[] blockBytes = new byte[blockSize];
      if (i < dataBlkNum) {
        dataBlockBytes[i] = blockBytes;
      } else {
        parityBlockBytes[i - dataBlkNum] = blockBytes;
      }

      final LocatedBlock lb = blockList.get(i);
      LOG.info("i,j=" + i + ", " + j + ", numCellInBlock=" + numCellInBlock
          + ", blockSize=" + blockSize + ", lb=" + lb);
      if (lb == null) {
        continue;
      }

      final ExtendedBlock block = lb.getBlock();
      assertEquals(blockSize, block.getNumBytes());
      if (block.getNumBytes() == 0) {
        continue;
      }

      DatanodeInfo dn = blockList.get(i).getLocations()[0];
      if (!killedList.contains(dn)) {
        final BlockReader blockReader = BlockReaderTestUtil.getBlockReader(
            dfs, lb, 0, block.getNumBytes());
        blockReader.readAll(blockBytes, 0, (int) block.getNumBytes());
        blockReader.close();
        checkSet.add(i);
      }
    }
    LOG.info("Internal blocks to check: " + checkSet);

    // check data
    final int groupPosInFile = group * blkGroupSize;
    for (int i = 0; i < dataBlockBytes.length; i++) {
      boolean killed = false;
      if (!checkSet.contains(i)) {
        killed = true;
      }
      final byte[] actual = dataBlockBytes[i];
      for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
        final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
            cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
        Assert.assertTrue(posInFile < length);
        final byte expected = getByte(posInFile);
        if (killed) {
          actual[posInBlk] = expected;
        } else {
          if (expected != actual[posInBlk]) {
            String s = "expected=" + expected + " but actual=" + actual[posInBlk]
                + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
                + ". group=" + group + ", i=" + i;
            Assert.fail(s);
          }
        }
      }
    }

    // check parity
    verifyParityBlocks(dfs.getConf(),
        lbs.getLocatedBlocks().get(group).getBlockSize(), cellSize,
        dataBlockBytes, parityBlockBytes, checkSet, ecPolicy.getCodecName());
  }
}
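For context, a hedged sketch of a typical call site follows; the block size, policy width, and killed-node list are illustrative assumptions, not values from the snippet above. Passing null for oldGSList effectively skips the generation-stamp check, since the comparison then uses -1.

  // Hypothetical usage: after writing `length` bytes to srcPath and possibly
  // killing some DataNodes, validate both the data and the parity contents.
  final int blockSize = 64 * 1024;                  // assumed internal block size
  final int dataBlkNum = 6;                         // assumed RS-6-3 policy
  final int blkGroupSize = dataBlkNum * blockSize;  // data bytes per full group
  List<DatanodeInfo> killedDNs = new ArrayList<>(); // empty when no DN was killed
  StripedFileTestUtil.checkData(dfs, srcPath, length, killedDNs,
      null /* skip the generation-stamp check */, blkGroupSize);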
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class PBHelperClient, method convertLocatedBlockProto.
public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
  if (proto == null) {
    return null;
  }
  List<DatanodeInfoProto> locs = proto.getLocsList();
  DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
  for (int i = 0; i < locs.size(); i++) {
    targets[i] = convert(locs.get(i));
  }

  final StorageType[] storageTypes = convertStorageTypes(
      proto.getStorageTypesList(), locs.size());

  final int storageIDsCount = proto.getStorageIDsCount();
  final String[] storageIDs;
  if (storageIDsCount == 0) {
    storageIDs = null;
  } else {
    Preconditions.checkState(storageIDsCount == locs.size());
    storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
  }

  byte[] indices = null;
  if (proto.hasBlockIndices()) {
    indices = proto.getBlockIndices().toByteArray();
  }

  // Set values from the isCached list, re-using references from loc
  List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
  List<Boolean> isCachedList = proto.getIsCachedList();
  for (int i = 0; i < isCachedList.size(); i++) {
    if (isCachedList.get(i)) {
      cachedLocs.add(targets[i]);
    }
  }

  final LocatedBlock lb;
  if (indices == null) {
    lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
  } else {
    lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
        storageIDs, storageTypes, indices, proto.getOffset(),
        proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
    Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));
  return lb;
}
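The key decoding rule above is that the presence of block indices in the proto selects the striped subtype. A hedged sketch of how a caller might branch on the result; the call site itself is assumed:

  // Sketch (assumed call site): striped blocks carry per-location indices
  // and per-internal-block tokens in addition to the common fields.
  LocatedBlock lb = PBHelperClient.convertLocatedBlockProto(proto);
  if (lb instanceof LocatedStripedBlock) {
    LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
    byte[] indices = lsb.getBlockIndices(); // maps locations to internal blocks
    assert indices.length == lb.getLocations().length;
  }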
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class TestReadStripedFileWithDecoding, method testReportBadBlock.
/**
 * After reading a corrupted block, make sure the client can correctly report
 * the corruption to the NameNode.
 */
@Test
public void testReportBadBlock() throws IOException {
  // create a file
  final Path file = new Path("/corrupted");
  final int length = 10; // length of "corruption"
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);

  // corrupt the first data block
  int dnIndex = findFirstDataNode(file, cellSize * dataBlocks);
  Assert.assertNotEquals(-1, dnIndex);
  LocatedStripedBlock slb = (LocatedStripedBlock) fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(slb,
      cellSize, dataBlocks, parityBlocks);

  // find the first block file
  File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
  File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[0].getBlock());
  Assert.assertTrue("Block file does not exist", blkFile.exists());

  // corrupt the block file
  LOG.info("Deliberately corrupting file " + blkFile.getName());
  try (FileOutputStream out = new FileOutputStream(blkFile)) {
    out.write("corruption".getBytes());
  }

  // disable heartbeats so the corrupt-replica record is kept in the NameNode
  for (DataNode dn : cluster.getDataNodes()) {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
  }
  try {
    // do stateful read
    StripedFileTestUtil.verifyStatefulRead(fs, file, length, bytes,
        ByteBuffer.allocate(1024));

    // check whether the corruption has been reported to the NameNode
    final FSNamesystem ns = cluster.getNamesystem();
    final BlockManager bm = ns.getBlockManager();
    BlockInfo blockInfo = (ns.getFSDirectory().getINode4Write(file.toString())
        .asFile().getBlocks())[0];
    Assert.assertEquals(1, bm.getCorruptReplicas(blockInfo).size());
  } finally {
    for (DataNode dn : cluster.getDataNodes()) {
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
    }
  }
}
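Because the heartbeats are disabled, the corrupt-replica record should still be present when the test asserts. If timing were ever an issue, the single assertion could be replaced with a polling check; a sketch under that assumption, reusing the variables from the test above:

  // Hedged sketch: poll the BlockManager instead of asserting immediately,
  // in case the client's corruption report has not yet reached the NameNode.
  int corrupt = 0;
  for (int i = 0; i < 50 && corrupt == 0; i++) { // up to ~5s at 100ms steps
    corrupt = bm.getCorruptReplicas(blockInfo).size();
    if (corrupt == 0) {
      try {
        Thread.sleep(100);
      } catch (InterruptedException ignored) {
        Thread.currentThread().interrupt();
      }
    }
  }
  Assert.assertEquals(1, corrupt);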
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache.
The class TestBlockTokenWithDFSStriped, method isBlockTokenExpired.
@Override
protected boolean isBlockTokenExpired(LocatedBlock lb) throws IOException {
  LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
  LocatedBlock[] internalBlocks = StripedBlockUtil.parseStripedBlockGroup(lsb,
      cellSize, dataBlocks, parityBlocks);
  for (LocatedBlock internalBlock : internalBlocks) {
    if (super.isBlockTokenExpired(internalBlock)) {
      return true;
    }
  }
  return false;
}
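Note that parseStripedBlockGroup can leave null entries for internal blocks that are missing (checkData above explicitly guards with lb == null), so a defensive variant of this override might skip such slots; a sketch:

  @Override
  protected boolean isBlockTokenExpired(LocatedBlock lb) throws IOException {
    LocatedStripedBlock lsb = (LocatedStripedBlock) lb;
    for (LocatedBlock internalBlock : StripedBlockUtil.parseStripedBlockGroup(
        lsb, cellSize, dataBlocks, parityBlocks)) {
      // skip slots with no located internal block to avoid a possible NPE
      if (internalBlock != null && super.isBlockTokenExpired(internalBlock)) {
        return true;
      }
    }
    return false;
  }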