Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache: the class TestSortLocatedStripedBlock, method prepareBlockIndexAndTokenList.
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (LocatedBlock lb : lbs) {
    HashMap<DatanodeInfo, Byte> locToIndex =
        new HashMap<DatanodeInfo, Byte>();
    locToIndexList.add(locToIndex);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
    locToTokenList.add(locToToken);
    DatanodeInfo[] di = lb.getLocations();
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
    for (int i = 0; i < di.length; i++) {
      locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
      locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
    }
  }
}
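The maps captured here record, per block group, which block index and token each datanode carried before sorting. A minimal sketch of how they can back an assertion after sorting (assuming the same lbs, locToIndexList, and locToTokenList variables; this is illustrative, not the test's actual assertBlockIndexAndTokenPosition helper):

for (int i = 0; i < lbs.size(); i++) {
  LocatedStripedBlock sorted = (LocatedStripedBlock) lbs.get(i);
  DatanodeInfo[] locs = sorted.getLocations();
  byte[] indices = sorted.getBlockIndices();
  for (int j = 0; j < locs.length; j++) {
    // sorting may reorder locations, but each datanode must keep its
    // original block index and token
    Assert.assertEquals(locToIndexList.get(i).get(locs[j]).byteValue(),
        indices[j]);
    Assert.assertEquals(locToTokenList.get(i).get(locs[j]),
        sorted.getBlockTokens()[j]);
  }
}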
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache: the class TestSortLocatedStripedBlock, method testWithMultipleInServiceAndDecommnDatanodes.
/**
 * Test to verify sorting when both in-service and decommissioned
 * datanodes exist in the storage lists.
 *
 * We have the storage list below, where decommissioned internal blocks
 * are marked with a ' (prime):
 * d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13
 * mapping to indices
 * 0', 1', 2, 3, 4, 5, 6, 7', 8', 0, 1, 7, 8, 1
 *
 * Decommissioned node indices: 0', 1', 7', 8'
 *
 * An additional in-service node d13 is at the end, with block index 1.
 *
 * So in the original list, nodes d0, d1, d7, d8 are in decommissioned state.
 *
 * After sorting, the expected block indices list will be
 * 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 0', 1', 7', 8'
 *
 * After sorting, the expected storage list will be
 * d9, d10, d2, d3, d4, d5, d6, d11, d12, d13, d0, d1, d7, d8.
 *
 * Note: after sorting, the block indices will not be in ascending order.
 */
@Test(timeout = 10000)
public void testWithMultipleInServiceAndDecommnDatanodes() {
  LOG.info("Starting test testWithMultipleInServiceAndDecommnDatanodes");
  // two located block groups
  int lbsCount = 2;
  List<Integer> decommnNodeIndices = new ArrayList<>();
  decommnNodeIndices.add(0);
  decommnNodeIndices.add(1);
  decommnNodeIndices.add(7);
  decommnNodeIndices.add(8);
  List<Integer> targetNodeIndices = new ArrayList<>();
  targetNodeIndices.addAll(decommnNodeIndices);
  // at the end, add an additional in-service node with blockIndex=1
  targetNodeIndices.add(1);
  // map contains the decommissioned node details of each located striped
  // block, which will be used for assertions
  HashMap<Integer, List<String>> decommissionedNodes =
      new HashMap<>(lbsCount * decommnNodeIndices.size());
  List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlocks,
      parityBlocks, decommnNodeIndices, targetNodeIndices,
      decommissionedNodes);
  List<DatanodeInfo> staleDns = new ArrayList<>();
  for (LocatedBlock lb : lbs) {
    DatanodeInfo[] locations = lb.getLocations();
    DatanodeInfo staleDn = locations[locations.length - 1];
    staleDn.setLastUpdateMonotonic(Time.monotonicNow()
        - (STALE_INTERVAL * 2));
    staleDns.add(staleDn);
  }
  // prepare the expected block index and token lists
  List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
  List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
      new ArrayList<>();
  prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
  dm.sortLocatedBlocks(null, lbs);
  assertDecommnNodePosition(groupSize + 1, decommissionedNodes, lbs);
  assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
  for (LocatedBlock lb : lbs) {
    byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
    // after sorting, the stale node's block index is placed after the
    // normal nodes
    Assert.assertEquals("Failed to move stale node to bottom!", 1,
        blockIndices[9]);
    DatanodeInfo[] locations = lb.getLocations();
    // after sorting, the stale node d13 is placed after the normal nodes
    Assert.assertEquals("Failed to move stale dn after normal one!",
        staleDns.remove(0), locations[9]);
  }
}
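For context, the ordering the test expects is "live nodes first, then stale, then decommissioned". As an illustrative sketch only (this is not HDFS's actual comparator; STALE_INTERVAL is assumed to be the test's staleness threshold in milliseconds), that ranking could be expressed as:

// Illustrative only: rank datanodes the way the assertions above expect.
Comparator<DatanodeInfo> order = Comparator.comparingInt(dn -> {
  if (dn.isDecommissioned()) {
    return 2;                        // decommissioned nodes sort last
  } else if (dn.isStale(STALE_INTERVAL)) {
    return 1;                        // stale nodes come after live ones
  }
  return 0;                          // healthy in-service nodes first
});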
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache: the class TestFsck, method testFsckCorruptECFile.
@Test(timeout = 300000)
public void testFsckCorruptECFile() throws Exception {
  DistributedFileSystem fs = null;
  int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
  int parityBlocks =
      StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
  int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
  int totalSize = dataBlocks + parityBlocks;
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
  fs = cluster.getFileSystem();
  Map<Integer, Integer> dnIndices = new HashMap<>();
  ArrayList<DataNode> dnList = cluster.getDataNodes();
  for (int i = 0; i < totalSize; i++) {
    dnIndices.put(dnList.get(i).getIpcPort(), i);
  }
  // create file
  Path ecDirPath = new Path("/striped");
  fs.mkdir(ecDirPath, FsPermission.getDirDefault());
  fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
      StripedFileTestUtil.getDefaultECPolicy().getName());
  Path file = new Path(ecDirPath, "corrupted");
  final int length = cellSize * dataBlocks;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);
  LocatedStripedBlock lsb = (LocatedStripedBlock) fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb,
      cellSize, dataBlocks, parityBlocks);
  // make an unrecoverable ec file with corrupted blocks
  for (int i = 0; i < parityBlocks + 1; i++) {
    int ipcPort = blks[i].getLocations()[0].getIpcPort();
    int dnIndex = dnIndices.get(ipcPort);
    File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
    File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[i].getBlock());
    Assert.assertTrue("Block file does not exist", blkFile.exists());
    FileOutputStream out = new FileOutputStream(blkFile);
    out.write("corruption".getBytes());
    out.close();
  }
  // disable heartbeats from the DNs so that the corrupted block record is
  // kept in NameNode
  for (DataNode dn : cluster.getDataNodes()) {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
  }
  // read the file to trigger reportBadBlocks
  try {
    IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
        true);
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains(
        "missingChunksNum=" + (parityBlocks + 1)));
  }
  waitForUnrecoverableBlockGroup(conf);
  String outStr = runFsck(conf, 1, true, "/");
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
  outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
  assertTrue(outStr.contains("has 1 CORRUPT files"));
}
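The corruption count is chosen to exceed what erasure coding can repair: an RS(d, p) block group can reconstruct at most p missing internal blocks. A minimal arithmetic sketch, assuming the default RS-6-3 policy (values hard-coded here purely for illustration):

int dataBlocks = 6;                               // RS-6-3 data units
int parityBlocks = 3;                             // RS-6-3 parity units
int corrupted = parityBlocks + 1;                 // 4 internal blocks destroyed
boolean recoverable = corrupted <= parityBlocks;  // false: fsck reports CORRUPT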
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache: the class PBHelperClient, method convertLocatedBlock.
public static LocatedBlockProto convertLocatedBlock(LocatedBlock b) {
  if (b == null) {
    return null;
  }
  Builder builder = LocatedBlockProto.newBuilder();
  DatanodeInfo[] locs = b.getLocations();
  List<DatanodeInfo> cachedLocs =
      Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
  for (int i = 0; i < locs.length; i++) {
    DatanodeInfo loc = locs[i];
    builder.addLocs(i, PBHelperClient.convert(loc));
    boolean locIsCached = cachedLocs.contains(loc);
    builder.addIsCached(locIsCached);
    if (locIsCached) {
      cachedLocs.remove(loc);
    }
  }
  Preconditions.checkArgument(cachedLocs.size() == 0,
      "Found additional cached replica locations that are not in the set of"
          + " storage-backed locations!");
  StorageType[] storageTypes = b.getStorageTypes();
  if (storageTypes != null) {
    for (StorageType storageType : storageTypes) {
      builder.addStorageTypes(convertStorageType(storageType));
    }
  }
  final String[] storageIDs = b.getStorageIDs();
  if (storageIDs != null) {
    builder.addAllStorageIDs(Arrays.asList(storageIDs));
  }
  if (b instanceof LocatedStripedBlock) {
    LocatedStripedBlock sb = (LocatedStripedBlock) b;
    byte[] indices = sb.getBlockIndices();
    builder.setBlockIndices(PBHelperClient.getByteString(indices));
    Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
    builder.addAllBlockTokens(convert(blockTokens));
  }
  return builder.setB(PBHelperClient.convert(b.getBlock()))
      .setBlockToken(PBHelperClient.convert(b.getBlockToken()))
      .setCorrupt(b.isCorrupt())
      .setOffset(b.getStartOffset())
      .build();
}
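For striped blocks, the extra proto fields carry the block indices and per-replica tokens. A hypothetical round-trip check sketches how to verify they survive serialization; note the deserializer name convertLocatedBlockProto is an assumption about this branch, not confirmed by the snippet above:

// Hypothetical round-trip: serialize a striped block, convert it back, and
// check that the indices are preserved.
LocatedBlockProto proto = PBHelperClient.convertLocatedBlock(lsb);
LocatedBlock back = PBHelperClient.convertLocatedBlockProto(proto);
assert back instanceof LocatedStripedBlock;
assert Arrays.equals(((LocatedStripedBlock) back).getBlockIndices(),
    lsb.getBlockIndices());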
Use of org.apache.hadoop.hdfs.protocol.LocatedStripedBlock in project hadoop by apache: the class BlockManager, method setBlockToken.
/** Generate a block token for the located block. */
public void setBlockToken(final LocatedBlock b,
    final AccessMode mode) throws IOException {
  if (isBlockTokenEnabled()) {
    // Use cached UGI if serving RPC calls.
    if (b.isStriped()) {
      Preconditions.checkState(b instanceof LocatedStripedBlock);
      LocatedStripedBlock sb = (LocatedStripedBlock) b;
      byte[] indices = sb.getBlockIndices();
      Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
      ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock());
      for (int i = 0; i < indices.length; i++) {
        internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]);
        blockTokens[i] = blockTokenSecretManager.generateToken(
            NameNode.getRemoteUser().getShortUserName(), internalBlock,
            EnumSet.of(mode));
      }
      sb.setBlockTokens(blockTokens);
    } else {
      b.setBlockToken(blockTokenSecretManager.generateToken(
          NameNode.getRemoteUser().getShortUserName(), b.getBlock(),
          EnumSet.of(mode)));
    }
  }
}
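The loop relies on HDFS's striped-block ID scheme: a block group reserves the low bits of its ID, so internal block i has ID groupId + i, which is what setBlockId computes above. A minimal standalone sketch of that arithmetic (the group ID below is a made-up value with zeroed low bits, purely for illustration):

// Illustration of the ID arithmetic in the loop above.
long groupId = -9223372036854775792L;  // hypothetical block-group ID
byte[] indices = {0, 1, 2, 8};
for (byte idx : indices) {
  long internalId = groupId + idx;     // the ID each per-replica token covers
  System.out.println("index " + idx + " -> internal block ID " + internalId);
}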