Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
From the class TestDecommissionWithStriped, method prepareBlockIndexAndTokenList:
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (LocatedBlock lb : lbs) {
    HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
    locToIndexList.add(locToIndex);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
    locToTokenList.add(locToToken);
    DatanodeInfo[] di = lb.getLocations();
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
    for (int i = 0; i < di.length; i++) {
      // remember, per datanode, which block index and block token it held
      // before sorting, so later assertions can verify the sorted positions
      locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
      locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
    }
  }
}
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
From the class TestSortLocatedStripedBlock, method prepareBlockIndexAndTokenList:
private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (LocatedBlock lb : lbs) {
    HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
    locToIndexList.add(locToIndex);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
    locToTokenList.add(locToToken);
    DatanodeInfo[] di = lb.getLocations();
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
    for (int i = 0; i < di.length; i++) {
      locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
      locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
    }
  }
}
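Both copies of prepareBlockIndexAndTokenList exist only to feed an assertion helper that runs after sorting. That helper is not listed on this page; the following is a minimal sketch of what it could look like, matching the assertBlockIndexAndTokenPosition call site shown further below. The body is a reconstruction under that assumption, not the Hadoop source.

// Sketch only: verifies that, after sorting, each datanode still carries
// the same block index and block token it had before sorting.
private void assertBlockIndexAndTokenPosition(List<LocatedBlock> lbs,
    List<HashMap<DatanodeInfo, Byte>> locToIndexList,
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
  for (int b = 0; b < lbs.size(); b++) {
    LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lbs.get(b);
    HashMap<DatanodeInfo, Byte> locToIndex = locToIndexList.get(b);
    HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken =
        locToTokenList.get(b);
    DatanodeInfo[] di = stripedBlk.getLocations();
    for (int i = 0; i < di.length; i++) {
      // the node now at sorted position i must still map to the
      // block index and token recorded before sorting
      Assert.assertEquals("block index mismatch for " + di[i],
          (byte) locToIndex.get(di[i]), stripedBlk.getBlockIndices()[i]);
      Assert.assertEquals("block token mismatch for " + di[i],
          locToToken.get(di[i]), stripedBlk.getBlockTokens()[i]);
    }
  }
}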
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
From the class TestSortLocatedStripedBlock, method testWithMultipleInServiceAndDecommnDatanodes:
/**
 * Test to verify sorting when multiple in-service and decommissioned
 * datanodes exist in the storage list.
 *
 * In the storage list below, decommissioned internal blocks are marked
 * with a ' (prime):
 * d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13
 * mapping to indices
 * 0', 1', 2, 3, 4, 5, 6, 7', 8', 0, 1, 7, 8, 1
 *
 * Decommissioned node indices: 0', 1', 7', 8'
 *
 * An additional in-service node, d13, is added at the end with block
 * index 1.
 *
 * So in the original list the nodes d0, d1, d7, d8 are in decommissioned
 * state.
 *
 * After sorting, the expected block index list is
 * 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 0', 1', 7', 8'
 *
 * and the expected storage list is
 * d9, d10, d2, d3, d4, d5, d6, d11, d12, d13, d0, d1, d7, d8.
 *
 * Note: after sorting, the block indices will not be in ascending order.
 */
@Test(timeout = 10000)
public void testWithMultipleInServiceAndDecommnDatanodes() {
  LOG.info("Starting test testWithMultipleInServiceAndDecommnDatanodes");
  // two located block groups
  int lbsCount = 2;
  List<Integer> decommnNodeIndices = new ArrayList<>();
  decommnNodeIndices.add(0);
  decommnNodeIndices.add(1);
  decommnNodeIndices.add(7);
  decommnNodeIndices.add(8);
  List<Integer> targetNodeIndices = new ArrayList<>();
  targetNodeIndices.addAll(decommnNodeIndices);
  // at the end, add an additional in-service node with blockIndex=1
  targetNodeIndices.add(1);
  // map holding the decommissioned node details of each located striped
  // block, used for the assertions below
  HashMap<Integer, List<String>> decommissionedNodes =
      new HashMap<>(lbsCount * decommnNodeIndices.size());
  List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlocks,
      parityBlocks, decommnNodeIndices, targetNodeIndices,
      decommissionedNodes);
  List<DatanodeInfo> staleDns = new ArrayList<>();
  for (LocatedBlock lb : lbs) {
    DatanodeInfo[] locations = lb.getLocations();
    DatanodeInfo staleDn = locations[locations.length - 1];
    // push the last heartbeat far enough into the past to mark the node stale
    staleDn.setLastUpdateMonotonic(Time.monotonicNow() - (STALE_INTERVAL * 2));
    staleDns.add(staleDn);
  }
  // prepare the expected block index and token lists
  List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
  List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList =
      new ArrayList<>();
  prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
  dm.sortLocatedBlocks(null, lbs);
  assertDecommnNodePosition(groupSize + 1, decommissionedNodes, lbs);
  assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
  for (LocatedBlock lb : lbs) {
    byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
    // after sorting, the stale block index is placed after the normal nodes
    Assert.assertEquals("Failed to move stale node to bottom!", 1,
        blockIndices[9]);
    DatanodeInfo[] locations = lb.getLocations();
    // after sorting, the stale node d13 is placed after the normal nodes
    Assert.assertEquals("Failed to move stale dn after normal one!",
        staleDns.remove(0), locations[9]);
  }
}
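The ordering the test asserts (in-service nodes first, then the stale node, then the decommissioned nodes) comes from the comparator that dm.sortLocatedBlocks applies. As a self-contained illustration of that tier policy, and not Hadoop's actual comparator class, a minimal version might look like the sketch below; it assumes DatanodeInfo.isDecommissioned() and DatanodeInfo.isStale(long) behave as their names suggest.

// Illustrative only: a comparator expressing the tier ordering that the
// test verifies. Hadoop's real sorting logic lives elsewhere; this sketch
// just makes the policy explicit.
Comparator<DatanodeInfo> tierOrder = new Comparator<DatanodeInfo>() {
  private int tier(DatanodeInfo dn) {
    if (dn.isDecommissioned()) {
      return 2;   // decommissioned nodes sort last
    }
    if (dn.isStale(STALE_INTERVAL)) {
      return 1;   // stale nodes after healthy in-service nodes
    }
    return 0;     // healthy in-service nodes first
  }

  @Override
  public int compare(DatanodeInfo a, DatanodeInfo b) {
    return Integer.compare(tier(a), tier(b));
  }
};

Assuming a stable sort, nodes keep their original relative order within each tier, which is why the expected index list reads 0..8, then 1 (the stale d13), then 0', 1', 7', 8'.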
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
From the class PBHelperClient, method convert:
public static BlockTokenSecretProto convert(
    BlockTokenIdentifier blockTokenSecret) {
  BlockTokenSecretProto.Builder builder = BlockTokenSecretProto.newBuilder();
  builder.setExpiryDate(blockTokenSecret.getExpiryDate());
  builder.setKeyId(blockTokenSecret.getKeyId());
  // userId and blockPoolId may be unset on the identifier; only copy
  // them into the proto when present
  String userId = blockTokenSecret.getUserId();
  if (userId != null) {
    builder.setUserId(userId);
  }
  String blockPoolId = blockTokenSecret.getBlockPoolId();
  if (blockPoolId != null) {
    builder.setBlockPoolId(blockPoolId);
  }
  builder.setBlockId(blockTokenSecret.getBlockId());
  for (BlockTokenIdentifier.AccessMode aMode :
      blockTokenSecret.getAccessModes()) {
    builder.addModes(convert(aMode));
  }
  return builder.build();
}
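Since BlockTokenSecretProto is an ordinary protobuf message, the result of this conversion can be serialized and parsed back with the generated protobuf API. A small round-trip sketch, where identifier stands for an already-populated BlockTokenIdentifier (a placeholder, not a variable from the method above):

// Round-trip sketch using the standard generated protobuf API.
// Note: parseFrom throws InvalidProtocolBufferException, so this belongs
// in code that declares or handles it.
BlockTokenSecretProto proto = PBHelperClient.convert(identifier);
byte[] wire = proto.toByteArray();           // serialize for transport
BlockTokenSecretProto parsed =
    BlockTokenSecretProto.parseFrom(wire);   // parse on the receiving side
assert parsed.getKeyId() == identifier.getKeyId();
assert parsed.getExpiryDate() == identifier.getExpiryDate();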
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.
From the class TestBlockTokenWithDFS, method testWrite:
/**
 * Test that a WRITE operation can handle token expiration when
 * re-establishing the pipeline is needed.
 */
@Test
public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes,
        BLOCK_SIZE);
    // write a partial block
    int mid = expected.length - 1;
    stm.write(expected, 0, mid);
    stm.hflush();
    // wait till the token used in stm expires
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // remove a datanode to force re-establishing the pipeline
    cluster.stopDataNode(0);
    // write the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();
    // check that the write succeeded
    FSDataInputStream in4 = fs.open(fileToWrite);
    assertTrue(checkFile1(in4, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
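The sleep loop above can also be expressed with Hadoop's test helper GenericTestUtils.waitFor, which polls a condition and fails with a timeout instead of spinning indefinitely. A sketch, written defensively in case SecurityTestUtil.isBlockTokenExpired declares a checked IOException (hence the wrapping):

// Equivalent wait using Hadoop's test utility; fails the test with a
// TimeoutException after 10 seconds instead of hanging. Depending on the
// Hadoop version, waitFor takes a Guava or java.util.function Supplier;
// a lambda satisfies either.
GenericTestUtils.waitFor(() -> {
  try {
    return SecurityTestUtil.isBlockTokenExpired(token);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}, 10 /* poll every 10 ms */, 10000 /* give up after 10 s */);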