Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache.
The class DataNode, method registerBlockPoolWithSecretManager.
/**
 * After the block pool has contacted the NN, registers that block pool
 * with the secret manager, updating it with the secrets provided by the NN.
 * @throws IOException on error
 */
private synchronized void registerBlockPoolWithSecretManager(
    DatanodeRegistration bpRegistration, String blockPoolId)
    throws IOException {
  ExportedBlockKeys keys = bpRegistration.getExportedKeys();
  if (!hasAnyBlockPoolRegistered) {
    hasAnyBlockPoolRegistered = true;
    isBlockTokenEnabled = keys.isBlockTokenEnabled();
  } else {
    if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
      throw new RuntimeException("Inconsistent configuration of block access"
          + " tokens. Either all block pools must be configured to use block"
          + " tokens, or none may be.");
    }
  }
  if (!isBlockTokenEnabled) {
    return;
  }
  if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
    long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
    long blockTokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: for block pool "
        + blockPoolId + " keyUpdateInterval="
        + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
        + blockTokenLifetime / (60 * 1000) + " min(s)");
    final boolean enableProtobuf = getConf().getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
    final BlockTokenSecretManager secretMgr = new BlockTokenSecretManager(
        0, blockTokenLifetime, blockPoolId, dnConf.encryptionAlgorithm,
        enableProtobuf);
    blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
  }
}
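For reference, a minimal standalone sketch of the ExportedBlockKeys accessors this method consumes. The demo class name KeysDump is made up for illustration; ExportedBlockKeys.DUMMY_KEYS is the placeholder instance Hadoop ships for the tokens-disabled case, whereas a live NN hands real keys to the DN via DatanodeRegistration#getExportedKeys().

import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;

// Hypothetical demo class, not part of Hadoop.
public class KeysDump {
  public static void main(String[] args) {
    // DUMMY_KEYS models the tokens-disabled case: isBlockTokenEnabled()
    // is false and both intervals are zero.
    ExportedBlockKeys keys = ExportedBlockKeys.DUMMY_KEYS;
    System.out.println("block tokens enabled : " + keys.isBlockTokenEnabled());
    System.out.println("keyUpdateInterval ms : " + keys.getKeyUpdateInterval());
    System.out.println("tokenLifetime ms     : " + keys.getTokenLifetime());
  }
}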
Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache.
The class TestComputeInvalidateWork, method testDatanodeReRegistration.
@Test(timeout = 12000)
public void testDatanodeReRegistration() throws Exception {
  // Create a file and shut down the DNs, which populates InvalidateBlocks
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Path path = new Path("/testRR");
  DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(),
      (short) NUM_OF_DATANODES, 0xED0ED0);
  DFSTestUtil.waitForReplication(dfs, path, (short) NUM_OF_DATANODES, 12000);
  for (DataNode dn : cluster.getDataNodes()) {
    dn.shutdown();
  }
  dfs.delete(path, false);
  namesystem.writeLock();
  InvalidateBlocks invalidateBlocks;
  int expected = NUM_OF_DATANODES;
  try {
    invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(
        cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
    assertEquals("Expected invalidate blocks to be the number of DNs",
        (long) expected, invalidateBlocks.numBlocks());
  } finally {
    namesystem.writeUnlock();
  }
  // Re-register each DN and see that it wipes the invalidation work
  for (DataNode dn : cluster.getDataNodes()) {
    DatanodeID did = dn.getDatanodeId();
    DatanodeRegistration reg = new DatanodeRegistration(
        new DatanodeID(UUID.randomUUID().toString(), did),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), VersionInfo.getVersion());
    namesystem.writeLock();
    try {
      bm.getDatanodeManager().registerDatanode(reg);
      expected--;
      assertEquals("Expected number of invalidate blocks to decrease",
          (long) expected, invalidateBlocks.numBlocks());
    } finally {
      namesystem.writeUnlock();
    }
  }
}
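A note on the registration above: the test passes new ExportedBlockKeys(), i.e. empty keys, because the mini cluster runs without block access tokens, so the secret-manager path shown in the DataNode snippet is never taken. The fresh UUID handed to the DatanodeID copy constructor makes the NameNode see a new node at an already-registered transfer address, so it discards the old node's state, including its pending invalidation work, which is why the count drops by one per re-registration.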