Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.
The class TestFailoverWithBlockTokensEnabled, method setAndCheckSerialNumber.
private void setAndCheckSerialNumber(int serialNumber,
    BlockTokenSecretManager... btsms) {
  for (BlockTokenSecretManager btsm : btsms) {
    btsm.setSerialNo(serialNumber);
  }
  // Every pair of managers must end up with a distinct serial number,
  // even though each was handed the same input value.
  for (int i = 0; i < btsms.length; i++) {
    for (int j = 0; j < btsms.length; j++) {
      if (j == i) {
        continue;
      }
      int first = btsms[i].getSerialNoForTesting();
      int second = btsms[j].getSerialNoForTesting();
      assertFalse("Overlap found for set serial number (" + serialNumber
          + ") is " + i + ": " + first + " == " + j + ": " + second,
          first == second);
    }
  }
}
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.
The class TestFailoverWithBlockTokensEnabled, method ensureSerialNumbersNeverOverlap.
@Test
public void ensureSerialNumbersNeverOverlap() {
  BlockTokenSecretManager btsm1 = cluster.getNamesystem(0)
      .getBlockManager().getBlockTokenSecretManager();
  BlockTokenSecretManager btsm2 = cluster.getNamesystem(1)
      .getBlockManager().getBlockTokenSecretManager();
  BlockTokenSecretManager btsm3 = cluster.getNamesystem(2)
      .getBlockManager().getBlockTokenSecretManager();
  setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE / 2, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE / 2, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MAX_VALUE / 3, btsm1, btsm2, btsm3);
  setAndCheckSerialNumber(Integer.MIN_VALUE / 3, btsm1, btsm2, btsm3);
}
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.
The class DataNode, method registerBlockPoolWithSecretManager.
/**
 * After the block pool has contacted the NN, registers that block pool
 * with the secret manager, updating it with the secrets provided by the NN.
 * @throws IOException on error
 */
private synchronized void registerBlockPoolWithSecretManager(
    DatanodeRegistration bpRegistration, String blockPoolId)
    throws IOException {
  ExportedBlockKeys keys = bpRegistration.getExportedKeys();
  if (!hasAnyBlockPoolRegistered) {
    // The first pool to register decides whether block tokens are in use.
    hasAnyBlockPoolRegistered = true;
    isBlockTokenEnabled = keys.isBlockTokenEnabled();
  } else {
    if (isBlockTokenEnabled != keys.isBlockTokenEnabled()) {
      throw new RuntimeException("Inconsistent configuration of block access"
          + " tokens. Either all block pools must be configured to use block"
          + " tokens, or none may be.");
    }
  }
  if (!isBlockTokenEnabled) {
    return;
  }
  if (!blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
    long blockKeyUpdateInterval = keys.getKeyUpdateInterval();
    long blockTokenLifetime = keys.getTokenLifetime();
    LOG.info("Block token params received from NN: for block pool "
        + blockPoolId + " keyUpdateInterval="
        + blockKeyUpdateInterval / (60 * 1000) + " min(s), tokenLifetime="
        + blockTokenLifetime / (60 * 1000) + " min(s)");
    final boolean enableProtobuf = getConf().getBoolean(
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE,
        DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
    final BlockTokenSecretManager secretMgr = new BlockTokenSecretManager(
        0, blockTokenLifetime, blockPoolId, dnConf.encryptionAlgorithm,
        enableProtobuf);
    blockPoolTokenSecretManager.addBlockPool(blockPoolId, secretMgr);
  }
}
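The first block pool to register fixes isBlockTokenEnabled for the whole DataNode, and every later pool must agree; mixing token-enabled and token-disabled pools would leave some transfers unauthenticated. A minimal standalone sketch of that invariant, with hypothetical names rather than the DataNode's actual fields:

// Sketch of the consistency rule enforced across federated block pools;
// the class and field names here are illustrative only.
final class BlockTokenConsistencyCheck {
  private Boolean blockTokensEnabled; // null until the first pool registers

  synchronized void register(String blockPoolId, boolean poolEnablesTokens) {
    if (blockTokensEnabled == null) {
      blockTokensEnabled = poolEnablesTokens; // first pool wins
    } else if (blockTokensEnabled != poolEnablesTokens) {
      throw new IllegalStateException(
          "Inconsistent block token configuration for pool " + blockPoolId);
    }
  }
}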
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.
The class TestEncryptedTransfer, method testLongLivedClient.
@Test
public void testLongLivedClient() throws IOException, InterruptedException {
  FileChecksum checksum = writeUnencryptedAndThenRestartEncryptedCluster();
  BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager()
      .getBlockTokenSecretManager();
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
  // Sleep for 15 seconds, after which the encryption key will no longer be
  // valid. It needs to be a few multiples of the block token lifetime,
  // since several block tokens are valid at any given time (the current
  // and the last two, by default).
  LOG.info("Sleeping so that encryption keys expire...");
  Thread.sleep(15 * 1000);
  LOG.info("Done sleeping.");
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  assertEquals(checksum, fs.getFileChecksum(TEST_PATH));
}
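The fixed 15-second sleep works but is timing-sensitive. A condition-based alternative is a sketch of the same GenericTestUtils.waitFor idiom the pipeline-recovery test below uses; it assumes a DFSClient client and a DataNode dn are in scope, which they are not in this particular test as written:

// Sketch: wait until the DataNode has forgotten the old encryption key
// instead of sleeping for a fixed interval. client and dn are assumed to
// be available, as in testLongLivedClientPipelineRecovery below.
final DataEncryptionKey encryptionKey = client.getEncryptionKey();
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return !dn.getBlockPoolTokenSecretManager()
        .get(encryptionKey.blockPoolId).hasKey(encryptionKey.keyId);
  }
}, 100, 30 * 1000);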
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.
The class TestEncryptedTransfer, method testLongLivedClientPipelineRecovery.
@Test
public void testLongLivedClientPipelineRecovery()
    throws IOException, InterruptedException, TimeoutException {
  if (resolverClazz != null) {
    // TestTrustedChannelResolver does not use encryption keys.
    return;
  }
  // Use 4 datanodes to make sure that after 1 datanode is stopped, the
  // client only retries establishing the pipeline with the 4th node.
  int numDataNodes = 4;
  // Do not consider load factor when selecting a datanode.
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
      false);
  setEncryptionConfigKeys();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .build();
  fs = getFileSystem(conf);
  DFSClient client = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyClient = Mockito.spy(client);
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyClient);
  writeTestDataToFile(fs);
  BlockTokenSecretManager btsm = cluster.getNamesystem().getBlockManager()
      .getBlockTokenSecretManager();
  // Reduce key update interval and token lifetime for testing.
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
  // Wait until the encryption key becomes invalid.
  LOG.info("Wait until encryption keys become invalid...");
  DataEncryptionKey encryptionKey = spyClient.getEncryptionKey();
  List<DataNode> dataNodes = cluster.getDataNodes();
  for (DataNode dn : dataNodes) {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return !dn.getBlockPoolTokenSecretManager()
            .get(encryptionKey.blockPoolId).hasKey(encryptionKey.keyId);
      }
    }, 100, 30 * 1000);
  }
  LOG.info("The encryption key is invalid on all nodes now.");
  try (FSDataOutputStream out = fs.append(TEST_PATH)) {
    DFSOutputStream dfstream = (DFSOutputStream) out.getWrappedStream();
    // Shut down the first datanode in the pipeline.
    DatanodeInfo[] targets = dfstream.getPipeline();
    cluster.stopDataNode(targets[0].getXferAddr());
    // Write data to induce pipeline recovery.
    out.write(PLAIN_TEXT.getBytes());
    out.hflush();
    assertFalse("The first datanode in the pipeline was not replaced.",
        Arrays.asList(dfstream.getPipeline()).contains(targets[0]));
  }
  // Verify that InvalidEncryptionKeyException is handled properly.
  Mockito.verify(spyClient, times(1)).clearDataEncryptionKey();
}
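The final verify call pins down the client-side contract: pipeline recovery with an expired key must clear the cached DataEncryptionKey exactly once. An illustrative sketch of that pattern, where transferBlock is a hypothetical placeholder and not DFSClient's actual method:

// Illustrative only: the recovery pattern the test verifies. On a stale
// key, the client drops its cached DataEncryptionKey so the next
// handshake fetches a fresh one from the NameNode.
try {
  transferBlock(); // hypothetical transfer attempt using the cached key
} catch (InvalidEncryptionKeyException e) {
  client.clearDataEncryptionKey(); // forget the stale key, then retry
}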