Search in sources:

Example 6 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache.

From the class DataNode, method registerBlockPoolWithSecretManager:

/**
 * After the block pool has contacted the NN, registers that block pool
 * with the secret manager, updating it with the secrets provided by the NN.
 *
 * @param bpRegistration registration info returned by the NN, carrying the
 *        exported block keys for this block pool
 * @param blockPoolId id of the block pool being registered
 * @throws IOException on error
 */
private synchronized void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration, String blockPoolId) throws IOException {
    final ExportedBlockKeys exportedKeys = bpRegistration.getExportedKeys();
    if (hasAnyBlockPoolRegistered) {
        // Every block pool on this DN must agree on whether block tokens
        // are in use; a mismatch is a fatal misconfiguration.
        if (isBlockTokenEnabled != exportedKeys.isBlockTokenEnabled()) {
            throw new RuntimeException("Inconsistent configuration of block access" + " tokens. Either all block pools must be configured to use block" + " tokens, or none may be.");
        }
    } else {
        // First block pool seen: adopt its token setting as the DN-wide one.
        hasAnyBlockPoolRegistered = true;
        isBlockTokenEnabled = exportedKeys.isBlockTokenEnabled();
    }
    if (!isBlockTokenEnabled) {
        return;
    }
    if (blockPoolTokenSecretManager.isBlockPoolRegistered(blockPoolId)) {
        // Already registered — nothing to do.
        return;
    }
    final long keyUpdateIntervalMs = exportedKeys.getKeyUpdateInterval();
    final long tokenLifetimeMs = exportedKeys.getTokenLifetime();
    LOG.info("Block token params received from NN: for block pool " + blockPoolId + " keyUpdateInterval=" + keyUpdateIntervalMs / (60 * 1000) + " min(s), tokenLifetime=" + tokenLifetimeMs / (60 * 1000) + " min(s)");
    final boolean enableProtobuf = getConf().getBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE, DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_PROTOBUF_ENABLE_DEFAULT);
    final BlockTokenSecretManager poolSecretManager = new BlockTokenSecretManager(0, tokenLifetimeMs, blockPoolId, dnConf.encryptionAlgorithm, enableProtobuf);
    blockPoolTokenSecretManager.addBlockPool(blockPoolId, poolSecretManager);
}
Also used : ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) BlockTokenSecretManager(org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)

Example 7 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache.

From the class TestComputeInvalidateWork, method testDatanodeReRegistration:

@Test(timeout = 12000)
public void testDatanodeReRegistration() throws Exception {
    // Create a file, then shut the DNs down so that deleting the file
    // leaves one pending invalidation per datanode in InvalidateBlocks.
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path path = new Path("/testRR");
    DFSTestUtil.createFile(dfs, path, dfs.getDefaultBlockSize(), (short) NUM_OF_DATANODES, 0xED0ED0);
    DFSTestUtil.waitForReplication(dfs, path, (short) NUM_OF_DATANODES, 12000);
    for (DataNode datanode : cluster.getDataNodes()) {
        datanode.shutdown();
    }
    dfs.delete(path, false);
    int expected = NUM_OF_DATANODES;
    final InvalidateBlocks invalidateBlocks;
    namesystem.writeLock();
    try {
        // Peek at the BlockManager's private invalidation queue and check
        // it holds exactly one entry per datanode.
        invalidateBlocks = (InvalidateBlocks) Whitebox.getInternalState(cluster.getNamesystem().getBlockManager(), "invalidateBlocks");
        assertEquals("Expected invalidate blocks to be the number of DNs", (long) expected, invalidateBlocks.numBlocks());
    } finally {
        namesystem.writeUnlock();
    }
    // Re-register each DN (with a fresh UUID) and verify that each
    // re-registration wipes that DN's pending invalidation work.
    for (DataNode datanode : cluster.getDataNodes()) {
        final DatanodeID originalId = datanode.getDatanodeId();
        final DatanodeRegistration freshReg = new DatanodeRegistration(new DatanodeID(UUID.randomUUID().toString(), originalId), new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE), new ExportedBlockKeys(), VersionInfo.getVersion());
        namesystem.writeLock();
        try {
            bm.getDatanodeManager().registerDatanode(freshReg);
            expected--;
            assertEquals("Expected number of invalidate blocks to decrease", (long) expected, invalidateBlocks.numBlocks());
        } finally {
            namesystem.writeUnlock();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) Test(org.junit.Test)

Aggregations

ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)7 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)4 StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)4 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)4 Test (org.junit.Test)4 BlockKey (org.apache.hadoop.hdfs.security.token.block.BlockKey)2 ServiceException (com.google.protobuf.ServiceException)1 IOException (java.io.IOException)1 Configuration (org.apache.hadoop.conf.Configuration)1 Path (org.apache.hadoop.fs.Path)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 DatanodeRegistrationProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto)1 ExportedBlockKeysProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto)1 GetBlockKeysResponseProto (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto)1 BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)1 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)1 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)1 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)1