
Example 1 with BlockTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.

Class TestDFSStripedOutputStreamWithFailure, method runTest:

/**
   * runTest implementation.
   * @param length file length
   * @param killPos kill positions, in ascending order
   * @param dnIndex for each kill position, the index of the DN to kill when that position is reached
   * @param tokenExpire whether to wait for the block token to expire before killing a DN
   * @throws Exception
   */
private void runTest(final int length, final int[] killPos, final int[] dnIndex, final boolean tokenExpire) throws Exception {
    if (killPos[0] <= FLUSH_POS) {
        LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS=" + FLUSH_POS + ", length=" + length + ", dnIndex=" + Arrays.toString(dnIndex));
        //skip test
        return;
    }
    Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos=%s", length, killPos);
    Preconditions.checkArgument(killPos.length == dnIndex.length);
    final Path p = new Path(dir, "dn" + Arrays.toString(dnIndex) + "len" + length + "kill" + Arrays.toString(killPos));
    final String fullPath = p.toString();
    LOG.info("fullPath=" + fullPath);
    if (tokenExpire) {
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    }
    final AtomicInteger pos = new AtomicInteger();
    final FSDataOutputStream out = dfs.create(p);
    final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out.getWrappedStream();
    // the first GS of this block group, i.e. the GS before any blockRecovery bump
    long firstGS = -1;
    // the old GS before bumping
    long oldGS = -1;
    List<Long> gsList = new ArrayList<>();
    final List<DatanodeInfo> killedDN = new ArrayList<>();
    int numKilled = 0;
    while (pos.get() < length) {
        final int i = pos.getAndIncrement();
        if (numKilled < killPos.length && i == killPos[numKilled]) {
            assertTrue(firstGS != -1);
            final long gs = getGenerationStamp(stripedOut);
            if (numKilled == 0) {
                assertEquals(firstGS, gs);
            } else {
                // TODO: implement hflush/hsync and verify that gs is strictly greater than oldGS
                assertTrue(gs >= oldGS);
            }
            oldGS = gs;
            if (tokenExpire) {
                DFSTestUtil.flushInternal(stripedOut);
                waitTokenExpires(out);
            }
            killedDN.add(killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
            numKilled++;
        }
        write(out, i);
        if (i % blockGroupSize == FLUSH_POS) {
            firstGS = getGenerationStamp(stripedOut);
            oldGS = firstGS;
        }
        if (i > 0 && (i + 1) % blockGroupSize == 0) {
            gsList.add(oldGS);
        }
    }
    gsList.add(oldGS);
    out.close();
    assertEquals(dnIndex.length, numKilled);
    StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
    cluster.triggerBlockReports();
    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList, blockGroupSize);
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ArrayList (java.util.ArrayList), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)
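
For orientation, a driver for this helper might look like the sketch below. The test name, kill position, and file length are illustrative assumptions chosen only to satisfy the helper's preconditions (killPos[0] > FLUSH_POS and length > killPos[0]); they are not the test's actual parameter matrix.

@Test(timeout = 240000)
public void testKillDatanodeAfterFlushWithExpiredToken() throws Exception {
    // both preconditions of runTest hold by construction:
    // killPos[0] = FLUSH_POS + 1 > FLUSH_POS, and length = killPos[0] + 1 > killPos[0]
    final int[] killPos = { FLUSH_POS + 1 };
    final int[] dnIndex = { 0 };
    final int length = FLUSH_POS + 2;
    runTest(length, killPos, dnIndex, true);
}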

Example 2 with BlockTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.

Class TestBlockTokenWithDFS, method testWrite:

/**
   * Tests that the WRITE operation can handle token expiration when the
   * pipeline needs to be re-established.
   */
@Test
public void testWrite() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        assertEquals(numDataNodes, cluster.getDataNodes().size());
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
        Path fileToWrite = new Path(FILE_TO_WRITE);
        FileSystem fs = cluster.getFileSystem();
        byte[] expected = generateBytes(FILE_SIZE);
        FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes, BLOCK_SIZE);
        // write a partial block
        int mid = expected.length - 1;
        stm.write(expected, 0, mid);
        stm.hflush();
        /*
         * wait till token used in stm expires
         */
        Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
        while (!SecurityTestUtil.isBlockTokenExpired(token)) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException ignored) {
            }
        }
        // remove a datanode to force re-establishing pipeline
        cluster.stopDataNode(0);
        // write the rest of the file
        stm.write(expected, mid, expected.length - mid);
        stm.close();
        // check if write is successful
        FSDataInputStream in4 = fs.open(fileToWrite);
        assertTrue(checkFile1(in4, expected));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager), Test (org.junit.Test)
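
Note that the busy-wait in the example is unbounded: if the token lifetime were not actually shortened, the loop would spin forever. A bounded variant is sketched below; the helper and its name are hypothetical, not part of the Hadoop test, and it assumes the same imports as the example above.

// Hypothetical helper: wait for the block token to expire, failing fast
// instead of spinning indefinitely if the shortened lifetime never takes effect.
private static void waitForTokenExpiry(Token<BlockTokenIdentifier> token,
        long timeoutMs) throws IOException, InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
        if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException(
                "block token did not expire within " + timeoutMs + " ms");
        }
        Thread.sleep(10);
    }
}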

Example 3 with BlockTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.

Class TestBlockTokenWithDFS, method doTestRead:

protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped) throws Exception {
    final int numDataNodes = cluster.getDataNodes().size();
    final NameNode nn = cluster.getNameNode();
    final NamenodeProtocols nnProto = nn.getRpcServer();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second) initially
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToRead = new Path(FILE_TO_READ);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    createFile(fs, fileToRead, expected);
    /*
     * setup for testing expiration handling of cached tokens
     */
    // read using blockSeekTo(). Acquired tokens are cached in in1
    FSDataInputStream in1 = fs.open(fileToRead);
    assertTrue(checkFile1(in1, expected));
    // read using blockSeekTo(). Acquired tokens are cached in in2
    FSDataInputStream in2 = fs.open(fileToRead);
    assertTrue(checkFile1(in2, expected));
    // read using fetchBlockByteRange(). Acquired tokens are cached in in3
    FSDataInputStream in3 = fs.open(fileToRead);
    assertTrue(checkFile2(in3, expected));
    /*
     * testing READ interface on DN using a BlockReader
     */
    // create and immediately close a DFSClient; the reads below access the
    // DataNode directly through a BlockReader rather than through this client
    DFSClient client = null;
    try {
        client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    } finally {
        if (client != null)
            client.close();
    }
    List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
    // first block
    LocatedBlock lblock = locatedBlocks.get(0);
    // verify token is not expired
    assertFalse(isBlockTokenExpired(lblock));
    // read with valid token, should succeed
    tryRead(conf, lblock, true);
    while (!isBlockTokenExpired(lblock)) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
    }
    /*
     * continue testing READ interface on DN using a BlockReader
     */
    // verify token is expired
    assertTrue(isBlockTokenExpired(lblock));
    // read should fail
    tryRead(conf, lblock, false);
    // use a valid new token
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    // read should succeed
    tryRead(conf, lblock, true);
    // use a token with wrong blockID
    long rightId = lblock.getBlock().getBlockId();
    long wrongId = rightId + 1;
    lblock.getBlock().setBlockId(wrongId);
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    lblock.getBlock().setBlockId(rightId);
    // read should fail
    tryRead(conf, lblock, false);
    // use a token with wrong access modes
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
    // read should fail
    tryRead(conf, lblock, false);
    // set a long token lifetime for future tokens
    SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
    /*
     * testing that when cached tokens are expired, DFSClient will re-fetch
     * tokens transparently for READ.
     */
    // confirm all tokens cached in in1 are expired by now
    List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
        assertTrue(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // confirm all tokens cached in in2 are expired by now
    List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
        assertTrue(isBlockTokenExpired(blk));
    }
    // verify that transparent token re-fetch also works when reading via
    // another interface method (seekToNewSource, where supported)
    if (isStriped) {
        // striped block doesn't support seekToNewSource
        in2.seek(0);
    } else {
        assertTrue(in2.seekToNewSource(0));
    }
    assertTrue(checkFile1(in2, expected));
    // confirm all tokens cached in in3 are expired by now
    List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
        assertTrue(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));
    /*
     * testing that after datanodes are restarted on the same ports, cached
     * tokens should still work and there is no need to fetch new tokens from
     * namenode. This test should run while namenode is down (to make sure no
     * new tokens can be fetched from namenode).
     */
    // restart datanodes on the same ports that they currently use
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    cluster.shutdownNameNode(0);
    // confirm tokens cached in in1 are still valid
    lblocks = DFSTestUtil.getAllBlocks(in1);
    for (LocatedBlock blk : lblocks) {
        assertFalse(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // confirm tokens cached in in2 are still valid
    lblocks2 = DFSTestUtil.getAllBlocks(in2);
    for (LocatedBlock blk : lblocks2) {
        assertFalse(isBlockTokenExpired(blk));
    }
    // verify blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // confirm tokens cached in in3 are still valid
    lblocks3 = DFSTestUtil.getAllBlocks(in3);
    for (LocatedBlock blk : lblocks3) {
        assertFalse(isBlockTokenExpired(blk));
    }
    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));
    /*
     * testing that when namenode is restarted, cached tokens should still
     * work and there is no need to fetch new tokens from namenode. Like the
     * previous test, this test should also run while namenode is down. The
     * setup for this test depends on the previous test.
     */
    // restart the namenode and then shut it down for test
    cluster.restartNameNode(0);
    cluster.shutdownNameNode(0);
    // verify blockSeekTo() still works (forced to use cached tokens)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify again blockSeekTo() still works (forced to use cached tokens)
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() still works (forced to use cached tokens)
    assertTrue(checkFile2(in3, expected));
    /*
     * testing that after both namenode and datanodes got restarted (namenode
     * first, followed by datanodes), DFSClient can't access DN without
     * re-fetching tokens and is able to re-fetch tokens transparently. The
     * setup of this test depends on the previous test.
     */
    // restore the cluster and restart the datanodes for test
    cluster.restartNameNode(0);
    assertTrue(cluster.restartDataNodes(true));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    // shutdown namenode so that DFSClient can't get new tokens from namenode
    cluster.shutdownNameNode(0);
    // verify blockSeekTo() fails (cached tokens become invalid)
    in1.seek(0);
    assertFalse(checkFile1(in1, expected));
    // verify fetchBlockByteRange() fails (cached tokens become invalid)
    assertFalse(checkFile2(in3, expected));
    // restart the namenode to allow DFSClient to re-fetch tokens
    cluster.restartNameNode(0);
    // verify blockSeekTo() works again (by transparently re-fetching
    // tokens from namenode)
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() works again (by transparently
    // re-fetching tokens from namenode)
    assertTrue(checkFile2(in3, expected));
    /*
     * testing that when datanodes are restarted on different ports, DFSClient
     * is able to re-fetch tokens transparently to connect to them
     */
    // restart datanodes on newly assigned ports
    assertTrue(cluster.restartDataNodes(false));
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    // verify blockSeekTo() is able to re-fetch token transparently
    in1.seek(0);
    assertTrue(checkFile1(in1, expected));
    // verify blockSeekTo() is able to re-fetch token transparently
    if (isStriped) {
        in2.seek(0);
    } else {
        in2.seekToNewSource(0);
    }
    assertTrue(checkFile1(in2, expected));
    // verify fetchBlockByteRange() is able to re-fetch token transparently
    assertTrue(checkFile2(in3, expected));
}
Also used: Path (org.apache.hadoop.fs.Path), DFSClient (org.apache.hadoop.hdfs.DFSClient), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), InetSocketAddress (java.net.InetSocketAddress), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)
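
One detail worth noting in the wrong-blockID step above: the LocatedBlock is temporarily pointed at a different block ID so that setBlockToken generates a token bound to the wrong block, and the real ID is then restored, leaving a mismatched token attached. The same pattern as a standalone helper (the helper itself is hypothetical; the calls are the ones used in the example):

// Hypothetical helper capturing the wrong-blockID pattern from doTestRead:
// attach a READ token that was generated for a different block ID.
private static void attachTokenForWrongBlockId(BlockManager bm,
        LocatedBlock lblock) throws IOException {
    final long rightId = lblock.getBlock().getBlockId();
    // temporarily point the block at a non-existent ID ...
    lblock.getBlock().setBlockId(rightId + 1);
    bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
    // ... then restore it; the attached token no longer matches the block
    lblock.getBlock().setBlockId(rightId);
}

After such a call, tryRead(conf, lblock, false) asserts that the DataNode rejects the mismatched token, exactly as the example does inline.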

Example 4 with BlockTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.

Class TestFailoverWithBlockTokensEnabled, method lowerKeyUpdateIntervalAndClearKeys:

private static void lowerKeyUpdateIntervalAndClearKeys(FSNamesystem namesystem) {
    BlockTokenSecretManager btsm = namesystem.getBlockManager().getBlockTokenSecretManager();
    // shorten the key update interval and token lifetime to 2 seconds
    btsm.setKeyUpdateIntervalForTesting(2 * 1000);
    btsm.setTokenLifetime(2 * 1000);
    // drop all existing keys so fresh, short-lived keys must be generated
    btsm.clearAllKeysForTesting();
}
Also used: BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)
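
In an HA test this would typically be applied to each NameNode so that both active and standby issue short-lived keys. A minimal sketch, assuming a two-NameNode MiniDFSCluster held in a field named cluster:

// Sketch: shorten key lifetimes on every NameNode of an assumed
// two-NameNode MiniDFSCluster before exercising failover.
for (int nnIndex = 0; nnIndex < 2; nnIndex++) {
    lowerKeyUpdateIntervalAndClearKeys(cluster.getNamesystem(nnIndex));
}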

Example 5 with BlockTokenSecretManager

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager in project hadoop by apache.

Class TestFailoverWithBlockTokensEnabled, method setAndCheckSerialNumber:

private void setAndCheckSerialNumber(int serialNumber, BlockTokenSecretManager... btsms) {
    for (BlockTokenSecretManager btsm : btsms) {
        btsm.setSerialNo(serialNumber);
    }
    // verify that no two managers derived the same effective serial number
    for (int i = 0; i < btsms.length; i++) {
        for (int j = 0; j < btsms.length; j++) {
            if (j == i) {
                continue;
            }
            int first = btsms[i].getSerialNoForTesting();
            int second = btsms[j].getSerialNoForTesting();
            assertFalse("Overlap found for set serial number (" + serialNumber + ") is " + i + ": " + first + " == " + j + ": " + second, first == second);
        }
    }
}
Also used: BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)
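
A caller would exercise boundary values to confirm that each manager maps the raw serial number into its own disjoint range. A sketch, where btsm1, btsm2, and btsm3 are assumed fixtures (for example, one per NameNode in an HA setup):

// Sketch: no two managers should end up with the same effective serial
// number, even for boundary inputs. btsm1..btsm3 are assumed fixtures.
setAndCheckSerialNumber(0, btsm1, btsm2, btsm3);
setAndCheckSerialNumber(Integer.MAX_VALUE, btsm1, btsm2, btsm3);
setAndCheckSerialNumber(Integer.MIN_VALUE, btsm1, btsm2, btsm3);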

Aggregations

BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 10 uses
Test (org.junit.Test): 5 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 uses
Path (org.apache.hadoop.fs.Path): 4 uses
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 4 uses
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 uses
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 1 use
ArrayList (java.util.ArrayList): 1 use
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1 use
FileChecksum (org.apache.hadoop.fs.FileChecksum): 1 use
DFSClient (org.apache.hadoop.hdfs.DFSClient): 1 use
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 use
DataEncryptionKey (org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey): 1 use
ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys): 1 use
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 1 use