
Example 16 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

The class DataNode, method checkBlockToken.

private void checkBlockToken(ExtendedBlock block, Token<BlockTokenIdentifier> token, AccessMode accessMode) throws IOException {
    if (isBlockTokenEnabled) {
        BlockTokenIdentifier id = new BlockTokenIdentifier();
        ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
        DataInputStream in = new DataInputStream(buf);
        id.readFields(in);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Got: " + id.toString());
        }
        blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode);
    }
}
Also used : BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) ByteArrayInputStream(java.io.ByteArrayInputStream) DataInputStream(java.io.DataInputStream)
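
The decode pattern above (identifier bytes read back through readFields) is generic. As a standalone reference, here is a minimal sketch of a helper that does the same thing outside the DataNode; the class and method names are hypothetical, not part of Hadoop:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public final class BlockTokenDecoder {

    private BlockTokenDecoder() {
    }

    // Deserialize the identifier carried by a block token, mirroring the
    // readFields() pattern used in DataNode#checkBlockToken above.
    public static BlockTokenIdentifier decode(Token<BlockTokenIdentifier> token) throws IOException {
        BlockTokenIdentifier id = new BlockTokenIdentifier();
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(token.getIdentifier()))) {
            id.readFields(in);
        }
        return id;
    }
}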

Example 17 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

The class DFSTestUtil, method transferRbw.

/** For {@link TestTransferRbw} */
public static BlockOpResponseProto transferRbw(final ExtendedBlock b, final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
    assertEquals(2, datanodes.length);
    final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
    try (Socket s = DataStreamer.createSocketForPipeline(datanodes[0], datanodes.length, dfsClient);
        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(NetUtils.getOutputStream(s, writeTimeout), DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
        DataInputStream in = new DataInputStream(NetUtils.getInputStream(s))) {
        // send the request
        new Sender(out).transferBlock(b, new Token<BlockTokenIdentifier>(), dfsClient.clientName, new DatanodeInfo[] { datanodes[1] }, new StorageType[] { StorageType.DEFAULT });
        out.flush();
        return BlockOpResponseProto.parseDelimitedFrom(in);
    }
}
Also used : Sender(org.apache.hadoop.hdfs.protocol.datatransfer.Sender) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream) DataInputStream(java.io.DataInputStream) BufferedOutputStream(java.io.BufferedOutputStream) DomainSocket(org.apache.hadoop.net.unix.DomainSocket) Socket(java.net.Socket)
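
A caller of transferRbw would normally verify the returned status before relying on the transfer. A minimal usage sketch, with variable names assumed and the assertion style borrowed from typical JUnit tests (the actual check in TestTransferRbw may differ):

import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.junit.Assert;

    // "block", "dfsClient", "srcDatanode" and "targetDatanode" are assumed to
    // come from the surrounding test setup.
    BlockOpResponseProto response =
        DFSTestUtil.transferRbw(block, dfsClient, srcDatanode, targetDatanode);
    Assert.assertEquals(Status.SUCCESS, response.getStatus());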

Example 18 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

The class TestDecommissionWithStriped, method testDecommission.

private void testDecommission(int writeBytes, int storageCount, int decomNodeCount, String filename) throws IOException, Exception {
    Path ecFile = new Path(ecDir, filename);
    writeStripedFile(dfs, ecFile, writeBytes);
    List<DatanodeInfo> decommisionNodes = getDecommissionDatanode(dfs, ecFile, writeBytes, decomNodeCount);
    int deadDecomissioned = fsn.getNumDecomDeadDataNodes();
    int liveDecomissioned = fsn.getNumDecomLiveDataNodes();
    List<LocatedBlock> lbs = ((HdfsDataInputStream) dfs.open(ecFile)).getAllBlocks();
    // prepare expected block index and token list.
    List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList = new ArrayList<>();
    prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
    // Decommission node. Verify that node is decommissioned.
    decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
    assertEquals(deadDecomissioned, fsn.getNumDecomDeadDataNodes());
    assertEquals(liveDecomissioned + decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
    // Ensure decommissioned datanode is not automatically shutdown
    DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
    assertEquals("All datanodes must be alive", numDNs, client.datanodeReport(DatanodeReportType.LIVE).length);
    assertNull(checkFile(dfs, ecFile, storageCount, decommisionNodes, numDNs));
    StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes, null, blockGroupSize);
    assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
    cleanupFile(dfs, ecFile);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
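
prepareBlockIndexAndTokenList is not shown above. A sketch of what such a helper plausibly does, inferred from the maps it fills and from the LocatedStripedBlock accessors (not copied from the test source):

import java.util.HashMap;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

    private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
            List<HashMap<DatanodeInfo, Byte>> locToIndexList,
            List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
        for (LocatedBlock lb : lbs) {
            // striped block groups report a block index and a block token per location
            LocatedStripedBlock stripedBlock = (LocatedStripedBlock) lb;
            HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
            HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = new HashMap<>();
            DatanodeInfo[] locations = stripedBlock.getLocations();
            byte[] blockIndices = stripedBlock.getBlockIndices();
            Token<BlockTokenIdentifier>[] blockTokens = stripedBlock.getBlockTokens();
            for (int i = 0; i < locations.length; i++) {
                locToIndex.put(locations[i], blockIndices[i]);
                locToToken.put(locations[i], blockTokens[i]);
            }
            locToIndexList.add(locToIndex);
            locToTokenList.add(locToToken);
        }
    }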

Example 19 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

The class TestBlockReaderLocalLegacy, method testBlockReaderLocalLegacyWithAppend.

@Test(timeout = 20000)
public void testBlockReaderLocalLegacyWithAppend() throws Exception {
    final short REPL_FACTOR = 1;
    final HdfsConfiguration conf = getConfiguration(null);
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path path = new Path("/testBlockReaderLocalLegacy");
    DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
    final ClientDatanodeProtocol proxy;
    final Token<BlockTokenIdentifier> token;
    final ExtendedBlock originalBlock;
    final long originalGS;
    {
        final LocatedBlock lb = cluster.getNameNode().getRpcServer().getBlockLocations(path.toString(), 0, 1).get(0);
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(lb.getLocations()[0], conf, 60000, false);
        token = lb.getBlockToken();
        // get block and generation stamp
        final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
        originalBlock = new ExtendedBlock(blk);
        originalGS = originalBlock.getGenerationStamp();
        // test getBlockLocalPathInfo
        final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
        Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
    }
    {
        // append one byte
        FSDataOutputStream out = dfs.append(path);
        out.write(1);
        out.close();
    }
    {
        // get new generation stamp
        final LocatedBlock lb = cluster.getNameNode().getRpcServer().getBlockLocations(path.toString(), 0, 1).get(0);
        final long newGS = lb.getBlock().getGenerationStamp();
        Assert.assertTrue(newGS > originalGS);
        // getBlockLocalPathInfo using the original block.
        Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
        final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(originalBlock, token);
        Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
    }
    cluster.shutdown();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) BlockLocalPathInfo(org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
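
The getConfiguration(null) helper is not shown above. For the legacy local block reader to actually be used, the configuration typically also has to enable short-circuit reads and whitelist the reading user on the DataNode; the sketch below is an assumption about what such a helper sets, not the test's actual code:

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.security.UserGroupInformation;

    private static HdfsConfiguration getConfiguration() throws IOException {
        HdfsConfiguration conf = new HdfsConfiguration();
        // enable short-circuit reads and force the legacy (RPC-based) local reader
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
        // the legacy path requires the reading user to be whitelisted on the DataNode
        conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
            UserGroupInformation.getCurrentUser().getShortUserName());
        return conf;
    }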

Example 20 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

The class TestBlockTokenWithDFS, method testAppend.

/**
   * testing that APPEND operation can handle token expiration when
   * re-establishing pipeline is needed
   */
@Test
public void testAppend() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        assertEquals(numDataNodes, cluster.getDataNodes().size());
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
        Path fileToAppend = new Path(FILE_TO_APPEND);
        FileSystem fs = cluster.getFileSystem();
        byte[] expected = generateBytes(FILE_SIZE);
        // write a one-byte file
        FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
        stm.write(expected, 0, 1);
        stm.close();
        // open the file again for append
        stm = fs.append(fileToAppend);
        int mid = expected.length - 1;
        stm.write(expected, 1, mid - 1);
        stm.hflush();
        /*
       * wait till token used in stm expires
       */
        Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
        while (!SecurityTestUtil.isBlockTokenExpired(token)) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException ignored) {
            }
        }
        // remove a datanode to force re-establishing pipeline
        cluster.stopDataNode(0);
        // append the rest of the file
        stm.write(expected, mid, expected.length - mid);
        stm.close();
        // check if append is successful
        FSDataInputStream in5 = fs.open(fileToAppend);
        assertTrue(checkFile1(in5, expected));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) BlockTokenSecretManager(org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager) Test(org.junit.Test)
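
DFSTestUtil.getBlockToken(stm) is not shown above. It presumably unwraps the FSDataOutputStream and asks the underlying DFSOutputStream for the token of the block currently being written; a sketch along those lines, assuming the helper lives in the org.apache.hadoop.hdfs package where that accessor is visible:

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

    // The wrapped stream of an HDFS write is a DFSOutputStream, which exposes
    // the block token used for the current block of the write pipeline.
    public static Token<BlockTokenIdentifier> getBlockToken(FSDataOutputStream out) {
        return ((DFSOutputStream) out.getWrappedStream()).getBlockToken();
    }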

Aggregations

BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 23 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
Test (org.junit.Test): 7 usages
IOException (java.io.IOException): 6 usages
Path (org.apache.hadoop.fs.Path): 5 usages
DataInputStream (java.io.DataInputStream): 4 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
Socket (java.net.Socket): 4 usages
HashMap (java.util.HashMap): 4 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 4 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 usages
Token (org.apache.hadoop.security.token.Token): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory): 3 usages
Peer (org.apache.hadoop.hdfs.net.Peer): 3 usages