Example 21 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

From the class TestBlockTokenWithDFS, the method tryRead:

// try reading a block using a BlockReader directly
protected void tryRead(final Configuration conf, LocatedBlock lblock, boolean shouldSucceed) {
    InetSocketAddress targetAddr = null;
    IOException ioe = null;
    BlockReader blockReader = null;
    ExtendedBlock block = lblock.getBlock();
    try {
        DatanodeInfo[] nodes = lblock.getLocations();
        targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
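        // Build a BlockReader against the first replica, presenting the
        // block token carried by the LocatedBlock; with an invalid token,
        // build() fails when the datanode rejects the OP_READ_BLOCK request.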
        blockReader = new BlockReaderFactory(new DfsClientConf(conf)).
            setFileName(BlockReaderFactory.getFileName(targetAddr,
                "test-blockpoolid", block.getBlockId())).
            setBlock(block).
            setBlockToken(lblock.getBlockToken()).
            setInetSocketAddress(targetAddr).
            setStartOffset(0).
            setLength(0).
            setVerifyChecksum(true).
            setClientName("TestBlockTokenWithDFS").
            setDatanodeInfo(nodes[0]).
            setCachingStrategy(CachingStrategy.newDefaultStrategy()).
            setClientCacheContext(ClientContext.getFromConf(conf)).
            setConfiguration(conf).
            setTracer(FsTracer.get(conf)).
            setRemotePeerFactory(new RemotePeerFactory() {

            @Override
            public Peer newConnectedPeer(InetSocketAddress addr, Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId) throws IOException {
                Peer peer = null;
                Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
                try {
                    sock.connect(addr, HdfsConstants.READ_TIMEOUT);
                    sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
                    peer = DFSUtilClient.peerFromSocket(sock);
                } finally {
                    if (peer == null) {
                        IOUtils.closeSocket(sock);
                    }
                }
                return peer;
            }
        }).build();
    } catch (IOException ex) {
        ioe = ex;
    } finally {
        if (blockReader != null) {
            try {
                blockReader.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
    if (shouldSucceed) {
        Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, " + "when it is expected to be valid", blockReader);
    } else {
        Assert.assertNotNull("OP_READ_BLOCK: access token is valid, " + "when it is expected to be invalid", ioe);
        Assert.assertTrue("OP_READ_BLOCK failed due to reasons other than access token: ", ioe instanceof InvalidBlockTokenException);
    }
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), InetSocketAddress (java.net.InetSocketAddress), BlockReader (org.apache.hadoop.hdfs.BlockReader), Peer (org.apache.hadoop.hdfs.net.Peer), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), DfsClientConf (org.apache.hadoop.hdfs.client.impl.DfsClientConf), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException), BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory), RemotePeerFactory (org.apache.hadoop.hdfs.RemotePeerFactory), Socket (java.net.Socket)
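
A typical caller of this helper creates a file on a MiniDFSCluster, grabs one of its located blocks, and asserts the expected outcome. The following is a minimal sketch, not taken from the Hadoop test; the file path, sizes, and the use of DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY here are illustrative assumptions:

// Hypothetical caller of tryRead(); all setup values are illustrative only.
Configuration conf = new Configuration();
// Block tokens must be enabled, otherwise the datanode never checks them.
conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/block-token-test");
    DFSTestUtil.createFile(fs, path, 1024, (short) 1, 0L);
    LocatedBlock lblock = DFSTestUtil.getAllBlocks(fs, path).get(0);
    // A freshly issued token should allow the direct read to succeed.
    tryRead(conf, lblock, true);
} finally {
    cluster.shutdown();
}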

Example 22 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

From the class TestFailoverWithBlockTokensEnabled, the method ensureInvalidBlockTokensAreRejected:

@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException, URISyntaxException {
    cluster.transitionToActive(0);
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
    DFSClient spyDfsClient = Mockito.spy(dfsClient);
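    // Stub getLocatedBlocks() on the spy so every block the client sees
    // carries a tampered (and therefore invalid) block token.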
    Mockito.doAnswer(new Answer<LocatedBlocks>() {

        @Override
        public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
            LocatedBlocks locatedBlocks = (LocatedBlocks) arg0.callRealMethod();
            for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
                Token<BlockTokenIdentifier> token = lb.getBlockToken();
                BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
                // This will make the token invalid, since the password
                // won't match anymore
                id.setExpiryDate(Time.now() + 10);
                Token<BlockTokenIdentifier> newToken = new Token<BlockTokenIdentifier>(id.getBytes(), token.getPassword(), token.getKind(), token.getService());
                lb.setBlockToken(newToken);
            }
            return locatedBlocks;
        }
    }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(), Mockito.anyLong(), Mockito.anyLong());
    DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyDfsClient);
    try {
        assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
        fail("Shouldn't have been able to read a file with invalid block tokens");
    } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
    }
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), InvocationOnMock (org.mockito.invocation.InvocationOnMock), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Token (org.apache.hadoop.security.token.Token), IOException (java.io.IOException), Test (org.junit.Test)
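
The one-field tweak is enough because a block token's password is a MAC that the NameNode computes over the serialized identifier fields (expiry date, key id, user, block pool and block ids, access modes). The datanode re-derives the password from the identifier bytes it receives and compares it to the password carried in the token. The following is a conceptual sketch of that check, simplified rather than the literal code in BlockTokenSecretManager; it assumes token (a Token<BlockTokenIdentifier>) and secretManager (a BlockTokenSecretManager) are in scope:

// Simplified sketch of datanode-side token verification; assumed names:
// token, secretManager. Requires java.util.Arrays.
BlockTokenIdentifier id = token.decodeIdentifier();
// retrievePassword() recomputes the MAC over the serialized identifier
// using the secret key selected by the identifier's key id.
byte[] expected = secretManager.retrievePassword(id);
if (!Arrays.equals(expected, token.getPassword())) {
    // Changing any identifier field (the expiry date in the test above)
    // changes the serialized bytes, so the recomputed MAC cannot match
    // the password that was issued for the original bytes.
    throw new InvalidBlockTokenException("Block token password mismatch");
}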

Example 23 with BlockTokenIdentifier

Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache.

From the class TestShortCircuitLocalRead, the method testDeprecatedGetBlockLocalPathInfoRpc:

@Test(timeout = 60000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try {
        DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
        LocatedBlocks lb = cluster.getNameNode().getRpcServer().getBlockLocations("/tmp/x", 0, 16);
        // Create a new block object, because the block inside LocatedBlock at
        // namenode is of type BlockInfo.
        ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
        Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
        final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
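        // Ask the datanode directly over ClientDatanodeProtocol; the RPC is
        // expected to be rejected because this user is not whitelisted.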
        ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
        try {
            proxy.getBlockLocalPathInfo(blk, token);
            Assert.fail("The call should have failed as this user " + " is not configured in " + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
        } catch (IOException ex) {
            Assert.assertTrue(ex.getMessage().contains("not configured in " + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
        }
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), Test (org.junit.Test)
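
For contrast, a hypothetical variant of the setup that whitelists the calling user should let the same RPC succeed. This is an illustrative sketch, not part of the Hadoop test:

// Hypothetical setup: whitelist the current user for legacy block-local
// path access before starting the cluster.
Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        UserGroupInformation.getCurrentUser().getShortUserName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
// With this configuration, proxy.getBlockLocalPathInfo(blk, token) should
// return the local block and metadata file paths instead of throwing.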

Aggregations

BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 23 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 7 usages
Test (org.junit.Test): 7 usages
IOException (java.io.IOException): 6 usages
Path (org.apache.hadoop.fs.Path): 5 usages
DataInputStream (java.io.DataInputStream): 4 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
Socket (java.net.Socket): 4 usages
HashMap (java.util.HashMap): 4 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 4 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 usages
Token (org.apache.hadoop.security.token.Token): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
BlockReaderFactory (org.apache.hadoop.hdfs.client.impl.BlockReaderFactory): 3 usages
Peer (org.apache.hadoop.hdfs.net.Peer): 3 usages