Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache, from the tryRead method of the TestBlockTokenWithDFS class:
// try reading a block using a BlockReader directly
protected void tryRead(final Configuration conf, LocatedBlock lblock,
    boolean shouldSucceed) {
  InetSocketAddress targetAddr = null;
  IOException ioe = null;
  BlockReader blockReader = null;
  ExtendedBlock block = lblock.getBlock();
  try {
    DatanodeInfo[] nodes = lblock.getLocations();
    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
    blockReader = new BlockReaderFactory(new DfsClientConf(conf))
        .setFileName(BlockReaderFactory.getFileName(targetAddr,
            "test-blockpoolid", block.getBlockId()))
        .setBlock(block)
        .setBlockToken(lblock.getBlockToken())
        .setInetSocketAddress(targetAddr)
        .setStartOffset(0)
        .setLength(0)
        .setVerifyChecksum(true)
        .setClientName("TestBlockTokenWithDFS")
        .setDatanodeInfo(nodes[0])
        .setCachingStrategy(CachingStrategy.newDefaultStrategy())
        .setClientCacheContext(ClientContext.getFromConf(conf))
        .setConfiguration(conf)
        .setTracer(FsTracer.get(conf))
        .setRemotePeerFactory(new RemotePeerFactory() {
          @Override
          public Peer newConnectedPeer(InetSocketAddress addr,
              Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
              throws IOException {
            Peer peer = null;
            Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
            try {
              sock.connect(addr, HdfsConstants.READ_TIMEOUT);
              sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
              peer = DFSUtilClient.peerFromSocket(sock);
            } finally {
              if (peer == null) {
                IOUtils.closeSocket(sock);
              }
            }
            return peer;
          }
        })
        .build();
  } catch (IOException ex) {
    ioe = ex;
  } finally {
    if (blockReader != null) {
      try {
        blockReader.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  if (shouldSucceed) {
    Assert.assertNotNull("OP_READ_BLOCK: access token is invalid, "
        + "when it is expected to be valid", blockReader);
  } else {
    Assert.assertNotNull("OP_READ_BLOCK: access token is valid, "
        + "when it is expected to be invalid", ioe);
    Assert.assertTrue("OP_READ_BLOCK failed due to reasons other than access token: ",
        ioe instanceof InvalidBlockTokenException);
  }
}
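A caller of this helper first obtains a LocatedBlock from the NameNode. The following is a minimal sketch of such a caller, not part of the original test: the method name, the file path "/test-file", and the assumption that block access tokens are enabled (dfs.block.access.token.enable = true) are all illustrative.

// Hypothetical caller of tryRead. Assumes a running MiniDFSCluster and a file
// already written at the illustrative path "/test-file".
private void checkTokenEnforcement(MiniDFSCluster cluster, Configuration conf)
    throws IOException {
  LocatedBlocks blocks = cluster.getNameNode().getRpcServer()
      .getBlockLocations("/test-file", 0, 1);
  LocatedBlock lblock = blocks.get(0);
  // The token the NameNode just issued is valid, so the direct read succeeds.
  tryRead(conf, lblock, true);
}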
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache, from the ensureInvalidBlockTokensAreRejected method of the TestFailoverWithBlockTokensEnabled class:
@Test
public void ensureInvalidBlockTokensAreRejected() throws IOException,
    URISyntaxException {
  cluster.transitionToActive(0);
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  DFSTestUtil.writeFile(fs, TEST_PATH, TEST_DATA);
  assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
  DFSClient dfsClient = DFSClientAdapter.getDFSClient((DistributedFileSystem) fs);
  DFSClient spyDfsClient = Mockito.spy(dfsClient);
  Mockito.doAnswer(new Answer<LocatedBlocks>() {
    @Override
    public LocatedBlocks answer(InvocationOnMock arg0) throws Throwable {
      LocatedBlocks locatedBlocks = (LocatedBlocks) arg0.callRealMethod();
      for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
        Token<BlockTokenIdentifier> token = lb.getBlockToken();
        BlockTokenIdentifier id = lb.getBlockToken().decodeIdentifier();
        // This will make the token invalid, since the password
        // won't match anymore.
        id.setExpiryDate(Time.now() + 10);
        Token<BlockTokenIdentifier> newToken =
            new Token<BlockTokenIdentifier>(id.getBytes(),
                token.getPassword(), token.getKind(), token.getService());
        lb.setBlockToken(newToken);
      }
      return locatedBlocks;
    }
  }).when(spyDfsClient).getLocatedBlocks(Mockito.anyString(),
      Mockito.anyLong(), Mockito.anyLong());
  DFSClientAdapter.setDFSClient((DistributedFileSystem) fs, spyDfsClient);
  try {
    assertEquals(TEST_DATA, DFSTestUtil.readFile(fs, TEST_PATH));
    fail("Shouldn't have been able to read a file with invalid block tokens");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", ioe);
  }
}
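Why the tampering above invalidates the token: the password carried in a block token is a MAC that the NameNode computed over the serialized BlockTokenIdentifier, so mutating any identifier field while reusing the old password yields a pair the DataNode's check rejects. A minimal sketch of that idea in isolation, assuming token is a Token<BlockTokenIdentifier> obtained from LocatedBlock#getBlockToken():

// Sketch only; "token" is assumed to come from LocatedBlock#getBlockToken().
BlockTokenIdentifier id = token.decodeIdentifier();
// Mutating any field changes the serialized bytes returned by id.getBytes().
id.setExpiryDate(Time.now() + 10);
// The original password no longer matches the mutated identifier bytes, so a
// DataNode will reject this token.
Token<BlockTokenIdentifier> tampered = new Token<BlockTokenIdentifier>(
    id.getBytes(), token.getPassword(), token.getKind(), token.getService());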
Use of org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier in project hadoop by apache, from the testDeprecatedGetBlockLocalPathInfoRpc method of the TestShortCircuitLocalRead class:
@Test(timeout = 60000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy =
        DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000,
            false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not configured in "
          + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains("not configured in "
          + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
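To make the same RPC succeed rather than fail, the calling user has to appear in the DataNode's allow-list before the cluster starts. A hedged sketch of that setup follows; the method name and the use of the current short user name are assumptions for illustration.

// Sketch: allow-list the current user for getBlockLocalPathInfo before the
// MiniDFSCluster is built (illustrative; real deployments list specific users).
private MiniDFSCluster startClusterWithLocalPathAccess() throws IOException {
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
      UserGroupInformation.getCurrentUser().getShortUserName());
  return new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
}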