use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
the class TestBlockTokenWithDFS method doTestRead.
protected void doTestRead(Configuration conf, MiniDFSCluster cluster, boolean isStriped) throws Exception {
  final int numDataNodes = cluster.getDataNodes().size();
  final NameNode nn = cluster.getNameNode();
  final NamenodeProtocols nnProto = nn.getRpcServer();
  final BlockManager bm = nn.getNamesystem().getBlockManager();
  final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
  // set a short token lifetime (1 second) initially
  SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
  Path fileToRead = new Path(FILE_TO_READ);
  FileSystem fs = cluster.getFileSystem();
  byte[] expected = generateBytes(FILE_SIZE);
  createFile(fs, fileToRead, expected);

  /*
   * setup for testing expiration handling of cached tokens
   */
  // read using blockSeekTo(). Acquired tokens are cached in in1
  FSDataInputStream in1 = fs.open(fileToRead);
  assertTrue(checkFile1(in1, expected));
  // read using blockSeekTo(). Acquired tokens are cached in in2
  FSDataInputStream in2 = fs.open(fileToRead);
  assertTrue(checkFile1(in2, expected));
  // read using fetchBlockByteRange(). Acquired tokens are cached in in3
  FSDataInputStream in3 = fs.open(fileToRead);
  assertTrue(checkFile2(in3, expected));

  /*
   * testing READ interface on DN using a BlockReader
   */
  DFSClient client = null;
  try {
    client = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  } finally {
    if (client != null) {
      client.close();
    }
  }
  List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
  // first block
  LocatedBlock lblock = locatedBlocks.get(0);
  // verify token is not expired
  assertFalse(isBlockTokenExpired(lblock));
  // read with valid token, should succeed
  tryRead(conf, lblock, true);
  while (!isBlockTokenExpired(lblock)) {
    try {
      Thread.sleep(10);
    } catch (InterruptedException ignored) {
    }
  }

  /*
   * continue testing READ interface on DN using a BlockReader
   */
  // verify token is expired
  assertTrue(isBlockTokenExpired(lblock));
  // read should fail
  tryRead(conf, lblock, false);
  // use a valid new token
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  // read should succeed
  tryRead(conf, lblock, true);
  // use a token with wrong blockID
  long rightId = lblock.getBlock().getBlockId();
  long wrongId = rightId + 1;
  lblock.getBlock().setBlockId(wrongId);
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  lblock.getBlock().setBlockId(rightId);
  // read should fail
  tryRead(conf, lblock, false);
  // use a token with wrong access modes
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
  // read should fail
  tryRead(conf, lblock, false);
  // set a long token lifetime for future tokens
  SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);

  /*
   * testing that when cached tokens are expired, DFSClient will re-fetch
   * tokens transparently for READ.
   */
  // confirm all tokens cached in in1 are expired by now
  List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // confirm all tokens cached in in2 are expired by now
  List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently (testing
  // via another interface method)
  if (isStriped) {
    // striped block doesn't support seekToNewSource
    in2.seek(0);
  } else {
    assertTrue(in2.seekToNewSource(0));
  }
  assertTrue(checkFile1(in2, expected));
  // confirm all tokens cached in in3 are expired by now
  List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that after datanodes are restarted on the same ports, cached
   * tokens should still work and there is no need to fetch new tokens from
   * namenode. This test should run while namenode is down (to make sure no
   * new tokens can be fetched from namenode).
   */
  // restart datanodes on the same ports that they currently use
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  cluster.shutdownNameNode(0);
  // confirm tokens cached in in1 are still valid
  lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // confirm tokens cached in in2 are still valid
  lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // confirm tokens cached in in3 are still valid
  lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that when namenode is restarted, cached tokens should still
   * work and there is no need to fetch new tokens from namenode. Like the
   * previous test, this test should also run while namenode is down. The
   * setup for this test depends on the previous test.
   */
  // restart the namenode and then shut it down for test
  cluster.restartNameNode(0);
  cluster.shutdownNameNode(0);
  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify again blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that after both namenode and datanodes got restarted (namenode
   * first, followed by datanodes), DFSClient can't access DN without
   * re-fetching tokens and is able to re-fetch tokens transparently. The
   * setup of this test depends on the previous test.
   */
  // restore the cluster and restart the datanodes for test
  cluster.restartNameNode(0);
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  // shutdown namenode so that DFSClient can't get new tokens from namenode
  cluster.shutdownNameNode(0);
  // verify blockSeekTo() fails (cached tokens become invalid)
  in1.seek(0);
  assertFalse(checkFile1(in1, expected));
  // verify fetchBlockByteRange() fails (cached tokens become invalid)
  assertFalse(checkFile2(in3, expected));
  // restart the namenode to allow DFSClient to re-fetch tokens
  cluster.restartNameNode(0);
  // verify blockSeekTo() works again (by transparently re-fetching
  // tokens from namenode)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() works again (by transparently
  // re-fetching tokens from namenode)
  assertTrue(checkFile2(in3, expected));

  /*
   * testing that when datanodes are restarted on different ports, DFSClient
   * is able to re-fetch tokens transparently to connect to them
   */
  // restart datanodes on newly assigned ports
  assertTrue(cluster.restartDataNodes(false));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify blockSeekTo() is able to re-fetch token transparently
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));
}
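The whole test above drives block tokens through the NameNode's RPC server, which is the object that implements NamenodeProtocols (obtained via nn.getRpcServer() or cluster.getNameNodeRpc()). The following is a minimal standalone sketch of that access pattern; the class name, the path "/foo", and the 1 KB file size are illustrative values, not taken from the test.

// Sketch: query block locations directly through NamenodeProtocols on a MiniDFSCluster.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

public class NamenodeProtocolsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Create a small file so the NameNode has at least one block to report.
      Path file = new Path("/foo");
      DFSTestUtil.createFile(cluster.getFileSystem(), file, 1024, (short) 1, 0L);
      // The NameNode RPC server implements NamenodeProtocols, so block
      // locations can be fetched without going through a DFSClient.
      NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
      List<LocatedBlock> blocks =
          nnRpc.getBlockLocations("/foo", 0, 1024).getLocatedBlocks();
      System.out.println("located blocks: " + blocks.size());
    } finally {
      cluster.shutdown();
    }
  }
}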
use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
the class TestCheckpoint method testCheckpointSignature.
/* Test case to test CheckpointSignature */
@Test
public void testCheckpointSignature() throws IOException {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  SecondaryNameNode secondary = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    NameNode nn = cluster.getNameNode();
    NamenodeProtocols nnRpc = nn.getRpcServer();
    secondary = startSecondaryNameNode(conf);
    // prepare checkpoint image
    secondary.doCheckpoint();
    CheckpointSignature sig = nnRpc.rollEditLog();
    // manipulate the CheckpointSignature fields
    sig.setBlockpoolID("somerandomebpid");
    sig.clusterID = "somerandomcid";
    try {
      // this should fail
      sig.validateStorageInfo(nn.getFSImage());
      assertTrue("This test is expected to fail.", false);
    } catch (Exception ignored) {
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
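The assertTrue("This test is expected to fail.", false) line only runs when validateStorageInfo fails to throw, so it acts as a forced failure. A sketch of the same check written with JUnit's fail() (assuming a static import of org.junit.Assert.fail; this is not the original test code) would read:

try {
  // validateStorageInfo should reject the manipulated signature
  sig.validateStorageInfo(nn.getFSImage());
  fail("validateStorageInfo accepted a CheckpointSignature with mismatched IDs");
} catch (Exception expected) {
  // expected: the blockpool/cluster IDs no longer match the FSImage
}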
use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
the class TestCheckpoint method testCheckpointWithFailedStorageDir.
/**
 * Test that, if a storage directory is failed when a checkpoint occurs,
 * the non-failed storage directory receives the checkpoint.
 */
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File currentDir = null;
  Configuration conf = new HdfsConfiguration();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
    secondary = startSecondaryNameNode(conf);
    // Checkpoint once
    secondary.doCheckpoint();
    // Now primary NN experiences failure of a volume -- fake by
    // setting its current dir to a-x permissions
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    StorageDirectory sd0 = storage.getStorageDir(0);
    StorageDirectory sd1 = storage.getStorageDir(1);
    currentDir = sd0.getCurrentDir();
    FileUtil.setExecutable(currentDir, false);
    // Upload checkpoint when NN has a bad storage dir. This should
    // succeed and create the checkpoint in the good dir.
    secondary.doCheckpoint();
    GenericTestUtils.assertExists(new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
    // Restore the good dir
    FileUtil.setExecutable(currentDir, true);
    nn.restoreFailedStorage("true");
    nn.rollEditLog();
    // Checkpoint again -- this should upload to both dirs
    secondary.doCheckpoint();
    assertNNHasCheckpoints(cluster, ImmutableList.of(8));
    assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
  } finally {
    if (currentDir != null) {
      FileUtil.setExecutable(currentDir, true);
    }
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
the class TestWebHdfsCreatePermissions method testPermissions.
private void testPermissions(int expectedResponse, String expectedPermission, String path, String... params) throws Exception {
  final String user = System.getProperty("user.name");
  final StringBuilder uri = new StringBuilder(cluster.getHttpUri(0));
  uri.append("/webhdfs/v1").append(path).append("?user.name=").append(user).append("&");
  for (String param : params) {
    uri.append(param).append("&");
  }
  LOG.info(uri.toString());
  try {
    URL url = new URL(uri.toString());
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    Assert.assertEquals(expectedResponse, conn.getResponseCode());
    NamenodeProtocols namenode = cluster.getNameNode().getRpcServer();
    FsPermission resultingPermission = namenode.getFileInfo(path).getPermission();
    Assert.assertEquals(expectedPermission, resultingPermission.toString());
  } finally {
    cluster.shutdown();
  }
}
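This is a parameterized helper: it issues a WebHDFS PUT built from the given query parameters, then verifies the resulting permission through NamenodeProtocols.getFileInfo(). A hypothetical invocation might look like the following; the path and the op/permission parameters are illustrative (standard WebHDFS query parameters), not copied from the test class, and FsPermission.toString() is assumed to yield the symbolic form.

// Hypothetical call: create a directory via WebHDFS MKDIRS with permission 755
// and expect the NameNode to report it as rwxr-xr-x.
testPermissions(HttpURLConnection.HTTP_OK, "rwxr-xr-x",
    "/tmp/webhdfs-perm-test", "op=MKDIRS", "permission=755");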
use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
the class TestUpdatePipelineWithSnapshots method testUpdatePipelineAfterDelete.
// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
  Configuration conf = new HdfsConfiguration();
  Path file = new Path("/test-file");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols namenode = cluster.getNameNodeRpc();
    DFSOutputStream out = null;
    try {
      // Create a file and make sure a block is allocated for it.
      out = (DFSOutputStream) (fs.create(file).getWrappedStream());
      out.write(1);
      out.hflush();
      // Create a snapshot that includes the file.
      SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path("/"), "s1");
      // Grab the block info of this file for later use.
      FSDataInputStream in = null;
      ExtendedBlock oldBlock = null;
      try {
        in = fs.open(file);
        oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
      } finally {
        IOUtils.closeStream(in);
      }
      // Allocate a new block ID/gen stamp so we can simulate pipeline
      // recovery.
      String clientName = ((DistributedFileSystem) fs).getClient().getClientName();
      LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(oldBlock, clientName);
      ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), oldBlock.getBlockId(), oldBlock.getNumBytes(), newLocatedBlock.getBlock().getGenerationStamp());
      // Delete the file from the present FS. It will still exist in the
      // previously-created snapshot. This will log an OP_DELETE for the
      // file in question.
      fs.delete(file, true);
      // Attempt to update the pipeline. This should fail, since an OP_DELETE was
      // logged for the file in question.
      try {
        namenode.updatePipeline(clientName, oldBlock, newBlock, newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
      } catch (IOException ioe) {
        // normal
        assertExceptionContains("does not exist or it is not under construction", ioe);
      }
      // Make sure the NN can restart with the edit logs as we have them now.
      cluster.restartNameNode(true);
    } finally {
      IOUtils.closeStream(out);
    }
  } finally {
    cluster.shutdown();
  }
}
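As written, the try/catch around updatePipeline only inspects the message when an IOException is thrown; if the call unexpectedly succeeded, execution would simply continue. A stricter variant (a sketch under the same identifiers, not the original test, assuming a static import of org.junit.Assert.fail) would force a failure in that case:

// Sketch: fail the test outright if updatePipeline does not throw.
try {
  namenode.updatePipeline(clientName, oldBlock, newBlock,
      newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
  fail("updatePipeline should have failed for a file removed by OP_DELETE");
} catch (IOException ioe) {
  assertExceptionContains("does not exist or it is not under construction", ioe);
}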