Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestWebHDFS, method testWebHdfsAppend.
@Test
public void testWebHdfsAppend() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  final int dnNumber = 3;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNumber).build();
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
    final DistributedFileSystem fs = cluster.getFileSystem();
    final Path appendFile = new Path("/testAppend.txt");
    final String content = "hello world";
    DFSTestUtil.writeFile(fs, appendFile, content);
    // Stop all but one datanode, so the append pipeline cannot satisfy
    // the default replication factor of 3.
    for (int index = 0; index < dnNumber - 1; index++) {
      cluster.shutdownDataNode(index);
    }
    cluster.restartNameNodes();
    cluster.waitActive();
    try {
      DFSTestUtil.appendFile(webFS, appendFile, content);
      fail("Should fail to append file since datanode number is 1 and replication is 3");
    } catch (IOException ignored) {
      // The append failed, so the file content must be unchanged.
      String resultContent = DFSTestUtil.readFile(fs, appendFile);
      assertTrue(resultContent.equals(content));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown(true);
    }
  }
}
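Outside of a test, the same append path can be exercised through the standard FileSystem API by opening the file system with a webhdfs:// URI. A minimal sketch, with hypothetical host, port, path, and content (the test above shows why such an append fails when live datanodes cannot satisfy the replication factor):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsAppendExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "webhdfs" scheme routes operations over the namenode's HTTP
    // interface; host and port here are placeholders for a real cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode.example.com:50070"), conf);
    Path file = new Path("/testAppend.txt");
    // Append only succeeds if enough live datanodes exist to satisfy
    // the file's replication factor, which is what the test exercises.
    try (FSDataOutputStream out = fs.append(file)) {
      out.writeBytes("more content");
    }
  }
}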
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestDelegationToken, method testAddDelegationTokensDFSApi.
@Test
public void testAddDelegationTokensDFSApi() throws Exception {
  UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
  DistributedFileSystem dfs = cluster.getFileSystem();
  Credentials creds = new Credentials();
  final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  Assert.assertEquals(1, creds.numberOfTokens());
  checkTokenIdentifier(ugi, tokens[0]);
  final Token<?>[] tokens2 = dfs.addDelegationTokens("JobTracker", creds);
  // The credentials already hold a token for this file system,
  // so no new one is issued.
  Assert.assertEquals(0, tokens2.length);
  Assert.assertEquals(1, creds.numberOfTokens());
}
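A minimal sketch of how an application might use the same addDelegationTokens API when preparing work for submission; the renewer name is a placeholder, and the call is the generic FileSystem variant rather than anything specific to this test class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class TokenCollector {
  // Fetch HDFS delegation tokens into a Credentials bag, e.g. before
  // submitting a job that will be renewed by the given renewer.
  static Credentials collect(Configuration conf, String renewer)
      throws Exception {
    FileSystem fs = FileSystem.get(conf);
    Credentials creds = new Credentials();
    // addDelegationTokens skips file systems whose token is already
    // present in creds, mirroring the second call in the test above.
    Token<?>[] issued = fs.addDelegationTokens(renewer, creds);
    System.out.println("Newly issued tokens: " + issued.length);
    // Make the tokens visible to code running as the current user.
    UserGroupInformation.getCurrentUser().addCredentials(creds);
    return creds;
  }
}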
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestDelegationToken, method testDelegationTokenWithDoAs.
@Test
public void testDelegationTokenWithDoAs() throws Exception {
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Credentials creds = new Credentials();
  final Token<?>[] tokens = dfs.addDelegationTokens("JobTracker", creds);
  Assert.assertEquals(1, tokens.length);
  @SuppressWarnings("unchecked")
  final Token<DelegationTokenIdentifier> token =
      (Token<DelegationTokenIdentifier>) tokens[0];
  final UserGroupInformation longUgi =
      UserGroupInformation.createRemoteUser("JobTracker/foo.com@FOO.COM");
  final UserGroupInformation shortUgi =
      UserGroupInformation.createRemoteUser("JobTracker");
  // The renewer "JobTracker" matches the short name of longUgi,
  // so renewal must succeed for both identities.
  longUgi.doAs(new PrivilegedExceptionAction<Object>() {

    @Override
    public Object run() throws IOException {
      try {
        token.renew(config);
      } catch (Exception e) {
        Assert.fail("Could not renew delegation token for user " + longUgi);
      }
      return null;
    }
  });
  shortUgi.doAs(new PrivilegedExceptionAction<Object>() {

    @Override
    public Object run() throws Exception {
      token.renew(config);
      return null;
    }
  });
  longUgi.doAs(new PrivilegedExceptionAction<Object>() {

    @Override
    public Object run() throws IOException {
      try {
        token.cancel(config);
      } catch (Exception e) {
        Assert.fail("Could not cancel delegation token for user " + longUgi);
      }
      return null;
    }
  });
}
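The doAs pattern in this test is the general mechanism for executing file-system calls as another user. A minimal standalone sketch, assuming a hypothetical remote user name and path:

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    // Run file-system calls under another identity; the user name
    // here is a placeholder.
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("JobTracker");
    boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
      @Override
      public Boolean run() throws Exception {
        FileSystem fs = FileSystem.get(conf);
        return fs.exists(new Path("/tmp"));
      }
    });
    System.out.println("/tmp exists: " + exists);
  }
}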
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestMover, method testWithinSameNode.
private void testWithinSameNode(Configuration conf) throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleWithinSameNode/file";
    Path dir = new Path("/testScheduleWithinSameNode");
    dfs.mkdirs(dir);
    // write to DISK
    dfs.setStoragePolicy(dir, "HOT");
    final FSDataOutputStream out = dfs.create(new Path(file));
    out.writeChars("testScheduleWithinSameNode");
    out.close();
    // verify before movement
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    StorageType[] storageTypes = lb.getStorageTypes();
    for (StorageType storageType : storageTypes) {
      Assert.assertTrue(StorageType.DISK == storageType);
    }
    // move to ARCHIVE
    dfs.setStoragePolicy(dir, "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", dir.toString() });
    Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
    // Wait until the namenode is notified of the new block locations.
    waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
  } finally {
    cluster.shutdown();
  }
}
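Outside MiniDFSCluster, the same policy change is a two-step process: set the policy, then run the Mover to migrate existing replicas. A minimal sketch of the policy half, assuming a hypothetical directory; note that setStoragePolicy alone does not move any blocks:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StoragePolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path dir = new Path("/archive-me");  // hypothetical path
    fs.mkdirs(dir);
    // HOT keeps replicas on DISK; COLD places them on ARCHIVE. Changing
    // the policy only affects metadata; the Mover tool (run in the test
    // via ToolRunner) performs the actual block migration.
    fs.setStoragePolicy(dir, "COLD");
    BlockStoragePolicySpi policy = fs.getStoragePolicy(dir);
    System.out.println("Policy on " + dir + ": " + policy.getName());
  }
}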
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From the class TestMover, method testMoverFailedRetryWithPinnedBlocks.
/**
 * Test to verify that the mover works well with pinned blocks as well as
 * failed blocks, and continues retrying only the failed blocks.
 */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(2)
      .storageTypes(new StorageType[][] {
          { StorageType.DISK, StorageType.ARCHIVE },
          { StorageType.DISK, StorageType.ARCHIVE } })
      .build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String parentDir = "/parent";
    dfs.mkdirs(new Path(parentDir));
    final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Adding pinned blocks.
    createFileWithFavoredDatanodes(conf, cluster, dfs);
    // Delete a block file so that the block move fails with
    // FileNotFoundException.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
    Assert.assertEquals("Wrong block count", 2,
        locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(parentDir), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] { "-p", parentDir });
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
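The pinned blocks in this test come from createFileWithFavoredDatanodes, whose body is not shown. A sketch of how a file might be created with favored datanodes via the DistributedFileSystem create overload that accepts them; the datanode address, path, and sizes are placeholders, and pinning additionally requires dfs.datanode.block-pinning.enabled=true:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FavoredNodesExample {
  static void writePinned(DistributedFileSystem dfs) throws Exception {
    // Placeholder datanode address; when block pinning is enabled,
    // blocks written to favored nodes are pinned there and the Mover
    // will not relocate them (the failure mode the test retries past).
    InetSocketAddress[] favored = {
        new InetSocketAddress("dn1.example.com", 9866) };
    try (FSDataOutputStream out = dfs.create(
        new Path("/parent/pinnedFile"),  // hypothetical path
        FsPermission.getFileDefault(),
        true,                            // overwrite
        4096,                            // buffer size
        (short) 1,                       // replication
        128 * 1024 * 1024L,              // block size
        null,                            // progress callback
        favored)) {
      out.writeBytes("pinned data");
    }
  }
}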