Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From class TestCheckpoint, method testSaveNamespace.
/**
 * Tests save namespace.
 */
@Test
public void testSaveNamespace() throws IOException {
  MiniDFSCluster cluster = null;
  DistributedFileSystem fs = null;
  FileContext fc;
  try {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fc = FileContext.getFileContext(cluster.getURI(0));

    // Saving image without safe mode should fail
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[] { "-saveNamespace" };
    try {
      admin.run(args);
    } catch (IOException eIO) {
      assertTrue(eIO.getLocalizedMessage().contains(
          "Safe mode should be turned ON"));
    } catch (Exception e) {
      throw new IOException(e);
    }

    // Create a new file
    Path file = new Path("namespace.dat");
    DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize,
        replication, seed);
    checkFile(fs, file, replication);

    // Create a new symlink
    Path symlink = new Path("file.link");
    fc.createSymlink(file, symlink, false);
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());

    // Verify that the edits file is NOT empty
    Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      assertTrue(new File(ed, "current/"
          + NNStorage.getInProgressEditsFileName(1)).length()
          > Integer.SIZE / Byte.SIZE);
    }

    // Saving image in safe mode should succeed
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      throw new IOException(e);
    }

    // TODO: Fix the test to not require a hard-coded transaction count.
    final int EXPECTED_TXNS_FIRST_SEG = 13;

    for (URI uri : editsDirs) {
      File ed = new File(uri.getPath());
      File curDir = new File(ed, "current");
      LOG.info("Files in " + curDir + ":\n  "
          + Joiner.on("\n  ").join(curDir.list()));
      // Verify that the first edits file got finalized
      File originalEdits = new File(curDir,
          NNStorage.getInProgressEditsFileName(1));
      assertFalse(originalEdits.exists());
      File finalizedEdits = new File(curDir,
          NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
      GenericTestUtils.assertExists(finalizedEdits);
      assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
      GenericTestUtils.assertExists(new File(ed, "current/"
          + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
    }

    Collection<URI> imageDirs = cluster.getNameDirs(0);
    for (URI uri : imageDirs) {
      File imageDir = new File(uri.getPath());
      File savedImage = new File(imageDir, "current/"
          + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
      assertTrue("Should have saved image at " + savedImage,
          savedImage.exists());
    }

    // Restart the cluster and verify that the file and symlink still exist
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    checkFile(fs, file, replication);
    fc = FileContext.getFileContext(cluster.getURI(0));
    assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
  } finally {
    if (fs != null) {
      fs.close();
    }
    cleanup(cluster);
    cluster = null;
  }
}
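The test drives saveNamespace through the DFSAdmin shell command; the same operation is also exposed directly on DistributedFileSystem. Below is a minimal standalone sketch of that flow, assuming a reachable NameNode (the fs.defaultFS URI is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SaveNamespaceSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI; point this at a real NameNode.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(conf);
    // saveNamespace requires the NameNode to be in safe mode,
    // which is why the test enters safe mode before retrying.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      dfs.saveNamespace();
    } finally {
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
    dfs.close();
  }
}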
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From class TestAuditLoggerWithCommands, method testSetQuota.
@Test
public void testSetQuota() throws Exception {
  Path path = new Path("/testdir/testdir1");
  fs.mkdirs(path);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // expected: user1 is not a superuser
  }
  String acePattern = ".*allowed=false.*ugi=theDoctor.*cmd=setQuota.*";
  int length = verifyAuditLogs(acePattern);
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
    fail("The operation should have failed with IOException");
  } catch (IOException ace) {
    // expected: the file system has been closed
  }
  assertTrue("Unexpected log from setQuota",
      length == auditlog.getOutput().split("\n").length);
}
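In setQuota(Path, long, long) the first long is the namespace quota (number of names) and the second is the storage-space quota in bytes; the call is superuser-only, which is what the AccessControlException branch checks. A minimal sketch of a successful call, with placeholder quota values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetQuotaSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    Path dir = new Path("/testdir/testdir1"); // same path as the test
    // Allow at most 10 names and 10 bytes under dir, as in the test.
    dfs.setQuota(dir, 10L, 10L);
    // QUOTA_RESET clears a quota; QUOTA_DONT_SET leaves the other unchanged.
    dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
    dfs.close();
  }
}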
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From class TestAuditLoggerWithCommands, method testRemoveCachePool.
@Test
public void testRemoveCachePool() throws Exception {
  removeExistingCachePools(null);
  CachePoolInfo cacheInfo =
      new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
  ((DistributedFileSystem) fs).addCachePool(cacheInfo);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).removeCachePool("pool1");
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // expected: user1 is not a superuser
  }
  String aceRemoveCachePoolPattern =
      ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
  int length = verifyAuditLogs(aceRemoveCachePoolPattern);
  assertTrue("Unexpected log!",
      length == auditlog.getOutput().split("\n").length);
  try {
    fileSys.close();
    ((DistributedFileSystem) fileSys).removeCachePool("pool1");
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
    // expected: the file system has been closed
  }
  assertTrue("Unexpected log!",
      length == auditlog.getOutput().split("\n").length);
}
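As a complement to the test, a minimal sketch of removing a cache pool as a superuser and confirming it is gone via listCachePools (the pool name is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

public class RemoveCachePoolSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    dfs.removeCachePool("pool1"); // throws if the pool does not exist
    // Verify the pool is gone by scanning the remaining pools.
    RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
    while (it.hasNext()) {
      String name = it.next().getInfo().getPoolName();
      if ("pool1".equals(name)) {
        throw new IllegalStateException("pool1 should have been removed");
      }
    }
    dfs.close();
  }
}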
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From class TestAuditLoggerWithCommands, method testAddCachePool.
@Test
public void testAddCachePool() throws Exception {
  removeExistingCachePools(null);
  CachePoolInfo cacheInfo =
      new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
    // expected: user1 is not a superuser
  }
  String aceAddCachePoolPattern =
      ".*allowed=false.*ugi=theDoctor.*cmd=addCachePool.*";
  int length = verifyAuditLogs(aceAddCachePoolPattern);
  try {
    fileSys.close();
    ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
    // expected: the file system has been closed
  }
  assertTrue("Unexpected log!",
      length == auditlog.getOutput().split("\n").length);
}
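The CachePoolInfo in the test only sets a mode; its chained setters also cover owner, group, and a byte limit. A sketch of a more fully specified pool, with all values hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class AddCachePoolSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    CachePoolInfo pool = new CachePoolInfo("pool1")
        .setOwnerName("hdfs")              // hypothetical owner
        .setGroupName("hadoop")            // hypothetical group
        .setMode(new FsPermission((short) 0755))
        .setLimit(64L * 1024 * 1024);      // hypothetical 64 MB cache limit
    dfs.addCachePool(pool); // superuser-only, as the test asserts
    dfs.close();
  }
}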
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.
From class TestAuditLoggerWithCommands, method testRemoveCacheDirective.
@Test
public void testRemoveCacheDirective() throws Exception {
  removeExistingCachePools(null);
  proto.addCachePool(
      new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha")).setPool("pool1").build();
  String aceRemoveCachePattern =
      ".*allowed=false.*ugi=theDoctor.*cmd=removeCache.*";
  int length = -1;
  Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).removeCacheDirective(id);
    fail("It should have failed with an AccessControlException");
  } catch (AccessControlException ace) {
    length = verifyAuditLogs(aceRemoveCachePattern);
  }
  try {
    fileSys.close();
    ((DistributedFileSystem) fileSys).removeCacheDirective(id);
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
    // expected: the file system has been closed
  }
  assertTrue("Unexpected log!",
      length == auditlog.getOutput().split("\n").length);
}
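To round out the directive API exercised above, here is a minimal sketch of the full lifecycle: add a directive to an existing pool, list it back, then remove it by id (the path and pool name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class CacheDirectiveSketch {
  public static void main(String[] args) throws Exception {
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(new Configuration());
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/alpha"))   // same path as the test
        .setPool("pool1")
        .setReplication((short) 1)     // cache one replica
        .build();
    long id = dfs.addCacheDirective(directive);
    // List directives in pool1 to confirm the add took effect.
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
        new CacheDirectiveInfo.Builder().setPool("pool1").build());
    while (it.hasNext()) {
      System.out.println(it.next().getInfo());
    }
    dfs.removeCacheDirective(id);
    dfs.close();
  }
}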