Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
From the class TestCacheDirectives, method testWaitForCachedReplicas.
@Test(timeout = 120000)
public void testWaitForCachedReplicas() throws Exception {
  FileSystemTestHelper helper = new FileSystemTestHelper();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return ((namenode.getNamesystem().getCacheCapacity() == (NUM_DATANODES * CACHE_CAPACITY))
          && (namenode.getNamesystem().getCacheUsed() == 0));
    }
  }, 500, 60000);
  // Send a cache report referring to a bogus block. It is important that
  // the NameNode be robust against this.
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  DataNode dn0 = cluster.getDataNodes().get(0);
  String bpid = cluster.getNamesystem().getBlockPoolId();
  LinkedList<Long> bogusBlockIds = new LinkedList<Long>();
  bogusBlockIds.add(999999L);
  nnRpc.cacheReport(dn0.getDNRegistrationForBP(bpid), bpid, bogusBlockIds);
  Path rootDir = helper.getDefaultWorkingDirectory(dfs);
  // Create the pool
  final String pool = "friendlyPool";
  nnRpc.addCachePool(new CachePoolInfo("friendlyPool"));
  // Create some test files
  final int numFiles = 2;
  final int numBlocksPerFile = 2;
  final List<String> paths = new ArrayList<String>(numFiles);
  for (int i = 0; i < numFiles; i++) {
    Path p = new Path(rootDir, "testCachePaths-" + i);
    FileSystemTestHelper.createFile(dfs, p, numBlocksPerFile, (int) BLOCK_SIZE);
    paths.add(p.toUri().getPath());
  }
  // Check the initial statistics at the namenode
  waitForCachedBlocks(namenode, 0, 0, "testWaitForCachedReplicas:0");
  // Cache and check each path in sequence
  int expected = 0;
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path(paths.get(i)))
        .setPool(pool)
        .build();
    nnRpc.addCacheDirective(directive, EnumSet.noneOf(CacheFlag.class));
    expected += numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:1");
  }
  // Check that the datanodes have the right cache values
  DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
  assertEquals("Unexpected number of live nodes", NUM_DATANODES, live.length);
  long totalUsed = 0;
  for (DatanodeInfo dn : live) {
    final long cacheCapacity = dn.getCacheCapacity();
    final long cacheUsed = dn.getCacheUsed();
    final long cacheRemaining = dn.getCacheRemaining();
    assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
    assertEquals("Capacity not equal to used + remaining", cacheCapacity, cacheUsed + cacheRemaining);
    assertEquals("Remaining not equal to capacity - used", cacheCapacity - cacheUsed, cacheRemaining);
    totalUsed += cacheUsed;
  }
  assertEquals(expected * BLOCK_SIZE, totalUsed);
  // Uncache and check each path in sequence
  RemoteIterator<CacheDirectiveEntry> entries =
      new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
  for (int i = 0; i < numFiles; i++) {
    CacheDirectiveEntry entry = entries.next();
    nnRpc.removeCacheDirective(entry.getInfo().getId());
    expected -= numBlocksPerFile;
    waitForCachedBlocks(namenode, expected, expected, "testWaitForCachedReplicas:2");
  }
}
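The test above drives caching through the NameNode RPC interface. Outside a test harness the same CachePoolInfo / CacheDirectiveInfo flow is typically driven through DistributedFileSystem; a minimal sketch under that assumption (the pool name, path, and helper class are hypothetical, not part of the test):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachePoolSketch {
  // Create a pool and cache one path in it; returns the new directive's id.
  // "examplePool" is illustrative only.
  static long cachePathInNewPool(DistributedFileSystem dfs, Path path) throws IOException {
    dfs.addCachePool(new CachePoolInfo("examplePool"));
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(path)
        .setPool("examplePool")
        .build();
    return dfs.addCacheDirective(directive);
  }
}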
Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
From the class TestCacheDirectives, method testAddRemoveDirectives.
@Test(timeout = 60000)
public void testAddRemoveDirectives() throws Exception {
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha"))
      .setPool("pool1")
      .build();
  CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/beta"))
      .setPool("pool2")
      .build();
  CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/delta"))
      .setPool("pool1")
      .build();
  long alphaId = addAsUnprivileged(alpha);
  long alphaId2 = addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an "
      + "existing CacheDirectiveInfo", alphaId == alphaId2);
  long betaId = addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/unicorn"))
        .setPool("no_such_pool")
        .build());
    fail("expected an error when adding to a non-existent pool.");
  } catch (InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blackhole"))
        .setPool("pool4")
        .build());
    fail("expected an error when adding to a pool with "
        + "mode 0 (no permissions for anyone).");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/illegal:path/"))
        .setPool("pool1")
        .build());
    fail("expected an error when adding a malformed path "
        + "to the cache directives.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/emptypoolname"))
        .setReplication((short) 1)
        .setPool("")
        .build());
    fail("expected an error when adding a cache "
        + "directive with an empty pool name.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
  }
  long deltaId = addAsUnprivileged(delta);
  // We expect the following to succeed, because DistributedFileSystem
  // qualifies the path.
  long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder()
      .setPath(new Path("relative"))
      .setPool("pool1")
      .build());
  RemoteIterator<CacheDirectiveEntry> iter;
  iter = dfs.listCacheDirectives(null);
  validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter, betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter, alphaId2);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter, relativeId);
  dfs.removeCacheDirective(betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  try {
    proto.removeCacheDirective(-42L);
    fail("expected an error when removing a negative ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
  }
  try {
    proto.removeCacheDirective(43L);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(relativeId)
      .setReplication((short) 555)
      .build());
  iter = dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified = iter.next().getInfo();
  assertEquals(relativeId, modified.getId().longValue());
  assertEquals((short) 555, modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter = dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // Verify that PBCDs with path "." work correctly
  CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
      .setPath(new Path("."))
      .setPool("pool1")
      .build();
  long id = dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive)
      .setId(id)
      .setReplication((short) 2)
      .build());
  dfs.removeCacheDirective(id);
  // Perform cache directive operations using a closed file system.
  DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
  dfs1.close();
  try {
    dfs1.listCacheDirectives(null);
    fail("listCacheDirectives using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.addCacheDirective(alpha);
    fail("addCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.modifyCacheDirective(alpha);
    fail("modifyCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.removeCacheDirective(alphaId);
    fail("removeCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
}
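The listing and removal calls exercised above can be combined into a small cleanup routine; a sketch using only the public DistributedFileSystem API shown in the test (the class and method names are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class DirectiveCleanupSketch {
  // Remove every cache directive that belongs to the given pool.
  static void removeDirectivesInPool(DistributedFileSystem dfs, String pool) throws IOException {
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().setPool(pool).build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
      dfs.removeCacheDirective(it.next().getInfo().getId());
    }
  }
}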
Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
From the class TestCacheDirectives, method testExpiry.
@Test(timeout = 120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE * 2, (short) 2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short) 2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(id)
      .setExpiration(Expiration.newRelative(0))
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired", entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(id)
      .setExpiration(Expiration.newRelative(120000))
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired", entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(id)
        .setExpiration(Expiration.newRelative(-1))
        .build());
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative expiration", e);
  }
}
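A directive's TTL can also be set at creation time rather than patched in afterwards; a minimal sketch based on the same Expiration API (the class name, pool, path, and one-hour TTL are illustrative assumptions):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ExpiringDirectiveSketch {
  // Cache a path for one hour; the directive stops being honored once the TTL elapses.
  static long cacheForOneHour(DistributedFileSystem dfs, Path path, String pool) throws IOException {
    return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(path)
        .setPool(pool)
        .setExpiration(CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L))
        .build());
  }
}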
Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
From the class TestAuditLoggerWithCommands, method testAddCacheDirective.
@Test
public void testAddCacheDirective() throws Exception {
  removeExistingCachePools(null);
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha"))
      .setPool("pool1")
      .build();
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  try {
    ((DistributedFileSystem) fileSys).addCacheDirective(alpha);
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
  }
  String aceAddCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=addCache.*";
  int length = verifyAuditLogs(aceAddCachePattern);
  try {
    fileSys.close();
    ((DistributedFileSystem) fileSys).addCacheDirective(alpha);
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
  }
  assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
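The AccessControlException checked above comes from the pool's mode bits: the pool is created with mode 0, so the unprivileged user is rejected before any directive is added. How owner, group, mode, and a byte limit can be attached to a pool through CachePoolInfo's fluent setters is sketched below (the pool name, owner, group, and limit are illustrative, not taken from the test):

import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class RestrictedPoolSketch {
  // Create a pool usable only by its owner and group, capped at 1 GB of cached bytes.
  static void addRestrictedPool(DistributedFileSystem dfs) throws IOException {
    dfs.addCachePool(new CachePoolInfo("restrictedPool")   // hypothetical pool name
        .setOwnerName("hdfs")                              // hypothetical owner
        .setGroupName("hadoop")                            // hypothetical group
        .setMode(new FsPermission((short) 0750))
        .setLimit(1024L * 1024L * 1024L));
  }
}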
Use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
From the class TestAuditLoggerWithCommands, method testModifyCacheDirective.
@Test
public void testModifyCacheDirective() throws Exception {
  removeExistingCachePools(null);
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha"))
      .setPool("pool1")
      .build();
  fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
  Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
  try {
    ((DistributedFileSystem) fileSys).modifyCacheDirective(
        new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
    fail("The operation should have failed with AccessControlException");
  } catch (AccessControlException ace) {
  }
  String aceModifyCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=modifyCache.*";
  verifyAuditLogs(aceModifyCachePattern);
  fileSys.close();
  try {
    ((DistributedFileSystem) fileSys).modifyCacheDirective(
        new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
    fail("The operation should have failed with IOException");
  } catch (IOException e) {
  }
}
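As the builders above show, modifyCacheDirective only needs the directive id plus the fields being changed; everything else is left untouched. A small sketch of bumping a directive's cache replication (the class name and target value are arbitrary assumptions):

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ModifyDirectiveSketch {
  // Change the cache replication of an existing directive to the given value.
  static void setCacheReplication(DistributedFileSystem dfs, long directiveId, short replication)
      throws IOException {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(directiveId)
        .setReplication(replication)
        .build());
  }
}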