use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
the class TestFsDatasetCacheRevocation method testPinning.
/**
* Test that when a client has a replica mmapped, we will not un-mlock that
* replica for a reasonable amount of time, even if an uncache request
* occurs.
*/
@Test(timeout = 120000)
public void testPinning() throws Exception {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
assumeNotWindows();
Configuration conf = getDefaultConf();
// Set a really long revocation timeout, so that we won't reach it during
// this test.
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 1800000L);
// Poll very often
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create and cache a file.
final String TEST_FILE = "/test_file";
DFSTestUtil.createFile(dfs, new Path(TEST_FILE), BLOCK_SIZE, (short) 1, 0xcafe);
dfs.addCachePool(new CachePoolInfo("pool"));
long cacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPool("pool")
    .setPath(new Path(TEST_FILE))
    .setReplication((short) 1)
    .build());
FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Mmap the file.
FSDataInputStream in = dfs.open(new Path(TEST_FILE));
ByteBuffer buf = in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));
// Attempt to uncache file. The file should still be cached.
dfs.removeCacheDirective(cacheDirectiveId);
Thread.sleep(500);
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Un-mmap the file. The file should be uncached after this.
in.releaseBuffer(buf);
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
// Cleanup
in.close();
cluster.shutdown();
}
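For reference, the pool-and-directive lifecycle that this test (and the next one) exercises boils down to a handful of DistributedFileSystem calls. The following is a minimal sketch rather than part of the test: it assumes an already-initialized DistributedFileSystem handle dfs and an existing file, and the pool and path names are illustrative.
// Cache a file through a pool, then release it (illustrative names, not from the test).
dfs.addCachePool(new CachePoolInfo("hotPool"));
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPool("hotPool")
    .setPath(new Path("/data/hot-file"))
    .setReplication((short) 1)
    .build());
// ... read the file while the DataNode keeps its block mlocked ...
dfs.removeCacheDirective(directiveId);
dfs.removeCachePool("hotPool");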
use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
the class TestFsDatasetCacheRevocation method testRevocation.
/**
* Test that when we have an uncache request and the client refuses to release
* the replica for a long time, we will un-mlock it anyway.
*/
@Test(timeout = 120000)
public void testRevocation() throws Exception {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded());
assumeNotWindows();
BlockReaderTestUtil.enableHdfsCachingTracing();
BlockReaderTestUtil.enableShortCircuitShmTracing();
Configuration conf = getDefaultConf();
// Set a really short revocation timeout.
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_TIMEOUT_MS, 250L);
// Poll very often
conf.setLong(DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS, 2L);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
// Create and cache a file.
final String TEST_FILE = "/test_file2";
DFSTestUtil.createFile(dfs, new Path(TEST_FILE), BLOCK_SIZE, (short) 1, 0xcafe);
dfs.addCachePool(new CachePoolInfo("pool"));
long cacheDirectiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPool("pool")
    .setPath(new Path(TEST_FILE))
    .setReplication((short) 1)
    .build());
FsDatasetSpi<?> fsd = cluster.getDataNodes().get(0).getFSDataset();
DFSTestUtil.verifyExpectedCacheUsage(BLOCK_SIZE, 1, fsd);
// Mmap the file.
FSDataInputStream in = dfs.open(new Path(TEST_FILE));
ByteBuffer buf = in.read(null, BLOCK_SIZE, EnumSet.noneOf(ReadOption.class));
// Attempt to uncache file. The file should get uncached.
LOG.info("removing cache directive {}", cacheDirectiveId);
dfs.removeCacheDirective(cacheDirectiveId);
LOG.info("finished removing cache directive {}", cacheDirectiveId);
Thread.sleep(1000);
DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
// Cleanup
in.releaseBuffer(buf);
in.close();
cluster.shutdown();
}
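Both tests obtain the mmap through the zero-copy read API on FSDataInputStream. They deliberately pass null for the buffer pool so the read fails outright if zero-copy is not possible; a regular client would normally supply a pool and release the buffer in a finally block. A hedged sketch of that conventional pattern, with an illustrative path and buffer pool choice:
ElasticByteBufferPool pool = new ElasticByteBufferPool();
FSDataInputStream in = dfs.open(new Path("/data/hot-file"));
ByteBuffer buf = in.read(pool, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
try {
  // ... consume buf; while it is held, the DataNode will not un-mlock the replica
  // until the revocation timeout configured above expires ...
} finally {
  if (buf != null) {
    // Un-mmap so the replica can be uncached promptly.
    in.releaseBuffer(buf);
  }
  in.close();
}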
use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
the class TestAuditLoggerWithCommands method testModifyCacheDirective.
@Test
public void testModifyCacheDirective() throws Exception {
removeExistingCachePools(null);
proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
try {
((DistributedFileSystem) fileSys).modifyCacheDirective(
    new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
fail("The operation should have failed with AccessControlException");
} catch (AccessControlException ace) {
}
String aceModifyCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=modifyCache.*";
verifyAuditLogs(aceModifyCachePattern);
fileSys.close();
try {
((DistributedFileSystem) fileSys).modifyCacheDirective(
    new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 1).build());
fail("The operation should have failed with IOException");
} catch (IOException e) {
}
}
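The AccessControlException here is driven by the pool's mode: the pool is created with new FsPermission((short) 0), so user1 (logged as ugi=theDoctor) is denied, and that denial is what the audit log pattern matches. For contrast, a hedged sketch of creating a pool whose directives other users may modify; the pool name, owner, and group are illustrative, and 0777 is deliberately permissive:
proto.addCachePool(new CachePoolInfo("openPool")
    .setOwnerName("theDoctor")
    .setGroupName("users")
    .setMode(new FsPermission((short) 0777)));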
use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
the class TestRetryCacheWithHA method testListCacheDirectives.
/**
* Add a list of cache directives, list cache directives,
* switch active NN, and list cache directives again.
*/
@Test(timeout = 60000)
public void testListCacheDirectives() throws Exception {
final int poolCount = 7;
HashSet<String> poolNames = new HashSet<String>(poolCount);
Path path = new Path("/p");
for (int i = 0; i < poolCount; i++) {
String poolName = "testListCacheDirectives-" + i;
CacheDirectiveInfo directiveInfo = new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
dfs.addCachePool(new CachePoolInfo(poolName));
dfs.addCacheDirective(directiveInfo, EnumSet.of(CacheFlag.FORCE));
poolNames.add(poolName);
}
listCacheDirectives(poolNames, 0);
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
cluster.waitActive(1);
listCacheDirectives(poolNames, 1);
}
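DistributedFileSystem.listCacheDirectives, which the listing helper above relies on, returns a RemoteIterator that fetches entries in batches from the active NameNode. A hedged sketch of consuming that iterator directly, filtered to one of the pools created above:
RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(
    new CacheDirectiveInfo.Builder().setPool("testListCacheDirectives-0").build());
while (it.hasNext()) {
  CacheDirectiveEntry entry = it.next();
  CacheDirectiveInfo info = entry.getInfo();
  CacheDirectiveStats stats = entry.getStats();
  System.out.println(info.getId() + " " + info.getPath() + ": "
      + stats.getBytesCached() + "/" + stats.getBytesNeeded() + " bytes cached");
}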
use of org.apache.hadoop.hdfs.protocol.CachePoolInfo in project hadoop by apache.
the class TestCacheDirectives method testMaxRelativeExpiry.
@Test(timeout = 30000)
public void testMaxRelativeExpiry() throws Exception {
// Test that negative and really big max expirations can't be set during add
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("negative", e);
}
try {
dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
fail("Added a pool with too big of a max expiry.");
} catch (InvalidRequestException e) {
GenericTestUtils.assertExceptionContains("too big", e);
}
// Test that setting a max relative expiry on a pool works
CachePoolInfo coolPool = new CachePoolInfo("coolPool");
final long poolExpiration = 1000 * 60 * 10l;
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
CachePoolInfo listPool = poolIt.next().getInfo();
assertFalse("Should only be one pool", poolIt.hasNext());
assertEquals("Expected max relative expiry to match set value", poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
// Test that negative and really big max expirations can't be modified
try {
dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
fail("Added a pool with a negative max expiry.");
} catch (InvalidRequestException e) {
assertExceptionContains("negative", e);
}
try {
dfs.modifyCachePool(coolPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
fail("Added a pool with too big of a max expiry.");
} catch (InvalidRequestException e) {
assertExceptionContains("too big", e);
}
// Test that adding a directives without an expiration uses the pool's max
CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
    .setPath(new Path("/blah"))
    .setPool(coolPool.getPoolName())
    .build();
dfs.addCacheDirective(defaultExpiry);
RemoteIterator<CacheDirectiveEntry> dirIt = dfs.listCacheDirectives(defaultExpiry);
CacheDirectiveInfo listInfo = dirIt.next().getInfo();
assertFalse("Should only have one entry in listing", dirIt.hasNext());
long listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Directive expiry should be approximately the pool's max expiry", Math.abs(listExpiration - poolExpiration) < 10 * 1000);
// Test that the max is enforced on add for relative and absolute
CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder().setPath(new Path("/lolcat")).setPool(coolPool.getPoolName());
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(poolExpiration + 1)).build());
fail("Added a directive that exceeds pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000))).build());
fail("Added a directive that exceeds pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
// Test that max is enforced on modify for relative and absolute Expirations
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setExpiration(Expiration.newRelative(poolExpiration + 1))
    .build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setExpiration(Expiration.newAbsolute(new Date().getTime() + poolExpiration + (10 * 1000)))
    .build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
// Test some giant limit values with add
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newRelative(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
} catch (IllegalArgumentException e) {
assertExceptionContains("is too far in the future", e);
}
try {
dfs.addCacheDirective(builder.setExpiration(Expiration.newAbsolute(Long.MAX_VALUE)).build());
fail("Added a directive with a gigantic max value");
} catch (InvalidRequestException e) {
assertExceptionContains("is too far in the future", e);
}
// Test some giant limit values with modify
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setExpiration(Expiration.NEVER)
    .build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setExpiration(Expiration.newAbsolute(Long.MAX_VALUE))
    .build());
fail("Modified a directive to exceed pool's max relative expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("is too far in the future", e);
}
// Test that the max is enforced on modify correctly when changing pools
CachePoolInfo destPool = new CachePoolInfo("destPool");
dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
try {
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setPool(destPool.getPoolName())
    .build());
fail("Modified a directive to a pool with a lower max expiration");
} catch (InvalidRequestException e) {
assertExceptionContains("exceeds the max relative expiration", e);
}
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
    .setId(listInfo.getId())
    .setPool(destPool.getPoolName())
    .setExpiration(Expiration.newRelative(poolExpiration / 2))
    .build());
dirIt = dfs.listCacheDirectives(
    new CacheDirectiveInfo.Builder().setPool(destPool.getPoolName()).build());
listInfo = dirIt.next().getInfo();
listExpiration = listInfo.getExpiration().getAbsoluteMillis() - new Date().getTime();
assertTrue("Unexpected relative expiry " + listExpiration + " expected approximately " + poolExpiration / 2, Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
// Test that cache pool and directive expiry can be modified back to never
dfs.modifyCachePool(destPool.setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
poolIt = dfs.listCachePools();
listPool = poolIt.next().getInfo();
while (!listPool.getPoolName().equals(destPool.getPoolName())) {
listPool = poolIt.next().getInfo();
}
assertEquals("Expected max relative expiry to match set value", CachePoolInfo.RELATIVE_EXPIRY_NEVER, listPool.getMaxRelativeExpiryMs().longValue());
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
    .setId(listInfo.getId())
    .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
    .build());
// Test modifying close to the limit
dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
    .setId(listInfo.getId())
    .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
    .build());
}
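Distilled from the rules this test exercises, a hedged sketch of the interplay between a pool's max relative expiry and a directive's own expiration; the pool name, paths, and one-hour cap are illustrative, and dfs is an already-initialized DistributedFileSystem:
final long oneHourMs = 60 * 60 * 1000L;
dfs.addCachePool(new CachePoolInfo("reports").setMaxRelativeExpiryMs(oneHourMs));
// Allowed: the relative expiration is within the pool's cap.
dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPool("reports")
    .setPath(new Path("/reports/daily"))
    .setExpiration(CacheDirectiveInfo.Expiration.newRelative(oneHourMs / 2))
    .build());
// Rejected with InvalidRequestException ("exceeds the max relative expiration"):
// dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
//     .setPool("reports")
//     .setPath(new Path("/reports/archive"))
//     .setExpiration(CacheDirectiveInfo.Expiration.newRelative(2 * oneHourMs))
//     .build());
A directive that omits setExpiration inherits the pool's maximum, which is what the /blah directive in the test demonstrates.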