Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.
In the class ShortCircuitRegistry, method unregisterSlot:
public synchronized void unregisterSlot(SlotId slotId) throws InvalidRequestException {
  if (!enabled) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("unregisterSlot: ShortCircuitRegistry is not enabled.");
    }
    throw new UnsupportedOperationException();
  }
  ShmId shmId = slotId.getShmId();
  RegisteredShm shm = segments.get(shmId);
  if (shm == null) {
    throw new InvalidRequestException("there is no shared memory segment "
        + "registered with shmId " + shmId);
  }
  Slot slot = shm.getSlot(slotId.getSlotIdx());
  slot.makeInvalid();
  shm.unregisterSlot(slotId.getSlotIdx());
  slots.remove(slot.getBlockId(), slot);
}
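The method treats a slot ID that points at an unknown shared-memory segment as a bad client request and reports it through InvalidRequestException's string constructor. The same validate-then-throw shape, pulled out of Hadoop's internals, looks roughly like the sketch below; RegistryLookup and getOrThrow are hypothetical names used only for illustration, while the InvalidRequestException(String) constructor is the real API shown above.

// Hypothetical helper (not Hadoop code): reject a request that references
// state the server does not have, as unregisterSlot does for an unknown shmId.
import java.util.Map;

import org.apache.hadoop.fs.InvalidRequestException;

class RegistryLookup {
  static <K, V> V getOrThrow(Map<K, V> registry, K key, String what)
      throws InvalidRequestException {
    V value = registry.get(key);
    if (value == null) {
      throw new InvalidRequestException(
          "there is no " + what + " registered with id " + key);
    }
    return value;
  }
}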
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.
In the class TestCacheDirectives, method testMaxRelativeExpiry:
@Test(timeout = 30000)
public void testMaxRelativeExpiry() throws Exception {
  // Test that negative and really big max expirations can't be set during add
  try {
    dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  try {
    dfs.addCachePool(new CachePoolInfo("failpool")
        .setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("too big", e);
  }
  // Test that setting a max relative expiry on a pool works
  CachePoolInfo coolPool = new CachePoolInfo("coolPool");
  final long poolExpiration = 1000 * 60 * 10l;
  dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
  RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
  CachePoolInfo listPool = poolIt.next().getInfo();
  assertFalse("Should only be one pool", poolIt.hasNext());
  assertEquals("Expected max relative expiry to match set value",
      poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
  // Test that negative and really big max expirations can't be modified
  try {
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("negative", e);
  }
  try {
    dfs.modifyCachePool(coolPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER + 1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("too big", e);
  }
  // Test that adding a directive without an expiration uses the pool's max
  CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/blah"))
      .setPool(coolPool.getPoolName())
      .build();
  dfs.addCacheDirective(defaultExpiry);
  RemoteIterator<CacheDirectiveEntry> dirIt = dfs.listCacheDirectives(defaultExpiry);
  CacheDirectiveInfo listInfo = dirIt.next().getInfo();
  assertFalse("Should only have one entry in listing", dirIt.hasNext());
  long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Directive expiry should be approximately the pool's max expiry",
      Math.abs(listExpiration - poolExpiration) < 10 * 1000);
  // Test that the max is enforced on add for relative and absolute
  CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/lolcat"))
      .setPool(coolPool.getPoolName());
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(poolExpiration + 1))
        .build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newAbsolute(
            new Date().getTime() + poolExpiration + (10 * 1000)))
        .build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Test that max is enforced on modify for relative and absolute Expirations
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newRelative(poolExpiration + 1))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(
            new Date().getTime() + poolExpiration + (10 * 1000)))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Test some giant limit values with add
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(Long.MAX_VALUE))
        .build());
    fail("Added a directive with a gigantic max value");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("is too far in the future", e);
  }
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newAbsolute(Long.MAX_VALUE))
        .build());
    fail("Added a directive with a gigantic max value");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // Test some giant limit values with modify
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.NEVER)
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(Long.MAX_VALUE))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // Test that the max is enforced on modify correctly when changing pools
  CachePoolInfo destPool = new CachePoolInfo("destPool");
  dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setPool(destPool.getPoolName())
        .build());
    fail("Modified a directive to a pool with a lower max expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
      .setId(listInfo.getId())
      .setPool(destPool.getPoolName())
      .setExpiration(Expiration.newRelative(poolExpiration / 2))
      .build());
  dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
      .setPool(destPool.getPoolName())
      .build());
  listInfo = dirIt.next().getInfo();
  listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Unexpected relative expiry " + listExpiration
      + " expected approximately " + poolExpiration / 2,
      Math.abs(poolExpiration / 2 - listExpiration) < 10 * 1000);
  // Test that cache pool and directive expiry can be modified back to never
  dfs.modifyCachePool(destPool
      .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
  poolIt = dfs.listCachePools();
  listPool = poolIt.next().getInfo();
  while (!listPool.getPoolName().equals(destPool.getPoolName())) {
    listPool = poolIt.next().getInfo();
  }
  assertEquals("Expected max relative expiry to match set value",
      CachePoolInfo.RELATIVE_EXPIRY_NEVER,
      listPool.getMaxRelativeExpiryMs().longValue());
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
      .build());
  // Test modifying close to the limit
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
      .build());
}
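The test above mixes the two Expiration factory methods against the same pool limit; a relative expiration is simply a deadline expressed as an offset from now, so the pool's maxRelativeExpiryMs check applies to either form when a directive is added or modified. A small illustrative snippet (the ten-minute value is arbitrary, not taken from the test):

// Two ways of asking for roughly "expire ten minutes from now"; the pool's
// max relative expiry is enforced against both forms.
long tenMinutesMs = 10 * 60 * 1000L;
CacheDirectiveInfo.Expiration relative =
    CacheDirectiveInfo.Expiration.newRelative(tenMinutesMs);
CacheDirectiveInfo.Expiration absolute =
    CacheDirectiveInfo.Expiration.newAbsolute(System.currentTimeMillis() + tenMinutesMs);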
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.
In the class TestCacheDirectives, method testLimit:
@Test(timeout = 120000)
public void testLimit() throws Exception {
  try {
    dfs.addCachePool(new CachePoolInfo("poolofnegativity").setLimit(-99l));
    fail("Should not be able to set a negative limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  final String destiny = "poolofdestiny";
  final Path path1 = new Path("/destiny");
  DFSTestUtil.createFile(dfs, path1, 2 * BLOCK_SIZE, (short) 1, 0x9494);
  // Start off with a limit that is too small
  final CachePoolInfo poolInfo = new CachePoolInfo(destiny)
      .setLimit(2 * BLOCK_SIZE - 1);
  dfs.addCachePool(poolInfo);
  final CacheDirectiveInfo info1 = new CacheDirectiveInfo.Builder()
      .setPool(destiny)
      .setPath(path1)
      .build();
  try {
    dfs.addCacheDirective(info1);
    fail("Should not be able to cache when there is no more limit");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Raise the limit up to fit and it should work this time
  poolInfo.setLimit(2 * BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  long id1 = dfs.addCacheDirective(info1);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 2 * BLOCK_SIZE, 1, 1, poolInfo,
      "testLimit:1");
  // Adding another file, it shouldn't be cached
  final Path path2 = new Path("/failure");
  DFSTestUtil.createFile(dfs, path2, BLOCK_SIZE, (short) 1, 0x9495);
  try {
    dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPool(destiny)
        .setPath(path2)
        .build(),
        EnumSet.noneOf(CacheFlag.class));
    fail("Should not be able to add another cached file");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Bring the limit down, the first file should get uncached
  poolInfo.setLimit(BLOCK_SIZE);
  dfs.modifyCachePool(poolInfo);
  waitForCachePoolStats(dfs, 2 * BLOCK_SIZE, 0, 1, 0, poolInfo, "testLimit:2");
  RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
  assertTrue("Expected a cache pool", it.hasNext());
  CachePoolStats stats = it.next().getStats();
  assertEquals("Overlimit bytes should be difference of needed and limit",
      BLOCK_SIZE, stats.getBytesOverlimit());
  // Moving a directive to a pool without enough limit should fail
  CachePoolInfo inadequate = new CachePoolInfo("poolofinadequacy")
      .setLimit(BLOCK_SIZE);
  dfs.addCachePool(inadequate);
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
        .setId(id1)
        .setPool(inadequate.getPoolName())
        .build(),
        EnumSet.noneOf(CacheFlag.class));
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("remaining capacity", e);
  }
  // Succeeds when force=true
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(info1)
      .setId(id1)
      .setPool(inadequate.getPoolName())
      .build(),
      EnumSet.of(CacheFlag.FORCE));
  // Also can add with force=true
  dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPool(inadequate.getPoolName())
      .setPath(path1)
      .build(),
      EnumSet.of(CacheFlag.FORCE));
}
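testLimit drives the NameNode's capacity checks from the test side; on the caller side, InvalidRequestException is normally treated as a permanent, user-correctable rejection rather than a transient failure to retry. A minimal sketch of that handling follows; CacheHelper and addIfValid are hypothetical names for illustration, while addCacheDirective(CacheDirectiveInfo, EnumSet<CacheFlag>) is the same DistributedFileSystem API the test calls.

// Hypothetical wrapper, not part of Hadoop: report a rejected cache directive
// instead of retrying it, since the request itself is invalid or over limit.
import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

class CacheHelper {
  /** Returns the new directive id, or -1 if the NameNode rejected the request. */
  static long addIfValid(DistributedFileSystem dfs, CacheDirectiveInfo info)
      throws IOException {
    try {
      return dfs.addCacheDirective(info, EnumSet.noneOf(CacheFlag.class));
    } catch (InvalidRequestException e) {
      // Unknown pool, negative ID, over-limit expiration, no remaining capacity...
      System.err.println("cache directive rejected: " + e.getMessage());
      return -1;
    }
  }
}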
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.
In the class TestCacheDirectives, method testAddRemoveDirectives:
@Test(timeout = 60000)
public void testAddRemoveDirectives() throws Exception {
  proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool2").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool3").setMode(new FsPermission((short) 0777)));
  proto.addCachePool(new CachePoolInfo("pool4").setMode(new FsPermission((short) 0)));
  CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/alpha"))
      .setPool("pool1")
      .build();
  CacheDirectiveInfo beta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/beta"))
      .setPool("pool2")
      .build();
  CacheDirectiveInfo delta = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/delta"))
      .setPool("pool1")
      .build();
  long alphaId = addAsUnprivileged(alpha);
  long alphaId2 = addAsUnprivileged(alpha);
  assertFalse("Expected to get unique directives when re-adding an "
      + "existing CacheDirectiveInfo", alphaId == alphaId2);
  long betaId = addAsUnprivileged(beta);
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/unicorn"))
        .setPool("no_such_pool")
        .build());
    fail("expected an error when adding to a non-existent pool.");
  } catch (InvalidRequestException ioe) {
    GenericTestUtils.assertExceptionContains("Unknown pool", ioe);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/blackhole"))
        .setPool("pool4")
        .build());
    fail("expected an error when adding to a pool with "
        + "mode 0 (no permissions for anyone).");
  } catch (AccessControlException e) {
    GenericTestUtils.assertExceptionContains("Permission denied while accessing pool", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/illegal:path/"))
        .setPool("pool1")
        .build());
    fail("expected an error when adding a malformed path "
        + "to the cache directives.");
  } catch (IllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains("is not a valid DFS filename", e);
  }
  try {
    addAsUnprivileged(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/emptypoolname"))
        .setReplication((short) 1)
        .setPool("")
        .build());
    fail("expected an error when adding a cache "
        + "directive with an empty pool name.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid empty pool name", e);
  }
  long deltaId = addAsUnprivileged(delta);
  // We expect the following to succeed, because DistributedFileSystem
  // qualifies the path.
  long relativeId = addAsUnprivileged(new CacheDirectiveInfo.Builder()
      .setPath(new Path("relative"))
      .setPool("pool1")
      .build());
  RemoteIterator<CacheDirectiveEntry> iter;
  iter = dfs.listCacheDirectives(null);
  validateListAll(iter, alphaId, alphaId2, betaId, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool3").build());
  assertFalse(iter.hasNext());
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool1").build());
  validateListAll(iter, alphaId, alphaId2, deltaId, relativeId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  validateListAll(iter, betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(alphaId2).build());
  validateListAll(iter, alphaId2);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setId(relativeId).build());
  validateListAll(iter, relativeId);
  dfs.removeCacheDirective(betaId);
  iter = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder().setPool("pool2").build());
  assertFalse(iter.hasNext());
  try {
    dfs.removeCacheDirective(betaId);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  try {
    proto.removeCacheDirective(-42l);
    fail("expected an error when removing a negative ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Invalid negative ID", e);
  }
  try {
    proto.removeCacheDirective(43l);
    fail("expected an error when removing a non-existent ID");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("No directive with ID", e);
  }
  dfs.removeCacheDirective(alphaId);
  dfs.removeCacheDirective(alphaId2);
  dfs.removeCacheDirective(deltaId);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(relativeId)
      .setReplication((short) 555)
      .build());
  iter = dfs.listCacheDirectives(null);
  assertTrue(iter.hasNext());
  CacheDirectiveInfo modified = iter.next().getInfo();
  assertEquals(relativeId, modified.getId().longValue());
  assertEquals((short) 555, modified.getReplication().shortValue());
  dfs.removeCacheDirective(relativeId);
  iter = dfs.listCacheDirectives(null);
  assertFalse(iter.hasNext());
  // Verify that PBCDs with path "." work correctly
  CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
      .setPath(new Path("."))
      .setPool("pool1")
      .build();
  long id = dfs.addCacheDirective(directive);
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(directive)
      .setId(id)
      .setReplication((short) 2)
      .build());
  dfs.removeCacheDirective(id);
  // Perform cache directive operations using a closed file system.
  DistributedFileSystem dfs1 = (DistributedFileSystem) cluster.getNewFileSystemInstance(0);
  dfs1.close();
  try {
    dfs1.listCacheDirectives(null);
    fail("listCacheDirectives using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.addCacheDirective(alpha);
    fail("addCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.modifyCacheDirective(alpha);
    fail("modifyCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
  try {
    dfs1.removeCacheDirective(alphaId);
    fail("removeCacheDirective using a closed filesystem!");
  } catch (IOException ioe) {
    GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
  }
}
Use of org.apache.hadoop.fs.InvalidRequestException in project hadoop by apache.
In the class TestCacheDirectives, method testExpiry:
@Test(timeout = 120000)
public void testExpiry() throws Exception {
  String pool = "pool1";
  dfs.addCachePool(new CachePoolInfo(pool));
  Path p = new Path("/mypath");
  DFSTestUtil.createFile(dfs, p, BLOCK_SIZE * 2, (short) 2, 0x999);
  // Expire after test timeout
  Date start = new Date();
  Date expiry = DateUtils.addSeconds(start, 120);
  final long id = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
      .setPath(p)
      .setPool(pool)
      .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(expiry))
      .setReplication((short) 2)
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:1");
  // Change it to expire sooner
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(id)
      .setExpiration(Expiration.newRelative(0))
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 0, 0, "testExpiry:2");
  RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(null);
  CacheDirectiveEntry ent = it.next();
  assertFalse(it.hasNext());
  Date entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should have expired", entryExpiry.before(new Date()));
  // Change it back to expire later
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(id)
      .setExpiration(Expiration.newRelative(120000))
      .build());
  waitForCachedBlocks(cluster.getNameNode(), 2, 4, "testExpiry:3");
  it = dfs.listCacheDirectives(null);
  ent = it.next();
  assertFalse(it.hasNext());
  entryExpiry = new Date(ent.getInfo().getExpiration().getMillis());
  assertTrue("Directive should not have expired", entryExpiry.after(new Date()));
  // Verify that setting a negative TTL throws an error
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
        .setId(id)
        .setExpiration(Expiration.newRelative(-1))
        .build());
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("Cannot set a negative expiration", e);
  }
}