Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
From the class TestGroupsCaching, method testCacheEntriesExpire.
@Test
public void testCacheEntriesExpire() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  // We make an entry
  groups.getGroups("me");
  int startingRequestCount = FakeGroupMapping.getRequestCount();
  timer.advance(20 * 1000);
  // Cache entry has expired so it results in a new fetch
  groups.getGroups("me");
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
}
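All of these examples inject a FakeTimer into the Groups constructor so that cache expiry can be driven with advance() instead of real waits. As a rough sketch of the idea only (not the actual org.apache.hadoop.util.FakeTimer implementation, which extends org.apache.hadoop.util.Timer and may differ in detail), a manual test clock looks roughly like this:

import java.util.concurrent.TimeUnit;

// Illustrative sketch of a manual test clock in the spirit of FakeTimer.
public class ManualTestTimer {
  // Virtual monotonic time in nanoseconds, never tied to the wall clock.
  private long nowNanos = TimeUnit.MILLISECONDS.toNanos(1000);

  // Current virtual time in milliseconds, analogous to Timer#monotonicNow().
  public long monotonicNow() {
    return TimeUnit.NANOSECONDS.toMillis(nowNanos);
  }

  // Move the virtual clock forward by the given number of milliseconds.
  public void advance(long advMillis) {
    nowNanos += TimeUnit.MILLISECONDS.toNanos(advMillis);
  }
}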
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
From the class TestGroupsCaching, method testExceptionOnBackgroundRefreshHandled.
@Test
public void testExceptionOnBackgroundRefreshHandled() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD, true);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  // We make an initial request to populate the cache
  groups.getGroups("me");
  // add another group
  groups.cacheGroupsAdd(Arrays.asList("grp3"));
  int startingRequestCount = FakeGroupMapping.getRequestCount();
  // Arrange for an exception to occur only on the
  // second call
  FakeGroupMapping.setThrowException(true);
  // Then expire that entry
  timer.advance(4 * 1000);
  // Now get the cache entry - it should return immediately
  // with the old value and the cache will not have completed
  // a request to getGroups yet.
  assertEquals(groups.getGroups("me").size(), 2);
  assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
  // Now sleep for a short time and re-check the request count. It should have
  // increased, but the exception means the cache will not have updated
  Thread.sleep(50);
  FakeGroupMapping.setThrowException(false);
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
  assertEquals(groups.getGroups("me").size(), 2);
  // Now sleep another short time - the 3rd call to getGroups above
  // will have kicked off another refresh that updates the cache
  Thread.sleep(50);
  assertEquals(startingRequestCount + 2, FakeGroupMapping.getRequestCount());
  assertEquals(groups.getGroups("me").size(), 3);
}
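The constants used above are plain configuration key names; assuming the usual values from CommonConfigurationKeysPublic ("hadoop.security.groups.cache.secs" and "hadoop.security.groups.cache.background.reload"), the same cache behaviour can be sketched with literal keys, though the constants are preferable in real code:

// Sketch only: the key strings are assumptions based on
// CommonConfigurationKeysPublic; prefer the constants so typos fail at compile time.
Configuration conf = new Configuration();
conf.setLong("hadoop.security.groups.cache.secs", 1);                    // expire entries after 1 second
conf.setBoolean("hadoop.security.groups.cache.background.reload", true); // refresh expired entries off the caller thread
Groups groups = new Groups(conf, new FakeTimer());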
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
From the class TestGroupsCaching, method testExceptionCallingLoadWithoutBackgroundRefreshReturnsOldValue.
@Test
public void testExceptionCallingLoadWithoutBackgroundRefreshReturnsOldValue() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD, false);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  // First populate the cache
  assertEquals(groups.getGroups("me").size(), 2);
  // Advance the timer so a refresh is required
  timer.advance(2 * 1000);
  // Force the underlying lookup to fail; getGroups should still return the old value
  FakeGroupMapping.setThrowException(true);
  assertEquals(groups.getGroups("me").size(), 2);
}
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
From the class TestGroupsCaching, method testNegativeCacheEntriesExpire.
@Test
public void testNegativeCacheEntriesExpire() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS, 2);
  FakeTimer timer = new FakeTimer();
  // Ensure that stale entries are removed from negative cache every 2 seconds
  Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  // Add both these users to blacklist so that they
  // can be added to negative cache
  FakeGroupMapping.addToBlackList("user1");
  FakeGroupMapping.addToBlackList("user2");
  // Put user1 in negative cache.
  try {
    groups.getGroups("user1");
    fail("Did not throw IOException : Failed to obtain groups"
        + " from FakeGroupMapping.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No groups found for user", e);
  }
  // Check if user1 exists in negative cache
  assertTrue(groups.getNegativeCache().contains("user1"));
  // Advance fake timer
  timer.advance(1000);
  // Put user2 in negative cache
  try {
    groups.getGroups("user2");
    fail("Did not throw IOException : Failed to obtain groups"
        + " from FakeGroupMapping.");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No groups found for user", e);
  }
  // Check if user2 exists in negative cache
  assertTrue(groups.getNegativeCache().contains("user2"));
  // Advance timer. Only user2 should be present in negative cache.
  timer.advance(1100);
  assertFalse(groups.getNegativeCache().contains("user1"));
  assertTrue(groups.getNegativeCache().contains("user2"));
  // Advance timer. Even user2 should not be present in negative cache.
  timer.advance(1000);
  assertFalse(groups.getNegativeCache().contains("user2"));
}
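A more compact way to write the try/fail/catch blocks above is LambdaTestUtils.intercept from org.apache.hadoop.test (assumed available alongside GenericTestUtils in hadoop-common's test utilities); a sketch:

// Sketch: fails the test if no exception is thrown and checks that the
// exception message contains the given text, replacing the manual try/fail/catch.
LambdaTestUtils.intercept(IOException.class, "No groups found for user",
    () -> groups.getGroups("user1"));
assertTrue(groups.getNegativeCache().contains("user1"));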
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
From the class TestGroupsCaching, method testThreadNotBlockedWhenExpiredEntryExistsWithBackgroundRefresh.
@Test
public void testThreadNotBlockedWhenExpiredEntryExistsWithBackgroundRefresh() throws Exception {
  conf.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 1);
  conf.setBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD, true);
  FakeTimer timer = new FakeTimer();
  final Groups groups = new Groups(conf, timer);
  groups.cacheGroupsAdd(Arrays.asList(myGroups));
  groups.refresh();
  FakeGroupMapping.clearBlackList();
  // We make an initial request to populate the cache
  groups.getGroups("me");
  // Further lookups will have a delay
  FakeGroupMapping.setGetGroupsDelayMs(100);
  // add another group
  groups.cacheGroupsAdd(Arrays.asList("grp3"));
  int startingRequestCount = FakeGroupMapping.getRequestCount();
  // Then expire that entry
  timer.advance(4 * 1000);
  // Now get the cache entry - it should return immediately
  // with the old value and the cache will not have completed
  // a request to getGroups yet.
  assertEquals(groups.getGroups("me").size(), 2);
  assertEquals(startingRequestCount, FakeGroupMapping.getRequestCount());
  // Now sleep for longer than the lookup delay; the background request
  // should have completed
  Thread.sleep(110);
  assertEquals(startingRequestCount + 1, FakeGroupMapping.getRequestCount());
  // Another call to get groups should give 3 groups instead of 2
  assertEquals(groups.getGroups("me").size(), 3);
}
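The fixed Thread.sleep(110) relies on the stubbed 100 ms lookup delay; a polling wait is usually less brittle. GenericTestUtils.waitFor (part of the same test utilities used above, assuming the Supplier<Boolean> overload) could replace it; a sketch:

// Sketch: poll every 10 ms, for up to 1 second, until the background refresh
// has completed, instead of sleeping for a fixed 110 ms.
GenericTestUtils.waitFor(
    () -> FakeGroupMapping.getRequestCount() == startingRequestCount + 1,
    10, 1000);
// The refreshed entry now contains the third group.
assertEquals(groups.getGroups("me").size(), 3);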