Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
The class TestDatasetVolumeCheckerFailures, method commonInit.
@Before
public void commonInit() {
  // Fresh configuration for each test case.
  conf = new HdfsConfiguration();
  // Enforce the minimum gap between successive checks of the same disk.
  conf.setTimeDuration(DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
      MIN_DISK_CHECK_GAP_MS, TimeUnit.MILLISECONDS);
  // FakeTimer lets tests advance the clock deterministically.
  timer = new FakeTimer();
}
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
The class TestDatasetVolumeCheckerTimeout, method testDiskCheckTimeout.
/**
 * Verify that a disk check which exceeds its allotted time is reported
 * as a failed volume, and that the check and its callback each run
 * exactly once.
 */
@Test(timeout = 1000)
public void testDiskCheckTimeout() throws Exception {
  LOG.info("Executing {}", testName.getMethodName());
  final FsVolumeSpi volume = makeSlowVolume();
  final DatasetVolumeChecker checker =
      new DatasetVolumeChecker(conf, new FakeTimer());
  final AtomicLong numCallbackInvocations = new AtomicLong(0);

  // Hold the lock so the slow volume's check blocks until we release it,
  // forcing the checker to hit its timeout.
  lock.lock();

  // Request a check and ensure it triggered FsVolumeSpi#check.
  // NOTE(review): the original assigned the boolean return value to an
  // unused local; it was never asserted, so the assignment is dropped.
  checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {
    @Override
    public void call(Set<FsVolumeSpi> healthyVolumes,
        Set<FsVolumeSpi> failedVolumes) {
      numCallbackInvocations.incrementAndGet();
      // The disk check must register the volume as failed due to the
      // timeout, not as healthy.
      assertThat(healthyVolumes.size(), is(0));
      assertThat(failedVolumes.size(), is(1));
    }
  });

  // Wait for the callback to be delivered.
  Thread.sleep(DISK_CHECK_TIME);

  // Release the lock so the blocked check can complete.
  lock.unlock();

  // Ensure that the check was invoked only once.
  verify(volume, times(1)).check(anyObject());
  assertThat(numCallbackInvocations.get(), is(1L));
}
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
The class TestStorageLocationChecker, method testAllLocationsHealthy.
/**
 * Verify that when every storage location is healthy, none are filtered
 * out and the check routine is invoked once per location.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testAllLocationsHealthy() throws Exception {
  final List<StorageLocation> locations =
      makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 0);

  final StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  final List<StorageLocation> healthyLocations =
      checker.check(conf, locations);

  // Every location is healthy, so none should have been filtered out.
  assertThat(healthyLocations.size(), is(3));

  // The check method must have been called once per mock location.
  for (StorageLocation loc : locations) {
    verify(loc).check(any(StorageLocation.CheckContext.class));
  }
}
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
The class TestStorageLocationChecker, method testBadConfiguration.
/**
 * Verify that an invalid failed-volumes-tolerated setting is rejected:
 * with 3 configured locations and a tolerated value of 3, check() must
 * throw an IOException reporting the invalid value. (The locations
 * themselves are all HEALTHY; the failure comes from the configuration,
 * not from the disks.)
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testBadConfiguration() throws Exception {
final List<StorageLocation> locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
final Configuration conf = new HdfsConfiguration();
// Tolerating as many failed volumes as there are volumes is invalid.
conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);
// Expect the checker to reject the configuration with this message.
thrown.expect(IOException.class);
thrown.expectMessage("Invalid value configured");
StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
checker.check(conf, locations);
}
Use of org.apache.hadoop.util.FakeTimer in project hadoop by apache.
The class TestThrottledAsyncChecker, method testCancellation.
/**
 * Verify that shutting down the checker cancels an in-flight check: the
 * scheduled future fails with an InterruptedException cause, and the
 * failure callback fires.
 */
@Test(timeout = 60000)
public void testCancellation() throws Exception {
// A Checkable that blocks until released, so the check is still
// in flight when shutdown is requested.
LatchedCheckable target = new LatchedCheckable();
final FakeTimer timer = new FakeTimer();
final LatchedCallback callback = new LatchedCallback(target);
ThrottledAsyncChecker<Boolean, Boolean> checker = new ThrottledAsyncChecker<>(timer, MIN_ERROR_CHECK_GAP, 0, getExecutorService());
// Schedule the check; if it was accepted, attach the callback to its
// future. (schedule() may return empty if the check is throttled.)
Optional<ListenableFuture<Boolean>> olf = checker.schedule(target, true);
if (olf.isPresent()) {
Futures.addCallback(olf.get(), callback);
}
// Request immediate cancellation.
checker.shutdownAndWait(0, TimeUnit.MILLISECONDS);
try {
// The blocked check should have been interrupted by the shutdown, so
// getting the future's value must fail rather than return a result.
// NOTE(review): olf.get() here assumes the schedule was accepted —
// would throw NoSuchElementException otherwise; confirm intentional.
assertFalse(olf.get().get());
fail("Failed to get expected InterruptedException");
} catch (ExecutionException ee) {
// The interruption surfaces as the cause of the ExecutionException.
assertTrue(ee.getCause() instanceof InterruptedException);
}
// Block until the failure callback has actually been delivered.
callback.failureLatch.await();
}
Aggregations