Use of org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger in project hadoop by apache.
From class TestNNStorageRetentionManager, method runTest:
private void runTest(TestCaseDescription tc) throws IOException {
  StoragePurger mockPurger =
      Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
  ArgumentCaptor<FSImageFile> imagesPurgedCaptor =
      ArgumentCaptor.forClass(FSImageFile.class);
  ArgumentCaptor<EditLogFile> logsPurgedCaptor =
      ArgumentCaptor.forClass(EditLogFile.class);

  // Ask the manager to purge files we don't need any more
  new NNStorageRetentionManager(conf, tc.mockStorage(),
      tc.mockEditLog(mockPurger), mockPurger)
      .purgeOldStorage(NameNodeFile.IMAGE);

  // Verify that it asked the purger to remove the correct files
  Mockito.verify(mockPurger, Mockito.atLeast(0))
      .purgeImage(imagesPurgedCaptor.capture());
  Mockito.verify(mockPurger, Mockito.atLeast(0))
      .purgeLog(logsPurgedCaptor.capture());

  // Check images
  Set<String> purgedPaths = Sets.newLinkedHashSet();
  for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
    purgedPaths.add(fileToPath(purged.getFile()));
  }
  Assert.assertEquals(
      Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
      Joiner.on(",").join(purgedPaths));
  // Check edit logs
  purgedPaths.clear();
  for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
    purgedPaths.add(fileToPath(purged.getFile()));
  }
  Assert.assertEquals(
      Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
      Joiner.on(",").join(purgedPaths));
}
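For context, a typical caller builds a TestCaseDescription, registers storage roots and image/edit-log files together with whether each one is expected to be purged, then hands the description to runTest. The following is a hedged sketch only: it assumes the addRoot/addImage/addLog helpers and the getImageFileName/getFinalizedEditsFileName/getInProgressEditsFileName name generators behave as in the surrounding Hadoop test class, and the paths and transaction IDs are illustrative.

// Illustrative sketch (not part of the original snippet): assumes the
// TestCaseDescription helpers and file-name generators from the
// surrounding test class; the second argument to addImage/addLog marks
// whether the file is expected to be purged.
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(101, 200), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(201, 300), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301, 400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
runTest(tc);

Assuming the default retention policy of two checkpoints, the manager would keep the images at transaction IDs 300 and 400 plus the edit segments needed beyond them, so runTest verifies that exactly the files marked true were passed to the purger.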