Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
The class TestDatasetVolumeChecker, method testCheckOneVolume.
/**
 * Test {@link DatasetVolumeChecker#checkVolume} propagates the
 * check to the delegate checker.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testCheckOneVolume() throws Exception {
  LOG.info("Executing {}", testName.getMethodName());
  final FsVolumeSpi volume = makeVolumes(1, expectedVolumeHealth).get(0);
  final DatasetVolumeChecker checker =
      new DatasetVolumeChecker(new HdfsConfiguration(), new FakeTimer());
  checker.setDelegateChecker(new DummyChecker());
  final AtomicLong numCallbackInvocations = new AtomicLong(0);

  /**
   * Request a check and ensure it triggered {@link FsVolumeSpi#check}.
   */
  boolean result = checker.checkVolume(volume,
      new DatasetVolumeChecker.Callback() {
        @Override
        public void call(Set<FsVolumeSpi> healthyVolumes,
                         Set<FsVolumeSpi> failedVolumes) {
          numCallbackInvocations.incrementAndGet();
          if (expectedVolumeHealth != null && expectedVolumeHealth != FAILED) {
            assertThat(healthyVolumes.size(), is(1));
            assertThat(failedVolumes.size(), is(0));
          } else {
            assertThat(healthyVolumes.size(), is(0));
            assertThat(failedVolumes.size(), is(1));
          }
        }
      });

  // Ensure that the check was invoked at least once.
  verify(volume, times(1)).check(anyObject());
  if (result) {
    assertThat(numCallbackInvocations.get(), is(1L));
  }
}
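The checker in the test above is driven entirely by a freshly constructed HdfsConfiguration. A minimal, self-contained sketch of that construction pattern is shown below; the class and method names are illustrative only, and the overridden key is just an example of tuning a DataNode setting before handing the configuration to a checker.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationSketch {
  /**
   * Builds a Configuration that layers hdfs-default.xml and hdfs-site.xml
   * on top of the core resources loaded by the base Configuration class.
   */
  public static Configuration newDataNodeTestConf() {
    Configuration conf = new HdfsConfiguration();
    // Example override: tolerate a single failed volume (illustrative value).
    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    return conf;
  }
}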
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
The class TestFsDatasetImpl, method testDeletingBlocks.
@Test
public void testDeletingBlocks() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);

    FsVolumeImpl vol;
    try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
      vol = (FsVolumeImpl) volumes.get(0);
    }

    ExtendedBlock eb;
    ReplicaInfo info;
    List<Block> blockList = new ArrayList<>();
    for (int i = 1; i <= 63; i++) {
      eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
      blockList.add(eb.getLocalBlock());
    }
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));

    blockList.clear();
    eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
  } finally {
    cluster.shutdown();
  }
}
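The two Thread.sleep(1000) calls above only give the async disk service a fixed head start and can race on a slow machine. A polling alternative is sketched here, assuming org.apache.hadoop.test.GenericTestUtils from the hadoop-common test jar is on the classpath; its waitFor throws checked exceptions, so the test signature would need to widen to throws Exception.

// Poll until the first invalidated block is visible as "deleting",
// checking every 100 ms and failing after 10 s.
GenericTestUtils.waitFor(
    () -> ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()),
    100, 10000);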
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
The class TestFsDatasetImpl, method testReportBadBlocks.
@Test(timeout = 30000)
public void testReportBadBlocks() throws Exception {
  boolean threwException = false;
  MiniDFSCluster cluster = null;
  try {
    Configuration config = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
    DataNode dataNode = cluster.getDataNodes().get(0);
    ExtendedBlock block =
        new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
    try {
      // Test the reportBadBlocks when the volume is null
      dataNode.reportBadBlocks(block);
    } catch (NullPointerException npe) {
      threwException = true;
    }
    Thread.sleep(3000);
    Assert.assertFalse(threwException);
    Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());

    FileSystem fs = cluster.getFileSystem();
    Path filePath = new Path("testData");
    DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);

    block = DFSTestUtil.getFirstBlock(fs, filePath);
    // Test for the overloaded method reportBadBlocks
    dataNode.reportBadBlocks(block,
        dataNode.getFSDataset().getFsVolumeReferences().get(0));
    Thread.sleep(3000);
    BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
    // Verify the bad block has been reported to namenode
    Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
  } finally {
    cluster.shutdown();
  }
}
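Note that cluster starts out null and is only assigned after the builder succeeds, so the unconditional cluster.shutdown() in the finally block would itself throw NullPointerException if MiniDFSCluster.Builder#build() failed. A defensive variant of the same setup/teardown skeleton (test body elided) might look like this:

MiniDFSCluster cluster = null;
try {
  Configuration config = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  cluster.waitActive();
  // ... assertions against the running mini cluster ...
} finally {
  // Guard against build() having thrown before cluster was assigned.
  if (cluster != null) {
    cluster.shutdown();
  }
}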
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
The class TestStorageLocationChecker, method testFailedLocationsBelowThreshold.
/**
 * Test handling when the number of failed locations is below the
 * max volume failure threshold.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testFailedLocationsBelowThreshold() throws Exception {
  final List<StorageLocation> locations =
      makeMockLocations(HEALTHY, HEALTHY, FAILED);   // 2 healthy, 1 failed.
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  List<StorageLocation> filteredLocations = checker.check(conf, locations);
  assertThat(filteredLocations.size(), is(2));
}
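The DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY constant used above (presumably a static import from DFSConfigKeys) resolves to the property name "dfs.datanode.failed.volumes.tolerated", so the same override can be expressed against the literal key:

final Configuration conf = new HdfsConfiguration();
// Equivalent to conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1).
conf.setInt("dfs.datanode.failed.volumes.tolerated", 1);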
Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.
The class TestStorageLocationChecker, method testFailedLocationsAboveThreshold.
/**
 * Test handling when the number of failed locations is above the
 * max volume failure threshold.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testFailedLocationsAboveThreshold() throws Exception {
  final List<StorageLocation> locations =
      makeMockLocations(HEALTHY, FAILED, FAILED);    // 1 healthy, 2 failed.
  final Configuration conf = new HdfsConfiguration();
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  thrown.expect(IOException.class);
  thrown.expectMessage("Too many failed volumes - current valid volumes: 1,"
      + " volumes configured: 3, volumes failed: 2, volume failures"
      + " tolerated: 1");
  StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  checker.check(conf, locations);
}