Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
In class TestStorageLocationChecker, method makeSlowLocations:
/**
 * Return a list of storage locations - one per argument - whose check()
 * method takes at least the specified number of milliseconds to complete.
 */
private List<StorageLocation> makeSlowLocations(long... args)
    throws IOException {
  final List<StorageLocation> locations = new ArrayList<>(args.length);
  final AtomicInteger index = new AtomicInteger(0);

  for (final long checkDelayMs : args) {
    final StorageLocation location = mock(StorageLocation.class);
    when(location.toString()).thenReturn("/" + index.incrementAndGet());
    when(location.check(any(StorageLocation.CheckContext.class)))
        .thenAnswer(new Answer<VolumeCheckResult>() {
          @Override
          public VolumeCheckResult answer(InvocationOnMock invocation)
              throws Throwable {
            Thread.sleep(checkDelayMs);
            return VolumeCheckResult.HEALTHY;
          }
        });
    locations.add(location);
  }
  return locations;
}
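A helper like this pairs naturally with a timeout test. Below is a minimal sketch of how it might be driven; the test name, the DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY setting, and the exact exception surfaced by check() are assumptions, not part of the snippet above.

@Test(timeout = 30000)
public void testTimeoutInCheck() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Assumed setting: abort any single volume check after one second.
  conf.setTimeDuration(DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
      1, TimeUnit.SECONDS);
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  // Both locations sleep well past the configured timeout.
  final List<StorageLocation> locations = makeSlowLocations(2000, 2000);
  final StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  try {
    checker.check(conf, locations);
    fail("Expected the check to fail when every volume check times out");
  } catch (Exception expected) {
    // Expected; the concrete exception type is an assumption here.
  }
}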
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
In class TestStorageLocationChecker, method makeMockLocations:
/**
 * Return a list of storage locations - one per argument - which return
 * health check results corresponding to the supplied arguments.
 */
private List<StorageLocation> makeMockLocations(VolumeCheckResult... args)
    throws IOException {
  final List<StorageLocation> locations = new ArrayList<>(args.length);
  final AtomicInteger index = new AtomicInteger(0);

  for (VolumeCheckResult result : args) {
    final StorageLocation location = mock(StorageLocation.class);
    when(location.toString()).thenReturn("/" + index.incrementAndGet());
    when(location.check(any(StorageLocation.CheckContext.class)))
        .thenReturn(result);
    locations.add(location);
  }
  return locations;
}
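A factory like this makes threshold tests straightforward. The sketch below is one plausible use, assuming FAILED is the VolumeCheckResult failure value and that check() returns only the locations that passed; the test name is hypothetical.

@Test(timeout = 30000)
public void testFailedLocationsBelowThreshold() throws Exception {
  // Two healthy volumes and one failed volume.
  final List<StorageLocation> locations =
      makeMockLocations(HEALTHY, HEALTHY, FAILED);
  final Configuration conf = new HdfsConfiguration();
  // A single failed volume is within the tolerated limit.
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  // Assumption: check() filters out the failed location.
  List<StorageLocation> filtered = checker.check(conf, locations);
  assertThat(filtered.size(), is(2));
}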
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
In class TestStorageLocationChecker, method testBadConfiguration:
/**
 * Test that the checker rejects an invalid configuration in which the
 * number of tolerated volume failures is not smaller than the number of
 * configured volumes.
 *
 * @throws Exception
 */
@Test(timeout = 30000)
public void testBadConfiguration() throws Exception {
  final List<StorageLocation> locations =
      makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
  final Configuration conf = new HdfsConfiguration();
  // Three volumes, but three tolerated failures: an invalid combination.
  conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);

  thrown.expect(IOException.class);
  thrown.expectMessage("Invalid value configured");
  StorageLocationChecker checker =
      new StorageLocationChecker(conf, new FakeTimer());
  checker.check(conf, locations);
}
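The thrown field used above is declared elsewhere in the test class; it is presumably the standard JUnit 4 ExpectedException rule, along these lines:

@Rule
public ExpectedException thrown = ExpectedException.none();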
Use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.
In class TestDFSAdmin, method testDataNodeGetReconfigurationStatus:
/**
 * Test reconfiguration and check the status outputs.
 * @param expectedSuccess set true if the reconfiguration task should succeed.
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
private void testDataNodeGetReconfigurationStatus(boolean expectedSuccess)
    throws IOException, InterruptedException, TimeoutException {
  ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
  datanode.setReconfigurationUtil(ru);

  List<ReconfigurationUtil.PropertyChange> changes = new ArrayList<>();
  File newDir = new File(cluster.getDataDirectory(), "data_new");
  if (expectedSuccess) {
    newDir.mkdirs();
  } else {
    // Inject failure: a regular file where a directory is expected.
    newDir.createNewFile();
  }
  changes.add(new ReconfigurationUtil.PropertyChange(
      DFS_DATANODE_DATA_DIR_KEY, newDir.toString(),
      datanode.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
  changes.add(new ReconfigurationUtil.PropertyChange(
      "randomKey", "new123", "old456"));
  when(ru.parseChangedProperties(any(Configuration.class),
      any(Configuration.class))).thenReturn(changes);

  final int port = datanode.getIpcPort();
  final String address = "localhost:" + port;

  assertThat(admin.startReconfiguration("datanode", address), is(0));

  final List<String> outs = Lists.newArrayList();
  final List<String> errs = Lists.newArrayList();
  awaitReconfigurationFinished("datanode", address, outs, errs);

  if (expectedSuccess) {
    assertThat(outs.size(), is(4));
  } else {
    assertThat(outs.size(), is(6));
  }

  List<StorageLocation> locations =
      DataNode.getStorageLocations(datanode.getConf());
  if (expectedSuccess) {
    assertThat(locations.size(), is(1));
    assertThat(new File(locations.get(0).getUri()), is(newDir));
    // Verify the directory is appropriately formatted.
    assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
  } else {
    assertTrue(locations.isEmpty());
  }

  int offset = 1;
  if (expectedSuccess) {
    assertThat(outs.get(offset),
        containsString("SUCCESS: Changed property " +
            DFS_DATANODE_DATA_DIR_KEY));
  } else {
    assertThat(outs.get(offset),
        containsString("FAILED: Change property " +
            DFS_DATANODE_DATA_DIR_KEY));
  }
  assertThat(outs.get(offset + 1),
      is(allOf(containsString("From:"), containsString("data1"),
          containsString("data2"))));
  assertThat(outs.get(offset + 2),
      is(not(anyOf(containsString("data1"), containsString("data2")))));
  assertThat(outs.get(offset + 2),
      is(allOf(containsString("To"), containsString("data_new"))));
}
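This helper is private and parameterized on the expected outcome, so a public test would presumably drive both paths. A sketch follows; the restartCluster() helper that resets the mini cluster between runs is an assumption, not shown in the snippet.

@Test(timeout = 30000)
public void testDataNodeGetReconfigurationStatus() throws IOException,
    InterruptedException, TimeoutException {
  // Happy path: the new data directory can be created.
  testDataNodeGetReconfigurationStatus(true);
  // Assumed helper: restore the mini cluster to a clean state.
  restartCluster();
  // Failure path: a regular file blocks creation of the new directory.
  testDataNodeGetReconfigurationStatus(false);
}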