Search in sources :

Example 21 with StorageLocation

use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

In the class TestStorageLocationChecker, the method makeSlowLocations:

/**
   * Create one mock StorageLocation per argument. Each location's check()
   * sleeps for at least the corresponding number of milliseconds before
   * reporting {@code VolumeCheckResult.HEALTHY}.
   *
   * @param args per-location minimum check delays, in milliseconds.
   * @return mock locations whose toString() values are "/1", "/2", ...
   *         in argument order.
   * @throws IOException declared by StorageLocation#check.
   */
private List<StorageLocation> makeSlowLocations(long... args) throws IOException {
    final List<StorageLocation> mocks = new ArrayList<>(args.length);
    int nameSuffix = 0;
    for (final long delayMs : args) {
        final StorageLocation slowLocation = mock(StorageLocation.class);
        ++nameSuffix;
        when(slowLocation.toString()).thenReturn("/" + nameSuffix);
        when(slowLocation.check(any(StorageLocation.CheckContext.class))).thenAnswer(new Answer<VolumeCheckResult>() {

            @Override
            public VolumeCheckResult answer(InvocationOnMock invocation) throws Throwable {
                // Simulate a slow disk check, then report the volume healthy.
                Thread.sleep(delayMs);
                return VolumeCheckResult.HEALTHY;
            }
        });
        mocks.add(slowLocation);
    }
    return mocks;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ArrayList(java.util.ArrayList) VolumeCheckResult(org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)

Example 22 with StorageLocation

use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

In the class TestStorageLocationChecker, the method makeMockLocations:

/**
   * Create one mock StorageLocation per argument. Each location's check()
   * immediately returns the corresponding supplied result.
   *
   * @param args the health check result each mock location should report.
   * @return mock locations whose toString() values are "/1", "/2", ...
   *         in argument order.
   * @throws IOException declared by StorageLocation#check.
   */
private List<StorageLocation> makeMockLocations(VolumeCheckResult... args) throws IOException {
    final List<StorageLocation> mocks = new ArrayList<>(args.length);
    for (int i = 0; i < args.length; i++) {
        final StorageLocation loc = mock(StorageLocation.class);
        // Give each mock a distinct path-like name for readable failures.
        when(loc.toString()).thenReturn("/" + (i + 1));
        when(loc.check(any(StorageLocation.CheckContext.class))).thenReturn(args[i]);
        mocks.add(loc);
    }
    return mocks;
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ArrayList(java.util.ArrayList) VolumeCheckResult(org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation)

Example 23 with StorageLocation

use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

In the class TestStorageLocationChecker, the method testBadConfiguration:

/**
   * Test that an invalid failed-volumes-tolerated setting is rejected:
   * with three (healthy) storage locations and the tolerated-failures
   * value also set to 3, check() must fail with an IOException whose
   * message reports the invalid configured value.
   *
   * @throws Exception
   */
@Test(timeout = 30000)
public void testBadConfiguration() throws Exception {
    // All three locations are healthy; the failure is configuration-driven.
    final List<StorageLocation> locations = makeMockLocations(HEALTHY, HEALTHY, HEALTHY);
    final Configuration conf = new HdfsConfiguration();
    // Tolerating as many failed volumes as there are volumes is invalid.
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 3);
    thrown.expect(IOException.class);
    thrown.expectMessage("Invalid value configured");
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    checker.check(conf, locations);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)

Example 24 with StorageLocation

use of org.apache.hadoop.hdfs.server.datanode.StorageLocation in project hadoop by apache.

In the class TestDFSAdmin, the method testDataNodeGetReconfigurationStatus:

/**
   * Trigger a datanode reconfiguration of the data directory property and
   * verify the status output reported by DFSAdmin.
   *
   * @param expectedSuccuss set true if the reconfiguration task should succeed;
   *                        false injects a failure by pre-creating the new data
   *                        directory path as a regular file.
   * @throws IOException
   * @throws InterruptedException
   * @throws TimeoutException
   */
private void testDataNodeGetReconfigurationStatus(boolean expectedSuccuss) throws IOException, InterruptedException, TimeoutException {
    ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
    datanode.setReconfigurationUtil(ru);
    List<ReconfigurationUtil.PropertyChange> changes = new ArrayList<>();
    File newDir = new File(cluster.getDataDirectory(), "data_new");
    if (expectedSuccuss) {
        newDir.mkdirs();
    } else {
        // Inject failure: a regular file at the directory path makes the
        // data-dir change unusable.
        newDir.createNewFile();
    }
    // One legitimate change (the data dir) plus one unknown key, which the
    // datanode should report as unrecognized.
    changes.add(new ReconfigurationUtil.PropertyChange(DFS_DATANODE_DATA_DIR_KEY, newDir.toString(), datanode.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
    changes.add(new ReconfigurationUtil.PropertyChange("randomKey", "new123", "old456"));
    when(ru.parseChangedProperties(any(Configuration.class), any(Configuration.class))).thenReturn(changes);
    final int port = datanode.getIpcPort();
    final String address = "localhost:" + port;
    // Kick off the reconfiguration via DFSAdmin and wait for completion.
    assertThat(admin.startReconfiguration("datanode", address), is(0));
    final List<String> outs = Lists.newArrayList();
    final List<String> errs = Lists.newArrayList();
    awaitReconfigurationFinished("datanode", address, outs, errs);
    // The status output is longer in the failure case (extra error detail).
    if (expectedSuccuss) {
        assertThat(outs.size(), is(4));
    } else {
        assertThat(outs.size(), is(6));
    }
    List<StorageLocation> locations = DataNode.getStorageLocations(datanode.getConf());
    if (expectedSuccuss) {
        assertThat(locations.size(), is(1));
        assertThat(new File(locations.get(0).getUri()), is(newDir));
        // Verify the directory is appropriately formatted.
        assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
    } else {
        assertTrue(locations.isEmpty());
    }
    // outs.get(offset..offset+2) hold the per-property status lines for the
    // data-dir change: result line, "From:" line, "To" line.
    int offset = 1;
    if (expectedSuccuss) {
        assertThat(outs.get(offset), containsString("SUCCESS: Changed property " + DFS_DATANODE_DATA_DIR_KEY));
    } else {
        assertThat(outs.get(offset), containsString("FAILED: Change property " + DFS_DATANODE_DATA_DIR_KEY));
    }
    assertThat(outs.get(offset + 1), is(allOf(containsString("From:"), containsString("data1"), containsString("data2"))));
    assertThat(outs.get(offset + 2), is(not(anyOf(containsString("data1"), containsString("data2")))));
    assertThat(outs.get(offset + 2), is(allOf(containsString("To"), containsString("data_new"))));
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ReconfigurationUtil(org.apache.hadoop.conf.ReconfigurationUtil) ArrayList(java.util.ArrayList) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File)

Aggregations

StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation)24 Test (org.junit.Test)11 File (java.io.File)7 ArrayList (java.util.ArrayList)7 Configuration (org.apache.hadoop.conf.Configuration)7 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)7 Storage (org.apache.hadoop.hdfs.server.common.Storage)6 DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage)6 FakeTimer (org.apache.hadoop.util.FakeTimer)6 HashSet (java.util.HashSet)5 Matchers.anyString (org.mockito.Matchers.anyString)5 IOException (java.io.IOException)4 StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory)4 FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences)4 NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)4 Path (org.apache.hadoop.fs.Path)3 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)3 FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)3 MultipleIOException (org.apache.hadoop.io.MultipleIOException)3 FileOutputStream (java.io.FileOutputStream)2