Example 1 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDistributedFileSystem, method testLocatedFileStatusStorageIdsTypes.

@Test(timeout = 120000)
public void testLocatedFileStatusStorageIdsTypes() throws Exception {
    final Configuration conf = getTestConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
        final DistributedFileSystem fs = cluster.getFileSystem();
        final Path testFile = new Path("/testListLocatedStatus");
        final int blockSize = 4096;
        final int numBlocks = 10;
        // Create a test file
        final int repl = 2;
        DFSTestUtil.createFile(fs, testFile, blockSize, numBlocks * blockSize, blockSize, (short) repl, 0xADDED);
        DFSTestUtil.waitForReplication(fs, testFile, (short) repl, 30000);
        // Get the listing
        RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(testFile);
        assertTrue("Expected file to be present", it.hasNext());
        LocatedFileStatus stat = it.next();
        BlockLocation[] locs = stat.getBlockLocations();
        assertEquals("Unexpected number of locations", numBlocks, locs.length);
        Set<String> dnStorageIds = new HashSet<>();
        for (DataNode d : cluster.getDataNodes()) {
            try (FsDatasetSpi.FsVolumeReferences volumes = d.getFSDataset().getFsVolumeReferences()) {
                for (FsVolumeSpi vol : volumes) {
                    dnStorageIds.add(vol.getStorageID());
                }
            }
        }
        for (BlockLocation loc : locs) {
            String[] ids = loc.getStorageIds();
            // Collect the IDs into a set; the size check below verifies there are no duplicates
            Set<String> storageIds = new HashSet<>();
            Collections.addAll(storageIds, ids);
            assertEquals("Unexpected num storage ids", repl, storageIds.size());
            // Make sure these are all valid storage IDs
            assertTrue("Unknown storage IDs found!", dnStorageIds.containsAll(storageIds));
            // Check storage types are the default, since we didn't set any
            StorageType[] types = loc.getStorageTypes();
            assertEquals("Unexpected num storage types", repl, types.length);
            for (StorageType t : types) {
                assertEquals("Unexpected storage type", StorageType.DEFAULT, t);
            }
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), BlockLocation (org.apache.hadoop.fs.BlockLocation), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), HashSet (java.util.HashSet), Test (org.junit.Test)
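
The pattern worth noting is the volume enumeration: FsVolumeReferences is AutoCloseable, and the try-with-resources block releases the per-volume reference counts once iteration is done. A minimal sketch of just that helper, distilled from the test above (the class and method names are introduced here for illustration):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class VolumeIdUtil {
    /** Collect the storage IDs of every volume on every DataNode. */
    public static Set<String> collectStorageIds(MiniDFSCluster cluster) throws IOException {
        Set<String> ids = new HashSet<>();
        for (DataNode dn : cluster.getDataNodes()) {
            // FsVolumeReferences pins each volume while we iterate; closing
            // it releases those references so volumes can be removed later.
            try (FsDatasetSpi.FsVolumeReferences volumes =
                    dn.getFSDataset().getFsVolumeReferences()) {
                for (FsVolumeSpi vol : volumes) {
                    ids.add(vol.getStorageID());
                }
            }
        }
        return ids;
    }
}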

Example 2 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class BlockScanner, method addVolumeScanner.

/**
  * Set up a scanner for the given block pool and volume.
  *
  * @param ref              A reference to the volume.
  */
public synchronized void addVolumeScanner(FsVolumeReference ref) {
    boolean success = false;
    try {
        FsVolumeSpi volume = ref.getVolume();
        if (!isEnabled()) {
            LOG.debug("Not adding volume scanner for {}, because the block " + "scanner is disabled.", volume);
            return;
        }
        VolumeScanner scanner = scanners.get(volume.getStorageID());
        if (scanner != null) {
            LOG.error("Already have a scanner for volume {}.", volume);
            return;
        }
        LOG.debug("Adding scanner for volume {} (StorageID {})", volume, volume.getStorageID());
        scanner = new VolumeScanner(conf, datanode, ref);
        scanner.start();
        scanners.put(volume.getStorageID(), scanner);
        success = true;
    } finally {
        if (!success) {
            // If we didn't create a new VolumeScanner object, we don't
            // need this reference to the volume.
            IOUtils.cleanup(null, ref);
        }
    }
}
Also used: FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
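
Note that addVolumeScanner() takes ownership of the FsVolumeReference: on success the reference lives on inside the started VolumeScanner, and on every other path the finally block releases it. A hedged sketch of the caller side, assuming a volume already in hand (the class, field, and method names are illustrative; obtainReference() is the FsVolumeSpi call that pins a volume and throws ClosedChannelException if the volume has already been closed):

import java.nio.channels.ClosedChannelException;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

public class ScannerRegistration {
    private final BlockScanner blockScanner;

    ScannerRegistration(BlockScanner blockScanner) {
        this.blockScanner = blockScanner;
    }

    void scanVolume(FsVolumeSpi volume) {
        try {
            // Pin the volume so it cannot be removed while the scanner runs.
            FsVolumeReference ref = volume.obtainReference();
            // Ownership of 'ref' transfers to addVolumeScanner(), which
            // releases it itself if no scanner ends up holding it.
            blockScanner.addVolumeScanner(ref);
        } catch (ClosedChannelException e) {
            // The volume was closed before we could pin it; nothing to scan.
        }
    }
}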

Example 3 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDataNodeHotSwapVolumes, method testAddVolumeFailures.

@Test
public void testAddVolumeFailures() throws IOException {
    startDFSCluster(1, 1);
    final String dataDir = cluster.getDataDirectory();
    DataNode dn = cluster.getDataNodes().get(0);
    List<String> newDirs = Lists.newArrayList();
    final int NUM_NEW_DIRS = 4;
    for (int i = 0; i < NUM_NEW_DIRS; i++) {
        File newVolume = new File(dataDir, "new_vol" + i);
        newDirs.add(newVolume.toString());
        if (i % 2 == 0) {
            // Make addVolume() fail.
            newVolume.createNewFile();
        }
    }
    String newValue = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY) + "," + Joiner.on(",").join(newDirs);
    try {
        dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, newValue);
        fail("Expect to throw IOException.");
    } catch (ReconfigurationException e) {
        String errorMessage = e.getCause().getMessage();
        String[] messages = errorMessage.split("\\r?\\n");
        assertEquals(2, messages.length);
        assertThat(messages[0], containsString("new_vol0"));
        assertThat(messages[1], containsString("new_vol2"));
    }
    // Make sure that vol0 and vol2's metadata are not left in memory.
    FsDatasetSpi<?> dataset = dn.getFSDataset();
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi volume : volumes) {
            assertThat(new File(volume.getStorageLocation().getUri()).toString(), is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
        }
    }
    DataStorage storage = dn.getStorage();
    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
        Storage.StorageDirectory sd = storage.getStorageDir(i);
        assertThat(sd.getRoot().toString(), is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
    }
    // The newly effective conf does not have vol0 and vol2.
    String[] effectiveVolumes = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY).split(",");
    assertEquals(4, effectiveVolumes.length);
    for (String ev : effectiveVolumes) {
        assertThat(new File(StorageLocation.parse(ev).getUri()).getCanonicalPath(), is(not(anyOf(is(newDirs.get(0)), is(newDirs.get(2))))));
    }
}
Also used: FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString), Matchers.anyString (org.mockito.Matchers.anyString), Storage (org.apache.hadoop.hdfs.server.common.Storage), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ReconfigurationException (org.apache.hadoop.conf.ReconfigurationException), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), File (java.io.File), Test (org.junit.Test)
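
The final loop compares directories through StorageLocation.parse() rather than raw string equality, since dfs.datanode.data.dir entries may carry a storage-type prefix and URI scheme that a plain string comparison would miss. A small sketch of that normalization, with an illustrative path:

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

public class StorageLocationDemo {
    public static void main(String[] args) throws IOException {
        // A data-dir entry may be prefixed with a storage type; parse()
        // strips it and exposes the underlying directory as a URI.
        StorageLocation loc = StorageLocation.parse("[DISK]file:///data/vol0");
        // Canonicalize to a filesystem path for comparison, exactly as the
        // effective-volumes check above does.
        System.out.println(new File(loc.getUri()).getCanonicalPath());
    }
}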

Example 4 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDataNodeVolumeMetrics, method verifyDataNodeVolumeMetrics.

private void verifyDataNodeVolumeMetrics(final FileSystem fs, final MiniDFSCluster cluster, final Path fileName) throws IOException {
    List<DataNode> datanodes = cluster.getDataNodes();
    DataNode datanode = datanodes.get(0);
    final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
    DataNodeVolumeMetrics metrics = volume.getMetrics();
    MetricsRecordBuilder rb = getMetrics(volume.getMetrics().name());
    assertCounter("TotalDataFileIos", metrics.getTotalDataFileIos(), rb);
    LOG.info("TotalMetadataOperations : " + metrics.getTotalMetadataOperations());
    LOG.info("TotalDataFileIos : " + metrics.getTotalDataFileIos());
    LOG.info("TotalFileIoErrors : " + metrics.getTotalFileIoErrors());
    LOG.info("MetadataOperationSampleCount : " + metrics.getMetadataOperationSampleCount());
    LOG.info("MetadataOperationMean : " + metrics.getMetadataOperationMean());
    LOG.info("MetadataFileIoStdDev : " + metrics.getMetadataOperationStdDev());
    LOG.info("DataFileIoSampleCount : " + metrics.getDataFileIoSampleCount());
    LOG.info("DataFileIoMean : " + metrics.getDataFileIoMean());
    LOG.info("DataFileIoStdDev : " + metrics.getDataFileIoStdDev());
    LOG.info("flushIoSampleCount : " + metrics.getFlushIoSampleCount());
    LOG.info("flushIoMean : " + metrics.getFlushIoMean());
    LOG.info("flushIoStdDev : " + metrics.getFlushIoStdDev());
    LOG.info("syncIoSampleCount : " + metrics.getSyncIoSampleCount());
    LOG.info("syncIoMean : " + metrics.getSyncIoMean());
    LOG.info("syncIoStdDev : " + metrics.getSyncIoStdDev());
    LOG.info("readIoSampleCount : " + metrics.getReadIoMean());
    LOG.info("readIoMean : " + metrics.getReadIoMean());
    LOG.info("readIoStdDev : " + metrics.getReadIoStdDev());
    LOG.info("writeIoSampleCount : " + metrics.getWriteIoSampleCount());
    LOG.info("writeIoMean : " + metrics.getWriteIoMean());
    LOG.info("writeIoStdDev : " + metrics.getWriteIoStdDev());
    LOG.info("fileIoErrorSampleCount : " + metrics.getFileIoErrorSampleCount());
    LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
    LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), DataNodeVolumeMetrics (org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder)
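
Beyond logging, the remaining counters can be asserted the same way as TotalDataFileIos: fetch the volume's metrics record and compare. A hedged sketch, assuming the counter names mirror the getter names the way TotalDataFileIos does above (the class name is illustrative):

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;

public class VolumeMetricsCheck {
    static void verifyVolumeCounters(FsVolumeSpi volume) {
        DataNodeVolumeMetrics metrics = volume.getMetrics();
        MetricsRecordBuilder rb = getMetrics(metrics.name());
        // Counter names are assumed to mirror the getters, as the
        // TotalDataFileIos assertion in the test above suggests.
        assertCounter("TotalMetadataOperations",
            metrics.getTotalMetadataOperations(), rb);
        assertCounter("TotalFileIoErrors", metrics.getTotalFileIoErrors(), rb);
    }
}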

Example 5 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDataNodeVolumeFailure, method testFailedVolumeBeingRemovedFromDataNode.

/**
   * Test that DataStorage and BlockPoolSliceStorage remove the failed volume
   * after failure.
   */
@Test(timeout = 150000)
public void testFailedVolumeBeingRemovedFromDataNode() throws Exception {
    // The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
    // volume failures, which is currently not supported on Windows.
    assumeNotWindows();
    Path file1 = new Path("/test1");
    DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
    DFSTestUtil.waitReplication(fs, file1, (short) 2);
    // Volume 1 of DataNode 0: MiniDFSCluster numbers its data dirs data1, data2, ... (two per node).
    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
    DataNode dn0 = cluster.getDataNodes().get(0);
    DataNodeTestUtils.waitForDiskError(dn0, DataNodeTestUtils.getVolume(dn0, dn0Vol1));
    // Verify dn0Vol1 has been completely removed from DN0.
    // 1. dn0Vol1 is removed from DataStorage.
    DataStorage storage = dn0.getStorage();
    assertEquals(1, storage.getNumStorageDirs());
    for (int i = 0; i < storage.getNumStorageDirs(); i++) {
        Storage.StorageDirectory sd = storage.getStorageDir(i);
        assertFalse(sd.getRoot().getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    BlockPoolSliceStorage bpsStorage = storage.getBPStorage(bpid);
    assertEquals(1, bpsStorage.getNumStorageDirs());
    for (int i = 0; i < bpsStorage.getNumStorageDirs(); i++) {
        Storage.StorageDirectory sd = bpsStorage.getStorageDir(i);
        assertFalse(sd.getRoot().getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }
    // 2. dn0Vol1 is removed from FsDataset
    FsDatasetSpi<? extends FsVolumeSpi> data = dn0.getFSDataset();
    try (FsDatasetSpi.FsVolumeReferences vols = data.getFsVolumeReferences()) {
        for (FsVolumeSpi volume : vols) {
            assertFalse(new File(volume.getStorageLocation().getUri()).getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
        }
    }
    // 3. all blocks on dn0Vol1 have been removed.
    for (ReplicaInfo replica : FsDatasetTestUtil.getReplicas(data, bpid)) {
        assertNotNull(replica.getVolume());
        assertFalse(new File(replica.getVolume().getStorageLocation().getUri()).getAbsolutePath().startsWith(dn0Vol1.getAbsolutePath()));
    }
    // 4. dn0Vol1 is not in DN0's configuration and dataDirs anymore.
    String[] dataDirStrs = dn0.getConf().get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    assertEquals(1, dataDirStrs.length);
    assertFalse(dataDirStrs[0].contains(dn0Vol1.getAbsolutePath()));
}
Also used: Path (org.apache.hadoop.fs.Path), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), Storage (org.apache.hadoop.hdfs.server.common.Storage), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), File (java.io.File), Test (org.junit.Test)
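
DataNodeTestUtils also provides the inverse of injectDataDirFailure(): restoreDataDirFromFailure() puts the original directory back, which companion volume-failure tests use to bring a volume online again. A brief sketch of the pair (the directory name is illustrative, and neither call works on Windows):

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

public class VolumeFailureToggle {
    static void toggleVolumeFailure(File dataDir) throws IOException {
        // "data1" mirrors the naming in the test above; illustrative only.
        File vol = new File(dataDir, "data1");
        // Replace the directory on disk so the disk checker reports failure.
        DataNodeTestUtils.injectDataDirFailure(vol);
        // ... assertions while the volume is failed ...
        // Put the original directory back; the volume passes checks again.
        DataNodeTestUtils.restoreDataDirFromFailure(vol);
    }
}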

Aggregations

FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 33 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 15 usages
Test (org.junit.Test): 10 usages
IOException (java.io.IOException): 8 usages
File (java.io.File): 7 usages
HashSet (java.util.HashSet): 7 usages
Path (org.apache.hadoop.fs.Path): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 5 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
ArrayList (java.util.ArrayList): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 3 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 3 usages
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 3 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 3 usages
HashMap (java.util.HashMap): 2 usages