
Example 11 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the hadoop project by Apache.

From class DataNode, method reportBadBlock.

private void reportBadBlock(final BPOfferService bpos, final ExtendedBlock block, final String msg) {
    // Resolve the volume that holds the block; it may already be gone.
    FsVolumeSpi volume = getFSDataset().getVolume(block);
    if (volume == null) {
        LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
        return;
    }
    // Report the block along with the storage it resides on.
    bpos.reportBadBlocks(block, volume.getStorageID(), volume.getStorageType());
    LOG.warn(msg);
}
Also used: FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
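
For context, here is a minimal sketch (not Hadoop source) of the volume lookup this method relies on: resolving the FsVolumeSpi that backs a block and reading the storage identity that gets reported. The dataset reference is assumed to come from a DataNode via getFSDataset(); the class and method names are illustrative.

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

class VolumeLookupSketch {
    // Returns "storageID@storageType" for the volume holding the block, or
    // null if the dataset no longer knows about the block.
    static String describeVolume(FsDatasetSpi<?> dataset, ExtendedBlock block) {
        FsVolumeSpi volume = dataset.getVolume(block);
        if (volume == null) {
            return null;
        }
        return volume.getStorageID() + "@" + volume.getStorageType();
    }
}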

Example 12 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the hadoop project by Apache.

From class MiniDFSCluster, method setDataNodeStorageCapacities.

private synchronized void setDataNodeStorageCapacities(final int curDnIdx, final DataNode curDn, long[][] storageCapacities) throws IOException {
    if (storageCapacities == null || storageCapacities.length == 0) {
        return;
    }
    try {
        waitDataNodeFullyStarted(curDn);
    } catch (TimeoutException | InterruptedException e) {
        throw new IOException(e);
    }
    try (FsDatasetSpi.FsVolumeReferences volumes = curDn.getFSDataset().getFsVolumeReferences()) {
        assert storageCapacities[curDnIdx].length == storagesPerDatanode;
        assert volumes.size() == storagesPerDatanode;
        int j = 0;
        for (FsVolumeSpi fvs : volumes) {
            FsVolumeImpl volume = (FsVolumeImpl) fvs;
            LOG.info("setCapacityForTesting " + storageCapacities[curDnIdx][j] + " for [" + volume.getStorageType() + "]" + volume.getStorageID());
            volume.setCapacityForTesting(storageCapacities[curDnIdx][j]);
            j++;
        }
    }
    DataNodeTestUtils.triggerHeartbeat(curDn);
}
Also used: FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl), FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException)
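
The storageCapacities array above is indexed as [dataNodeIndex][volumeIndex], one entry per volume per DataNode. A hedged sketch of building such an array (the helper name and the per-volume size are illustrative):

import java.util.Arrays;

class CapacitiesSketch {
    // Builds a capacities matrix matching the storageCapacities[curDnIdx][j]
    // indexing consumed by setDataNodeStorageCapacities above.
    static long[][] buildCapacities(int numDataNodes, int storagesPerDatanode, long bytesPerVolume) {
        long[][] capacities = new long[numDataNodes][storagesPerDatanode];
        for (long[] perDn : capacities) {
            Arrays.fill(perDn, bytesPerVolume);
        }
        return capacities;
    }
}

In the Hadoop tests, arrays like this let MiniDFSCluster simulate small disks without actually filling them; the builder wiring is omitted here.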

Example 13 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the hadoop project by Apache.

From class LazyPersistTestCase, method ensureLazyPersistBlocksAreSaved.

/**
   * Make sure at least one non-transient volume has a saved copy of each replica.
   * The loop runs until every block has been found, so the async lazy-persist
   * tasks can finish before verification; callers of
   * ensureLazyPersistBlocksAreSaved expect either a successful pass or a
   * timeout failure.
   */
protected final void ensureLazyPersistBlocksAreSaved(LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final Set<Long> persistedBlockIds = new HashSet<Long>();
    try (FsDatasetSpi.FsVolumeReferences volumes = cluster.getDataNodes().get(0).getFSDataset().getFsVolumeReferences()) {
        while (persistedBlockIds.size() < locatedBlocks.getLocatedBlocks().size()) {
            // Sleep for 1 second before each verification pass.
            Thread.sleep(1000);
            for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
                for (FsVolumeSpi v : volumes) {
                    if (v.isTransientStorage()) {
                        continue;
                    }
                    FsVolumeImpl volume = (FsVolumeImpl) v;
                    File lazyPersistDir = volume.getBlockPoolSlice(bpid).getLazypersistDir();
                    long blockId = lb.getBlock().getBlockId();
                    File targetDir = DatanodeUtil.idToBlockDir(lazyPersistDir, blockId);
                    File blockFile = new File(targetDir, lb.getBlock().getBlockName());
                    if (blockFile.exists()) {
                        // Found a persisted copy of this block; record its id.
                        persistedBlockIds.add(blockId);
                    }
                }
            }
        }
    }
    // We should have found a persisted copy for each located block.
    assertThat(persistedBlockIds.size(), is(locatedBlocks.getLocatedBlocks().size()));
}
Also used: FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), File (java.io.File), HashSet (java.util.HashSet)
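
DatanodeUtil.idToBlockDir above maps a block id to a two-level subdirectory tree under the lazyPersist dir. A hedged sketch of that mapping, mirroring the standard DataNode block layout (two levels of up to 32 "subdir" directories chosen from bits of the block id; treat the constants as assumptions):

import java.io.File;

class BlockDirSketch {
    // Picks the first-level dir from bits 16..20 of the block id and the
    // second-level dir from bits 8..12, each masked to 32 possible values.
    static File idToBlockDir(File root, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0x1F);
        int d2 = (int) ((blockId >> 8) & 0x1F);
        return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
    }
}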

Example 14 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the hadoop project by Apache.

From class LazyPersistTestCase, method verifyDeletedBlocks.

protected final boolean verifyDeletedBlocks(LocatedBlocks locatedBlocks) throws IOException, InterruptedException {
    LOG.info("Verifying replica has no saved copy after deletion.");
    triggerBlockReport();
    while (cluster.getFsDatasetTestUtils(0).getPendingAsyncDeletions() > 0L) {
        Thread.sleep(1000);
    }
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final FsDatasetSpi<?> dataset = cluster.getDataNodes().get(0).getFSDataset();
    // For a transient volume check its finalized dir; for a non-transient volume, its lazyPersist dir.
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (FsVolumeSpi vol : volumes) {
            FsVolumeImpl volume = (FsVolumeImpl) vol;
            File targetDir = (volume.isTransientStorage()) ? volume.getBlockPoolSlice(bpid).getFinalizedDir() : volume.getBlockPoolSlice(bpid).getLazypersistDir();
            if (!verifyBlockDeletedFromDir(targetDir, locatedBlocks)) {
                return false;
            }
        }
    }
    return true;
}
Also used: FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), File (java.io.File)
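
verifyBlockDeletedFromDir is a helper defined elsewhere in LazyPersistTestCase. A hedged sketch of what such a check can look like (illustrative, not the actual helper): a block counts as deleted only when both its block file and its metadata file are gone.

import java.io.File;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;

class DeletedBlockCheckSketch {
    // Returns false if any located block still has a block or meta file under
    // dir. The DatanodeUtil naming helpers are assumed to be accessible here.
    static boolean blocksDeletedFromDir(File dir, LocatedBlocks locatedBlocks) {
        for (LocatedBlock lb : locatedBlocks.getLocatedBlocks()) {
            File targetDir = DatanodeUtil.idToBlockDir(dir, lb.getBlock().getBlockId());
            File blockFile = new File(targetDir, lb.getBlock().getBlockName());
            File metaFile = new File(targetDir, DatanodeUtil.getMetaName(
                lb.getBlock().getBlockName(), lb.getBlock().getGenerationStamp()));
            if (blockFile.exists() || metaFile.exists()) {
                return false;
            }
        }
        return true;
    }
}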

Example 15 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in the hadoop project by Apache.

From class TestFsDatasetImpl, method testRemoveVolumes.

@Test(timeout = 30000)
public void testRemoveVolumes() throws IOException {
    // Feed FsDataset with block metadata.
    final int NUM_BLOCKS = 100;
    for (int i = 0; i < NUM_BLOCKS; i++) {
        String bpid = BLOCK_POOL_IDS[NUM_BLOCKS % BLOCK_POOL_IDS.length];
        ExtendedBlock eb = new ExtendedBlock(bpid, i);
        // Create the replica; its handler is released immediately by try-with-resources.
        try (ReplicaHandler replica = dataset.createRbw(StorageType.DEFAULT, eb, false)) {
        }
    }
    final String[] dataDirs = conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY).split(",");
    final String volumePathToRemove = dataDirs[0];
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(StorageLocation.parse(volumePathToRemove));
    FsVolumeReferences volReferences = dataset.getFsVolumeReferences();
    FsVolumeImpl volumeToRemove = null;
    for (FsVolumeSpi vol : volReferences) {
        if (vol.getStorageLocation().equals(volumesToRemove.iterator().next())) {
            volumeToRemove = (FsVolumeImpl) vol;
        }
    }
    assertNotNull(volumeToRemove);
    volReferences.close();
    dataset.removeVolumes(volumesToRemove, true);
    int expectedNumVolumes = dataDirs.length - 1;
    assertEquals("The volume has been removed from the volumeList.", expectedNumVolumes, getNumVolumes());
    assertEquals("The volume has been removed from the storageMap.", expectedNumVolumes, dataset.storageMap.size());
    try {
        dataset.asyncDiskService.execute(volumeToRemove, new Runnable() {

            @Override
            public void run() {
            }
        });
        fail("Expect RuntimeException: the volume has been removed from the " + "AsyncDiskService.");
    } catch (RuntimeException e) {
        GenericTestUtils.assertExceptionContains("Cannot find volume", e);
    }
    int totalNumReplicas = 0;
    for (String bpid : dataset.volumeMap.getBlockPoolList()) {
        totalNumReplicas += dataset.volumeMap.size(bpid);
    }
    assertEquals("The replica infos on this volume has been removed from the " + "volumeMap.", NUM_BLOCKS / NUM_INIT_VOLUMES, totalNumReplicas);
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), Matchers.anyString (org.mockito.Matchers.anyString), ReplicaHandler (org.apache.hadoop.hdfs.server.datanode.ReplicaHandler), FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi), StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation), HashSet (java.util.HashSet), Test (org.junit.Test)
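
The removal set above is keyed by StorageLocation. A hedged sketch of turning a comma-separated dfs.datanode.data.dir value into such a set, mirroring the parsing in the test (the helper name is illustrative):

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

class StorageLocationSketch {
    // Parses each configured data dir into a StorageLocation, as the test
    // does when choosing a volume to remove.
    static Set<StorageLocation> parseDataDirs(String dataDirCsv) throws IOException {
        Set<StorageLocation> locations = new HashSet<>();
        for (String dir : dataDirCsv.split(",")) {
            locations.add(StorageLocation.parse(dir.trim()));
        }
        return locations;
    }
}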

Aggregations

FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 33 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 15 usages
Test (org.junit.Test): 10 usages
IOException (java.io.IOException): 8 usages
File (java.io.File): 7 usages
HashSet (java.util.HashSet): 7 usages
Path (org.apache.hadoop.fs.Path): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 5 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
ArrayList (java.util.ArrayList): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 3 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 3 usages
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 3 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 3 usages
HashMap (java.util.HashMap): 2 usages