Example 26 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDataNodeHotSwapVolumes, method testReplicatingAfterRemoveVolume.

@Test(timeout = 60000)
public void testReplicatingAfterRemoveVolume() throws InterruptedException, TimeoutException, IOException, ReconfigurationException {
    startDFSCluster(1, 2);
    final FileSystem fs = cluster.getFileSystem();
    final short replFactor = 2;
    Path testFile = new Path("/test");
    createFile(testFile, 4, replFactor);
    DataNode dn = cluster.getDataNodes().get(0);
    Collection<String> oldDirs = getDataDirs(dn);
    // Find the storage directory that holds the block, then remove it
    ExtendedBlock block = DFSTestUtil.getAllBlocks(fs, testFile).get(1).getBlock();
    FsVolumeSpi volumeWithBlock = dn.getFSDataset().getVolume(block);
    String dirWithBlock = "[" + volumeWithBlock.getStorageType() + "]"
        + volumeWithBlock.getStorageLocation().getUri();
    String newDirs = dirWithBlock;
    for (String dir : oldDirs) {
        if (dirWithBlock.startsWith(dir)) {
            continue;
        }
        newDirs = dir;
        break;
    }
    assertThat("DN did not update its own config", dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs), is(dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY)));
    oldDirs.remove(newDirs);
    assertFileLocksReleased(oldDirs);
    triggerDeleteReport(dn);
    waitReplication(fs, testFile, 1, 1);
    DFSTestUtil.waitReplication(fs, testFile, replFactor);
}
Also used : Path(org.apache.hadoop.fs.Path) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Matchers.anyString(org.mockito.Matchers.anyString) Test(org.junit.Test)
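
For context, the "[StorageType]URI" string the test rebuilds from getStorageType() and getStorageLocation() is the same format dfs.datanode.data.dir accepts in configuration: an optional storage-type prefix in square brackets, followed by a directory URI, with multiple entries joined by commas. A minimal sketch of that format, using hypothetical paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class DataDirFormatSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Each dfs.datanode.data.dir entry may carry a [STORAGE_TYPE]
        // prefix (e.g. [DISK], [SSD]) in front of the directory URI;
        // multiple entries are comma-separated. Paths are hypothetical.
        String diskDir = "[DISK]file:/data/dn/disk0";
        String ssdDir = "[SSD]file:/data/dn/ssd0";
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
                diskDir + "," + ssdDir);
        System.out.println(
                conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
    }
}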

Example 27 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestBlockScanner, method testVolumeIteratorImpl.

/**
   * Test iterating through a bunch of blocks in a volume using a volume
   * iterator.<p/>
   *
   * We save the iterator about a third of the way through the blocks,
   * rewind it about halfway through, and reload the saved iterator about
   * two thirds of the way through.
   *
   * @param numFiles        The number of files to create.
   * @param maxStaleness    The maximum staleness to allow with the iterator.
   * @throws Exception
   */
private void testVolumeIteratorImpl(int numFiles, long maxStaleness) throws Exception {
    Configuration conf = new Configuration();
    disableBlockScanner(conf);
    TestContext ctx = new TestContext(conf, 1);
    ctx.createFiles(0, numFiles, 1);
    assertEquals(1, ctx.volumes.size());
    FsVolumeSpi volume = ctx.volumes.get(0);
    ExtendedBlock savedBlock = null, loadedBlock = null;
    boolean testedRewind = false, testedSave = false, testedLoad = false;
    int blocksProcessed = 0, savedBlocksProcessed = 0;
    try {
        List<BPOfferService> bpos = ctx.datanode.getAllBpOs();
        assertEquals(1, bpos.size());
        BlockIterator iter = volume.newBlockIterator(ctx.bpids[0], "test");
        assertEquals(ctx.bpids[0], iter.getBlockPoolId());
        iter.setMaxStalenessMs(maxStaleness);
        while (true) {
            HashSet<ExtendedBlock> blocks = new HashSet<ExtendedBlock>();
            for (int blockIdx = 0; blockIdx < numFiles; blockIdx++) {
                blocks.add(ctx.getFileBlock(0, blockIdx));
            }
            while (true) {
                ExtendedBlock block = iter.nextBlock();
                if (block == null) {
                    break;
                }
                blocksProcessed++;
                LOG.info("BlockIterator for {} found block {}, blocksProcessed = {}", volume, block, blocksProcessed);
                if (testedSave && (savedBlock == null)) {
                    savedBlock = block;
                }
                if (testedLoad && (loadedBlock == null)) {
                    loadedBlock = block;
                    // The block that we get back right after loading the iterator
                    // should be the same block we got back right after saving
                    // the iterator.
                    assertEquals(savedBlock, loadedBlock);
                }
                boolean blockRemoved = blocks.remove(block);
                assertTrue("Found unknown block " + block, blockRemoved);
                if (blocksProcessed > (numFiles / 3)) {
                    if (!testedSave) {
                        LOG.info("Processed {} blocks out of {}.  Saving iterator.", blocksProcessed, numFiles);
                        iter.save();
                        testedSave = true;
                        savedBlocksProcessed = blocksProcessed;
                    }
                }
                if (blocksProcessed > (numFiles / 2)) {
                    if (!testedRewind) {
                        LOG.info("Processed {} blocks out of {}.  Rewinding iterator.", blocksProcessed, numFiles);
                        iter.rewind();
                        break;
                    }
                }
                if (blocksProcessed > ((2 * numFiles) / 3)) {
                    if (!testedLoad) {
                        LOG.info("Processed {} blocks out of {}.  Loading iterator.", blocksProcessed, numFiles);
                        iter = volume.loadBlockIterator(ctx.bpids[0], "test");
                        iter.setMaxStalenessMs(maxStaleness);
                        break;
                    }
                }
            }
            if (!testedRewind) {
                testedRewind = true;
                blocksProcessed = 0;
                LOG.info("Starting again at the beginning...");
                continue;
            }
            if (!testedLoad) {
                testedLoad = true;
                blocksProcessed = savedBlocksProcessed;
                LOG.info("Starting again at the load point...");
                continue;
            }
            assertEquals(numFiles, blocksProcessed);
            break;
        }
    } finally {
        ctx.close();
    }
}
Also used : BlockIterator(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator) Configuration(org.apache.hadoop.conf.Configuration) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) HashSet(java.util.HashSet)
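
Stripped of the save/rewind/load bookkeeping, the iteration pattern the test exercises is short. A minimal sketch, assuming a live FsVolumeSpi and a known block pool ID; BlockIterator is Closeable, so try-with-resources closes it, and handleBlock is a hypothetical callback:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.BlockIterator;

public class BlockIteratorSketch {
    // Walk every block of one block pool on a volume, then close the
    // iterator. The name ("sketch") identifies this iterator if it is
    // later saved and reloaded via loadBlockIterator, as the test does.
    static void scanAllBlocks(FsVolumeSpi volume, String bpid)
            throws IOException {
        try (BlockIterator iter = volume.newBlockIterator(bpid, "sketch")) {
            ExtendedBlock block;
            while ((block = iter.nextBlock()) != null) {
                handleBlock(block); // hypothetical per-block callback
            }
        }
    }

    static void handleBlock(ExtendedBlock block) {
        System.out.println("visited " + block);
    }
}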

Example 28 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDirectoryScanner, method duplicateBlock.

/**
   * Duplicate the given block on all volumes.
   * @param blockId the ID of the block to duplicate
   * @throws IOException
   */
private void duplicateBlock(long blockId) throws IOException {
    try (AutoCloseableLock lock = fds.acquireDatasetLock()) {
        ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
        try (FsDatasetSpi.FsVolumeReferences volumes = fds.getFsVolumeReferences()) {
            for (FsVolumeSpi v : volumes) {
                if (v.getStorageID().equals(b.getVolume().getStorageID())) {
                    continue;
                }
                // Volume without a copy of the block. Make a copy now.
                File sourceBlock = new File(b.getBlockURI());
                File sourceMeta = new File(b.getMetadataURI());
                URI sourceRoot = b.getVolume().getStorageLocation().getUri();
                URI destRoot = v.getStorageLocation().getUri();
                String relativeBlockPath = sourceRoot.relativize(sourceBlock.toURI()).getPath();
                String relativeMetaPath = sourceRoot.relativize(sourceMeta.toURI()).getPath();
                File destBlock = new File(new File(destRoot).toString(), relativeBlockPath);
                File destMeta = new File(new File(destRoot).toString(), relativeMetaPath);
                destBlock.getParentFile().mkdirs();
                FileUtils.copyFile(sourceBlock, destBlock);
                FileUtils.copyFile(sourceMeta, destMeta);
                if (destBlock.exists() && destMeta.exists()) {
                    LOG.info("Copied " + sourceBlock + " ==> " + destBlock);
                    LOG.info("Copied " + sourceMeta + " ==> " + destMeta);
                }
            }
        }
    }
}
Also used : FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) File(java.io.File) URI(java.net.URI)
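
The only subtle part of duplicateBlock is the path arithmetic: java.net.URI#relativize strips the source volume root from the block's absolute path, and the remainder is re-rooted under the destination volume. A self-contained sketch with hypothetical paths (note the trailing slash on the root URIs, which File#toURI supplies automatically for existing directories):

import java.io.File;
import java.net.URI;

public class RelativizeSketch {
    public static void main(String[] args) {
        // Hypothetical volume roots and block path, mirroring the
        // sourceRoot/destRoot computation in duplicateBlock().
        URI sourceRoot = URI.create("file:/data/dn/vol0/");
        URI destRoot = URI.create("file:/data/dn/vol1/");
        URI sourceBlock =
            URI.create("file:/data/dn/vol0/current/BP-1/finalized/blk_1001");
        // Strip the source volume prefix, keeping the volume-relative path.
        String relative = sourceRoot.relativize(sourceBlock).getPath();
        // Re-root the relative path under the destination volume.
        File destBlock = new File(new File(destRoot).toString(), relative);
        System.out.println(relative);  // current/BP-1/finalized/blk_1001
        System.out.println(destBlock); // /data/dn/vol1/current/BP-1/finalized/blk_1001
    }
}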

Example 29 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestDirectoryScanner, method testExceptionHandlingWhileDirectoryScan.

/**
   * Test the behavior of exception handling during directory scan operation.
   * Directory scanner shouldn't abort the scan on every directory just because
   * one had an error.
   */
@Test(timeout = 60000)
public void testExceptionHandlingWhileDirectoryScan() throws Exception {
    cluster = new MiniDFSCluster.Builder(CONF).build();
    try {
        cluster.waitActive();
        bpid = cluster.getNamesystem().getBlockPoolId();
        fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
        client = cluster.getFileSystem().getClient();
        CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
        DataNode dataNode = cluster.getDataNodes().get(0);
        // Add files with 2 blocks
        createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false);
        // Inject an error on #getFinalizedDir() so that ReportCompiler#call()
        // hits an exception while preparing the block info report list.
        List<FsVolumeSpi> volumes = new ArrayList<>();
        Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
        while (iterator.hasNext()) {
            FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
            FsVolumeImpl spy = Mockito.spy(volume);
            Mockito.doThrow(new IOException("Error while getFinalizedDir"))
                .when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
            volumes.add(spy);
        }
        FsVolumeReferences volReferences = new FsVolumeReferences(volumes);
        FsDatasetSpi<? extends FsVolumeSpi> spyFds = Mockito.spy(fds);
        Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences();
        scanner = new DirectoryScanner(dataNode, spyFds, CONF);
        scanner.setRetainDiffs(true);
        scanner.reconcile();
    } finally {
        if (scanner != null) {
            scanner.shutdown();
            scanner = null;
        }
        cluster.shutdown();
    }
}
Also used : FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) ArrayList(java.util.ArrayList) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) IOException(java.io.IOException) Test(org.junit.Test)
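
The fault injection above is plain Mockito: spy() wraps a real object so unstubbed methods keep their real behavior, while doThrow(...).when(spy) overrides exactly one method. The same idiom on an ordinary List, so the moving parts are visible without a cluster:

import java.util.ArrayList;
import java.util.List;
import org.mockito.Mockito;

public class SpyFaultInjectionSketch {
    public static void main(String[] args) {
        List<String> real = new ArrayList<>();
        // The spy delegates to the real list unless a method is stubbed.
        List<String> spy = Mockito.spy(real);
        // Stub exactly one method to fail; everything else still works.
        Mockito.doThrow(new RuntimeException("injected failure"))
                .when(spy).clear();
        spy.add("kept");                 // delegates to the real list
        System.out.println(spy.size());  // prints 1
        try {
            spy.clear();                 // hits the injected failure
        } catch (RuntimeException e) {
            System.out.println("caught: " + e.getMessage());
        }
    }
}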

Example 30 with FsVolumeSpi

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi in project hadoop by apache.

From the class TestIncrementalBrVariations, method verifyIncrementalBlockReports.

public void verifyIncrementalBlockReports(boolean splitReports) throws IOException {
    // Get the block list for the file with the block locations.
    LocatedBlocks blocks = createFileGetBlocks(GenericTestUtils.getMethodName());
    try (FsDatasetSpi.FsVolumeReferences volumes = dn0.getFSDataset().getFsVolumeReferences()) {
        // We will send 'fake' incremental block reports to the NN that look
        // like they originated from DN 0.
        StorageReceivedDeletedBlocks[] reports = new StorageReceivedDeletedBlocks[volumes.size()];
        // Lie to the NN that one block on each storage has been deleted.
        for (int i = 0; i < reports.length; ++i) {
            FsVolumeSpi volume = volumes.get(i);
            boolean foundBlockOnStorage = false;
            ReceivedDeletedBlockInfo[] rdbi = new ReceivedDeletedBlockInfo[1];
            // Find the first block on this storage and mark it as deleted
            // for the deletion report.
            for (LocatedBlock block : blocks.getLocatedBlocks()) {
                if (block.getStorageIDs()[0].equals(volume.getStorageID())) {
                    rdbi[0] = new ReceivedDeletedBlockInfo(block.getBlock().getLocalBlock(), ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK, null);
                    foundBlockOnStorage = true;
                    break;
                }
            }
            assertTrue(foundBlockOnStorage);
            reports[i] = new StorageReceivedDeletedBlocks(new DatanodeStorage(volume.getStorageID()), rdbi);
            if (splitReports) {
                // If we are splitting reports then send the report for this storage now.
                StorageReceivedDeletedBlocks[] singletonReport = { reports[i] };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, singletonReport);
            }
        }
        if (!splitReports) {
            // Send a combined report.
            cluster.getNameNodeRpc().blockReceivedAndDeleted(dn0Reg, poolId, reports);
        }
        // Make sure that the deleted block from each storage was picked up
        // by the NameNode.  IBRs are async, make sure the NN processes
        // all of them.
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertThat(cluster.getNamesystem().getMissingBlocksCount(), is((long) reports.length));
    }
}
Also used : FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi)
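
The report plumbing, condensed to its essentials: one "block deleted" notification wrapped in a per-storage report, which is the shape blockReceivedAndDeleted expects. A minimal sketch; storageUuid and block are assumed to come from a running cluster, as in the test:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class DeletionReportSketch {
    // Wrap a single "block deleted" notification in a per-storage report.
    static StorageReceivedDeletedBlocks buildDeletionReport(
            String storageUuid, ExtendedBlock block) {
        ReceivedDeletedBlockInfo deleted = new ReceivedDeletedBlockInfo(
                block.getLocalBlock(),
                ReceivedDeletedBlockInfo.BlockStatus.DELETED_BLOCK,
                // the delHints argument only matters for received blocks
                null);
        return new StorageReceivedDeletedBlocks(
                new DatanodeStorage(storageUuid),
                new ReceivedDeletedBlockInfo[] { deleted });
    }
}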

Aggregations

FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 33 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 15 usages
Test (org.junit.Test): 10 usages
IOException (java.io.IOException): 8 usages
File (java.io.File): 7 usages
HashSet (java.util.HashSet): 7 usages
Path (org.apache.hadoop.fs.Path): 6 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 usages
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 6 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 5 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
ArrayList (java.util.ArrayList): 3 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 3 usages
FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference): 3 usages
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 3 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 3 usages
HashMap (java.util.HashMap): 2 usages