
Example 16 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class BlockReportTestBase method testInterleavedBlockReports.

// See HDFS-10301
@Test(timeout = 300000)
public void testInterleavedBlockReports() throws IOException, ExecutionException, InterruptedException {
    int numConcurrentBlockReports = 3;
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    final String poolId = cluster.getNamesystem().getBlockPoolId();
    LOG.info("Block pool id: " + poolId);
    final DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    final StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    // Snapshot the storages associated with the datanode
    // before the test
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    final DatanodeDescriptor dnDescriptor = bm.getDatanodeManager().getDatanode(dn.getDatanodeId());
    DatanodeStorageInfo[] storageInfos = dnDescriptor.getStorageInfos();
    // Send the same block report concurrently from
    // numConcurrentBlockReports threads
    ExecutorService executorService = Executors.newFixedThreadPool(numConcurrentBlockReports);
    List<Future<Void>> futureList = new ArrayList<>(numConcurrentBlockReports);
    for (int i = 0; i < numConcurrentBlockReports; i++) {
        futureList.add(executorService.submit(new Callable<Void>() {

            @Override
            public Void call() throws IOException {
                sendBlockReports(dnR, poolId, reports);
                return null;
            }
        }));
    }
    for (Future<Void> future : futureList) {
        future.get();
    }
    executorService.shutdown();
    // Verify that the storages match before and after the test
    Assert.assertArrayEquals(storageInfos, dnDescriptor.getStorageInfos());
}
Also used: StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), ArrayList (java.util.ArrayList), Callable (java.util.concurrent.Callable), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ExecutorService (java.util.concurrent.ExecutorService), Future (java.util.concurrent.Future), Test (org.junit.Test)
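The fan-out/join in this test is plain java.util.concurrent and is worth seeing in isolation. A minimal standalone sketch of the same pattern, with a hypothetical doWork() standing in for sendBlockReports (no HDFS needed):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class ConcurrentFanOutSketch {
    public static void main(String[] args) throws Exception {
        int numConcurrent = 3;
        ExecutorService pool = Executors.newFixedThreadPool(numConcurrent);
        List<Future<Void>> futures = new ArrayList<>(numConcurrent);
        for (int i = 0; i < numConcurrent; i++) {
            // Submit the same task N times, as the test does with sendBlockReports.
            futures.add(pool.submit((Callable<Void>) () -> {
                doWork();
                return null;
            }));
        }
        // get() joins each worker and rethrows anything it threw, wrapped in
        // ExecutionException; that is why the test method declares it.
        for (Future<Void> f : futures) {
            f.get();
        }
        pool.shutdown();
    }

    private static void doWork() {
        System.out.println(Thread.currentThread().getName() + " reporting");
    }
}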

Example 17 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestFSNamesystem method testReplQueuesActiveAfterStartupSafemode.

@Test
public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException {
    Configuration conf = new Configuration();
    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
    FSImage fsImage = Mockito.mock(FSImage.class);
    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
    FSNamesystem fsn = Mockito.spy(fsNamesystem);
    BlockManager bm = fsn.getBlockManager();
    Whitebox.setInternalState(bm, "namesystem", fsn);
    // Make shouldPopulateReplQueues return true
    HAContext haContext = Mockito.mock(HAContext.class);
    HAState haState = Mockito.mock(HAState.class);
    Mockito.when(haContext.getState()).thenReturn(haState);
    Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
    Mockito.when(fsn.getHAContext()).thenReturn(haContext);
    // Ensure NameNode.getNameNodeMetrics() does not return null
    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues were being populated during very first " + "safemode", !bm.isPopulatingReplQueues());
    fsn.leaveSafeMode(false);
    assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated even after leaving " + "safemode", bm.isPopulatingReplQueues());
    fsn.enterSafeMode(false);
    assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
    assertTrue("Replication queues weren't being populated after entering " + "safemode 2nd time", bm.isPopulatingReplQueues());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), HAState (org.apache.hadoop.hdfs.server.namenode.ha.HAState), HAContext (org.apache.hadoop.hdfs.server.namenode.ha.HAContext), Test (org.junit.Test)
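The interesting move in this test is stubbing a getter chain so that the namesystem's internal call haContext.getState().shouldPopulateReplQueues() returns true. A minimal Mockito sketch of that chain pattern, with hypothetical Context/State interfaces standing in for HAContext/HAState:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class GetterChainStubSketch {
    // Hypothetical stand-ins for HAContext and HAState.
    public interface State { boolean shouldPopulateReplQueues(); }
    public interface Context { State getState(); }

    public static void main(String[] args) {
        State state = mock(State.class);
        Context context = mock(Context.class);
        // Stub each link of the chain so code under test that calls
        // context.getState().shouldPopulateReplQueues() sees true.
        when(context.getState()).thenReturn(state);
        when(state.shouldPopulateReplQueues()).thenReturn(true);

        System.out.println(context.getState().shouldPopulateReplQueues()); // true
    }
}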

Example 18 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestDFSOutputStream method testNoLocalWriteFlag.

@Test
public void testNoLocalWriteFlag() throws IOException {
    DistributedFileSystem fs = cluster.getFileSystem();
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE, CreateFlag.CREATE);
    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    try (FSDataOutputStream os = fs.create(new Path("/test-no-local"), FsPermission.getDefault(), flags, 512, (short) 2, 512, null)) {
        // Inject a DatanodeManager that returns one DataNode as local node for
        // the client.
        DatanodeManager spyDm = spy(dm);
        DatanodeDescriptor dn1 = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.LIVE).get(0);
        doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
        Whitebox.setInternalState(bm, "datanodeManager", spyDm);
        byte[] buf = new byte[512 * 16];
        new Random().nextBytes(buf);
        os.write(buf);
    } finally {
        Whitebox.setInternalState(bm, "datanodeManager", dm);
    }
    cluster.triggerBlockReports();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    // Total number of DataNodes is 3.
    assertEquals(3, cluster.getAllBlockReports(bpid).size());
    int numDataNodesWithData = 0;
    for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks : cluster.getAllBlockReports(bpid)) {
        for (BlockListAsLongs blocks : dnBlocks.values()) {
            if (blocks.getNumberOfBlocks() > 0) {
                numDataNodesWithData++;
                break;
            }
        }
    }
    // Verify that exactly one DN has no data: NO_LOCAL_WRITE should keep the
    // (spoofed) client-local DataNode from receiving any replica.
    assertEquals(1, 3 - numDataNodesWithData);
}
Also used: CreateFlag (org.apache.hadoop.fs.CreateFlag), Path (org.apache.hadoop.fs.Path), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), Random (java.util.Random), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
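The spy-inject-restore idiom around Whitebox.setInternalState also works outside HDFS. A minimal sketch, assuming the Whitebox bundled with older Mockito (newer Hadoop tests ship org.apache.hadoop.test.Whitebox with the same setInternalState signature); Owner and Helper are hypothetical:

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

// Assumption: Whitebox as bundled with Mockito 1.x; treat this import as
// version-dependent.
import org.mockito.internal.util.reflection.Whitebox;

public class SpyInjectionSketch {
    static class Helper {
        String name() { return "real"; }
    }

    static class Owner {
        private Helper helper = new Helper();
        Helper getHelper() { return helper; }
        String describe() { return helper.name(); }
    }

    public static void main(String[] args) {
        Owner owner = new Owner();
        Helper original = owner.getHelper();
        Helper spied = spy(original);
        doReturn("stubbed").when(spied).name();
        try {
            // Swap the spy in behind the owner's back, as the test does with
            // BlockManager's private "datanodeManager" field.
            Whitebox.setInternalState(owner, "helper", spied);
            System.out.println(owner.describe()); // prints "stubbed"
        } finally {
            // Always restore the original in finally, exactly as the test does.
            Whitebox.setInternalState(owner, "helper", original);
        }
    }
}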

Example 19 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestDFSRename method testRenameWithOverwrite.

/**
   * Check that the blocks of the dst file are cleaned up after a rename with
   * the OVERWRITE option, and restart the NN to verify the rename persisted.
   */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
    final short replFactor = 2;
    final long blockSize = 512;
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replFactor).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        long fileLen = blockSize * 3;
        String src = "/foo/src";
        String dst = "/foo/dst";
        Path srcPath = new Path(src);
        Path dstPath = new Path(dst);
        DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
        DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
        LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), dst, 0, fileLen);
        BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
        assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) != null);
        dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
        assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()) == null);
        // Restart the NN and check that the rename persisted
        cluster.restartNameNodes();
        assertFalse(dfs.exists(srcPath));
        assertTrue(dfs.exists(dstPath));
    } finally {
        if (dfs != null) {
            dfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), Test (org.junit.Test)
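The overwrite-rename call itself can be tried without a cluster: FileContext accepts the same Options.Rename.OVERWRITE flag as DistributedFileSystem#rename in the test. A minimal sketch against the local filesystem (the paths here are arbitrary):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class RenameOverwriteSketch {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getLocalFSFileContext(new Configuration());
        Path dir = new Path("/tmp/rename-sketch");
        Path src = new Path(dir, "src");
        Path dst = new Path(dir, "dst");
        fc.mkdir(dir, FsPermission.getDirDefault(), true);
        fc.create(src, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)).close();
        fc.create(dst, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)).close();
        // Without Rename.OVERWRITE this would fail because dst already exists.
        fc.rename(src, dst, Rename.OVERWRITE);
        System.out.println("src exists: " + fc.util().exists(src)); // false
        System.out.println("dst exists: " + fc.util().exists(dst)); // true
    }
}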

Example 20 with BlockManager

use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.

the class TestDFSStripedOutputStreamWithFailure method runTest.

/**
   * runTest implementation.
   * @param length file length
   * @param killPos kill positions, in ascending order
   * @param dnIndex DN indices to kill when the corresponding kill positions
   *                are reached
   * @param tokenExpire whether to wait for the block token to expire before
   *                    killing a DN
   * @throws Exception
   */
private void runTest(final int length, final int[] killPos, final int[] dnIndex, final boolean tokenExpire) throws Exception {
    if (killPos[0] <= FLUSH_POS) {
        LOG.warn("killPos=" + Arrays.toString(killPos) + " <= FLUSH_POS=" + FLUSH_POS + ", length=" + length + ", dnIndex=" + Arrays.toString(dnIndex));
        // skip the test
        return;
    }
    Preconditions.checkArgument(length > killPos[0], "length=%s <= killPos[0]=%s", length, killPos[0]);
    Preconditions.checkArgument(killPos.length == dnIndex.length);
    final Path p = new Path(dir, "dn" + Arrays.toString(dnIndex) + "len" + length + "kill" + Arrays.toString(killPos));
    final String fullPath = p.toString();
    LOG.info("fullPath=" + fullPath);
    if (tokenExpire) {
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    }
    final AtomicInteger pos = new AtomicInteger();
    final FSDataOutputStream out = dfs.create(p);
    final DFSStripedOutputStream stripedOut = (DFSStripedOutputStream) out.getWrappedStream();
    // the first GS of this block group, i.e. before any block recovery bumps it
    long firstGS = -1;
    // the old GS before bumping
    long oldGS = -1;
    List<Long> gsList = new ArrayList<>();
    final List<DatanodeInfo> killedDN = new ArrayList<>();
    int numKilled = 0;
    while (pos.get() < length) {
        final int i = pos.getAndIncrement();
        if (numKilled < killPos.length && i == killPos[numKilled]) {
            assertTrue(firstGS != -1);
            final long gs = getGenerationStamp(stripedOut);
            if (numKilled == 0) {
                assertEquals(firstGS, gs);
            } else {
                // TODO: implement hflush/hsync and verify that gs is strictly greater than oldGS
                assertTrue(gs >= oldGS);
            }
            oldGS = gs;
            if (tokenExpire) {
                DFSTestUtil.flushInternal(stripedOut);
                waitTokenExpires(out);
            }
            killedDN.add(killDatanode(cluster, stripedOut, dnIndex[numKilled], pos));
            numKilled++;
        }
        write(out, i);
        if (i % blockGroupSize == FLUSH_POS) {
            firstGS = getGenerationStamp(stripedOut);
            oldGS = firstGS;
        }
        if (i > 0 && (i + 1) % blockGroupSize == 0) {
            gsList.add(oldGS);
        }
    }
    gsList.add(oldGS);
    out.close();
    assertEquals(dnIndex.length, numKilled);
    StripedFileTestUtil.waitBlockGroupsReported(dfs, fullPath, numKilled);
    cluster.triggerBlockReports();
    StripedFileTestUtil.checkData(dfs, p, length, killedDN, gsList, blockGroupSize);
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), ArrayList (java.util.ArrayList), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager)
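The control flow of the write loop is easy to lose among the HDFS details: an AtomicInteger cursor (shared with killDatanode in the real test, which can advance it) plus an ascending kill-position index. A minimal sketch of just that loop shape:

import java.util.concurrent.atomic.AtomicInteger;

public class KillPositionLoopSketch {
    public static void main(String[] args) {
        final int length = 20;
        final int[] killPos = {5, 12}; // must be ascending, as runTest requires
        final AtomicInteger pos = new AtomicInteger();
        int numKilled = 0;
        while (pos.get() < length) {
            final int i = pos.getAndIncrement();
            if (numKilled < killPos.length && i == killPos[numKilled]) {
                // In runTest this is where a DataNode is killed and the
                // generation stamp is checked; here we just log the event.
                System.out.println("kill event at position " + i);
                numKilled++;
            }
            // write(out, i) would go here in the real test.
        }
        // Every kill position inside [0, length) fires exactly once.
        System.out.println("numKilled = " + numKilled); // 2
    }
}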

Aggregations

BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 47 uses
Test (org.junit.Test): 33 uses
Path (org.apache.hadoop.fs.Path): 21 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 13 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 12 uses
IOException (java.io.IOException): 11 uses
Configuration (org.apache.hadoop.conf.Configuration): 11 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 11 uses
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 11 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 10 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 8 uses
FileNotFoundException (java.io.FileNotFoundException): 7 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 7 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 7 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 6 uses