
Example 91 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestDefaultBlockPlacementPolicy, the setup() method.

@Before
public void setup() throws IOException {
    StaticMapping.resetMap();
    Configuration conf = new HdfsConfiguration();
    final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
    final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks).hosts(hosts).build();
    cluster.waitActive();
    nameNodeRpc = cluster.getNameNodeRpc();
    namesystem = cluster.getNamesystem();
    perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission.getDefault());
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus), Before (org.junit.Before)
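
The setup() above relies on fields and constants declared elsewhere in TestDefaultBlockPlacementPolicy (cluster, nameNodeRpc, namesystem, perm, DEFAULT_BLOCK_SIZE). A minimal sketch of that assumed scaffolding, including a teardown, might look like the following; the field types match the getters used above, but the constant value and the teardown body are illustrative assumptions, not copied from the real class.

// Assumed scaffolding for the setup() snippet; values are illustrative.
private static final int DEFAULT_BLOCK_SIZE = 1024;  // assumption, not necessarily the real constant

private MiniDFSCluster cluster;
private NamenodeProtocols nameNodeRpc;   // type returned by cluster.getNameNodeRpc()
private FSNamesystem namesystem;         // type returned by cluster.getNamesystem()
private PermissionStatus perm;

@After
public void teardown() {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}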

Example 92 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestCreateEditsLog, the testCanLoadCreatedEditsLog() method.

/**
   * Tests that an edits log created using CreateEditsLog is valid and can be
   * loaded successfully by a namenode.
   */
@Test(timeout = 60000)
public void testCanLoadCreatedEditsLog() throws Exception {
    // Format namenode.
    HdfsConfiguration conf = new HdfsConfiguration();
    File nameDir = new File(HDFS_DIR, "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
    DFSTestUtil.formatNameNode(conf);
    // Call CreateEditsLog and move the resulting edits to the name dir.
    CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d", TEST_DIR.getAbsolutePath() });
    Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
    FileContext localFc = FileContext.getLocalFSFileContext();
    for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
        Path src = edits.getPath();
        Path dst = new Path(new File(nameDir, "current").getAbsolutePath(), src.getName());
        localFc.rename(src, dst);
    }
    // Start a namenode to try to load the edits.
    cluster = new MiniDFSCluster.Builder(conf).format(false).manageNameDfsDirs(false).waitSafeMode(false).build();
    cluster.waitClusterUp();
    // Test successful, because no exception thrown.
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), File (java.io.File), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
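
This test likewise depends on members not shown in the snippet: HDFS_DIR, TEST_DIR, and the cluster field. A hedged sketch of that scaffolding follows; the directory locations and teardown are assumptions for illustration, not the actual values in TestCreateEditsLog.

// Assumed test scaffolding; paths are illustrative only.
private static final String HDFS_DIR =
    System.getProperty("test.build.data", "build/test/data") + "/dfs";            // assumption
private static final File TEST_DIR =
    new File(System.getProperty("test.build.data", "build/test/data"), "edits");  // assumption
private MiniDFSCluster cluster;

@After
public void tearDown() {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
    FileUtil.fullyDelete(TEST_DIR);  // remove generated edits between runs
}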

Example 93 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestDeadDatanode, the testDeadDatanode() method.

/**
   * Test to ensure namenode rejects request from dead datanode
   * - Start a cluster
   * - Shutdown the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to Namenode and make sure it is rejected 
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // Ensure blockReceived call from dead datanode is not rejected with
    // IOException, since it's async, but the node remains unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
        // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER.getAction());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), IOException (java.io.IOException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Test (org.junit.Test)
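
The try/catch plus fail() idiom used for the block report could also be written with LambdaTestUtils.intercept, assuming the test builds against a Hadoop version that ships org.apache.hadoop.test.LambdaTestUtils; this is an optional rewrite, not the form used in TestDeadDatanode.

// Equivalent assertion using LambdaTestUtils (assumed to be on the test classpath).
LambdaTestUtils.intercept(IOException.class, () ->
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true)));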

Example 94 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestDeadDatanode, the testDeadNodeAsBlockTarget() method.

@Test
public void testDeadNodeAsBlockTarget() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    // Get the updated datanode descriptor
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    Node clientNode = dm.getDatanode(reg);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    // Get the updated datanode descriptor available in DNM
    // Choose targets for a new block; the dead client node should not be
    // selected because it is no longer part of the cluster.
    DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3, clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7, BlockType.CONTIGUOUS, null);
    for (DatanodeStorageInfo datanodeStorageInfo : results) {
        assertFalse("Dead node should not be choosen", datanodeStorageInfo.getDatanodeDescriptor().equals(clientNode));
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), Node (org.apache.hadoop.net.Node), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), Test (org.junit.Test)
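
Both TestDeadDatanode examples (93 and 94) write to a cluster field and need a teardown that are not shown above. A minimal sketch of that assumed fixture:

// Assumed shared fixture for the two TestDeadDatanode examples above.
private MiniDFSCluster cluster;

@After
public void cleanup() {
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}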

Example 95 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestCheckpoint, the testSecondaryImageDownload() method.

/**
   * Test that the secondary doesn't have to re-download image
   * if it hasn't changed.
   */
@Test
public void testSecondaryImageDownload() throws IOException {
    LOG.info("Starting testSecondaryImageDownload");
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    Path dir = new Path("/checkpoint");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
    cluster.waitActive();
    FileSystem fileSys = cluster.getFileSystem();
    FSImage image = cluster.getNameNode().getFSImage();
    SecondaryNameNode secondary = null;
    try {
        assertTrue(!fileSys.exists(dir));
        //
        // Make the checkpoint
        //
        secondary = startSecondaryNameNode(conf);
        File secondaryDir = MiniDFSCluster.getCheckpointDirectory(MiniDFSCluster.getBaseDirectory(), 0, 0)[0];
        File secondaryCurrent = new File(secondaryDir, "current");
        long expectedTxIdToDownload = cluster.getNameNode().getFSImage().getStorage().getMostRecentCheckpointTxId();
        File secondaryFsImageBefore = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload));
        File secondaryFsImageAfter = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload + 2));
        assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists", secondaryFsImageBefore.exists());
        assertTrue("Secondary should have loaded an image", secondary.doCheckpoint());
        assertTrue("Secondary should have downloaded original image", secondaryFsImageBefore.exists());
        assertTrue("Secondary should have created a new image", secondaryFsImageAfter.exists());
        long fsimageLength = secondaryFsImageBefore.length();
        assertEquals("Image size should not have changed", fsimageLength, secondaryFsImageAfter.length());
        // change namespace
        fileSys.mkdirs(dir);
        assertFalse("Another checkpoint should not have to re-load image", secondary.doCheckpoint());
        for (StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
            File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE, expectedTxIdToDownload + 5);
            assertTrue("Image size increased", imageFile.length() > fsimageLength);
        }
    } finally {
        fileSys.close();
        cleanup(secondary);
        secondary = null;
        cleanup(cluster);
        cluster = null;
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), RandomAccessFile (java.io.RandomAccessFile), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), Test (org.junit.Test)
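
The checkpoint test calls two helpers, startSecondaryNameNode() and cleanup(), defined elsewhere in TestCheckpoint. A hedged sketch of what such helpers could look like is below; the real implementations may differ, and the constructor call is only an assumption about how the secondary is started in this test.

// Illustrative helpers; not the exact code from TestCheckpoint.
private SecondaryNameNode startSecondaryNameNode(Configuration conf) throws IOException {
    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    return new SecondaryNameNode(conf);
}

private void cleanup(SecondaryNameNode secondary) {
    if (secondary != null) {
        secondary.shutdown();
    }
}

private void cleanup(MiniDFSCluster cluster) {
    if (cluster != null) {
        cluster.shutdown();
    }
}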

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454 usages
Configuration (org.apache.hadoop.conf.Configuration): 311 usages
Test (org.junit.Test): 311 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267 usages
Path (org.apache.hadoop.fs.Path): 152 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 94 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92 usages
File (java.io.File): 72 usages
IOException (java.io.IOException): 69 usages
Before (org.junit.Before): 56 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27 usages
RandomAccessFile (java.io.RandomAccessFile): 22 usages
ArrayList (java.util.ArrayList): 20 usages
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20 usages
URI (java.net.URI): 19 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19 usages