
Example 61 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestBlockTokenWithDFS, method testAppend.

/**
   * Tests that the APPEND operation can handle token expiration when
   * re-establishing the pipeline is needed.
   */
@Test
public void testAppend() throws Exception {
    MiniDFSCluster cluster = null;
    int numDataNodes = 2;
    Configuration conf = getConf(numDataNodes);
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
        cluster.waitActive();
        assertEquals(numDataNodes, cluster.getDataNodes().size());
        final NameNode nn = cluster.getNameNode();
        final BlockManager bm = nn.getNamesystem().getBlockManager();
        final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
        // set a short token lifetime (1 second)
        SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
        Path fileToAppend = new Path(FILE_TO_APPEND);
        FileSystem fs = cluster.getFileSystem();
        byte[] expected = generateBytes(FILE_SIZE);
        // write a one-byte file
        FSDataOutputStream stm = writeFile(fs, fileToAppend, (short) numDataNodes, BLOCK_SIZE);
        stm.write(expected, 0, 1);
        stm.close();
        // open the file again for append
        stm = fs.append(fileToAppend);
        int mid = expected.length - 1;
        stm.write(expected, 1, mid - 1);
        stm.hflush();
        // wait until the token used in stm expires
        Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
        while (!SecurityTestUtil.isBlockTokenExpired(token)) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException ignored) {
            }
        }
        // remove a datanode to force re-establishing pipeline
        cluster.stopDataNode(0);
        // append the rest of the file
        stm.write(expected, mid, expected.length - mid);
        stm.close();
        // check if append is successful
        FSDataInputStream in5 = fs.open(fileToAppend);
        assertTrue(checkFile1(in5, expected));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager), Test (org.junit.Test)
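
The heart of this example is the busy-wait until the block token held by the output stream expires, which forces the subsequent writes to re-establish the pipeline with a fresh token. A minimal sketch of that wait as a reusable helper with a bounded timeout (the helper name and timeout handling are illustrative additions, not part of the original test; java.util.concurrent.TimeoutException is assumed to be imported):

private static void waitForBlockTokenExpiry(Token<BlockTokenIdentifier> token,
        long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // poll until the token reports itself expired, or give up
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
        if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException("block token still valid after " + timeoutMs + " ms");
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException ignored) {
        }
    }
}

With such a helper, the while loop in the test body collapses to a single call like waitForBlockTokenExpiry(token, 10000L).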

Example 62 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestDataNodeMultipleRegistrations, method testFedSingleNN.

/**
   * Starts a single NN and a single DN and verifies registration and handshake.
   * 
   * @throws IOException
   */
@Test
public void testFedSingleNN() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nameNodePort(9927).build();
    try {
        NameNode nn1 = cluster.getNameNode();
        assertNotNull("cannot create nn1", nn1);
        String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
        String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
        int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
        LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress());
        // check the number of volumes in the fsdataset
        DataNode dn = cluster.getDataNodes().get(0);
        final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
        Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
        int i = 0;
        for (Map.Entry<String, Object> e : volInfos.entrySet()) {
            LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
        }
        // the number of volumes should match the default number of data dirs (2: [data1, data2])
        assertEquals("number of volumes is wrong", cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(), volInfos.size());
        for (BPOfferService bpos : dn.getAllBpOs()) {
            LOG.info("reg: bpid=" + "; name=" + bpos.bpRegistration + "; sid=" + bpos.bpRegistration.getDatanodeUuid() + "; nna=" + getNNSocketAddress(bpos));
        }
        // try block report
        BPOfferService bpos1 = dn.getAllBpOs().get(0);
        bpos1.triggerBlockReportForTests();
        assertEquals("wrong nn address", getNNSocketAddress(bpos1), nn1.getNameNodeAddress());
        assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
        assertEquals("wrong cid", dn.getClusterId(), cid1);
        cluster.shutdown();
        // Ensure all the BPOfferService threads are shutdown
        assertEquals(0, dn.getAllBpOs().size());
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Map (java.util.Map), Test (org.junit.Test)
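
The three assertions near the end compare the DataNode-side registration against the NameNode's own identity. A hedged sketch of the same checks bundled into a reusable assertion helper (the helper itself is an illustration, not part of the test class; FSImageTestUtil, getNNSocketAddress, and the accessors are exactly the ones already used above):

private static void assertRegisteredWith(NameNode nn, DataNode dn, BPOfferService bpos) {
    // the DN-side view of the handshake must match the NN's identity
    assertEquals("wrong nn address", getNNSocketAddress(bpos), nn.getNameNodeAddress());
    assertEquals("wrong bpid", FSImageTestUtil.getFSImage(nn).getBlockPoolID(), bpos.getBlockPoolId());
    assertEquals("wrong cid", FSImageTestUtil.getFSImage(nn).getClusterID(), dn.getClusterId());
}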

Example 63 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestDataNodeMultipleRegistrations, method test2NNRegistration.

/**
   * Starts multiple NNs and a single DN and verifies per-BP registrations and
   * handshakes.
   * 
   * @throws IOException
   */
@Test
public void test2NNRegistration() throws IOException {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2)).build();
    try {
        cluster.waitActive();
        NameNode nn1 = cluster.getNameNode(0);
        NameNode nn2 = cluster.getNameNode(1);
        assertNotNull("cannot create nn1", nn1);
        assertNotNull("cannot create nn2", nn2);
        String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
        String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
        String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
        String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
        int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
        int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
        int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
        int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
        assertNotSame("namespace ids should be different", ns1, ns2);
        LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri=" + nn1.getNameNodeAddress());
        LOG.info("nn2: lv=" + lv2 + ";cid=" + cid2 + ";bpid=" + bpid2 + ";uri=" + nn2.getNameNodeAddress());
        // check number of volumes in fsdataset
        DataNode dn = cluster.getDataNodes().get(0);
        final Map<String, Object> volInfos = dn.data.getVolumeInfoMap();
        Assert.assertTrue("No volumes in the fsdataset", volInfos.size() > 0);
        int i = 0;
        for (Map.Entry<String, Object> e : volInfos.entrySet()) {
            LOG.info("vol " + i++ + ") " + e.getKey() + ": " + e.getValue());
        }
        // the number of volumes should match the default number of data dirs (2: [data1, data2])
        assertEquals("number of volumes is wrong", cluster.getFsDatasetTestUtils(0).getDefaultNumOfDataDirs(), volInfos.size());
        for (BPOfferService bpos : dn.getAllBpOs()) {
            LOG.info("BP: " + bpos);
        }
        BPOfferService bpos1 = dn.getAllBpOs().get(0);
        BPOfferService bpos2 = dn.getAllBpOs().get(1);
        // The order of bpos is not guaranteed, so fix the order
        if (getNNSocketAddress(bpos1).equals(nn2.getNameNodeAddress())) {
            BPOfferService tmp = bpos1;
            bpos1 = bpos2;
            bpos2 = tmp;
        }
        assertEquals("wrong nn address", getNNSocketAddress(bpos1), nn1.getNameNodeAddress());
        assertEquals("wrong nn address", getNNSocketAddress(bpos2), nn2.getNameNodeAddress());
        assertEquals("wrong bpid", bpos1.getBlockPoolId(), bpid1);
        assertEquals("wrong bpid", bpos2.getBlockPoolId(), bpid2);
        assertEquals("wrong cid", dn.getClusterId(), cid1);
        assertEquals("cid should be same", cid2, cid1);
        assertEquals("namespace should be same", bpos1.bpNSInfo.namespaceID, ns1);
        assertEquals("namespace should be same", bpos2.bpNSInfo.namespaceID, ns2);
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Map (java.util.Map), Test (org.junit.Test)
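
Because the order of dn.getAllBpOs() is not guaranteed, the test swaps the two services by hand. With more than two NameNodes, resolving each BPOfferService by its NameNode's RPC address is more direct; a sketch (the helper name is illustrative, getNNSocketAddress is the same test-class helper used above):

private static BPOfferService findBpos(DataNode dn, NameNode nn) {
    // match the DN-side service to the NN via its registered RPC address
    for (BPOfferService bpos : dn.getAllBpOs()) {
        if (getNNSocketAddress(bpos).equals(nn.getNameNodeAddress())) {
            return bpos;
        }
    }
    throw new AssertionError("no BPOfferService registered with " + nn.getNameNodeAddress());
}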

Example 64 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestDnRespectsBlockReportSplitThreshold, method testAlwaysSplit.

/**
   * Test that if splitThreshold is zero, then we always get a separate
   * call per storage.
   */
@Test(timeout = 300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
    startUpCluster(0);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Create a file with a few blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a block report so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerBlockReport(dn);
    ArgumentCaptor<StorageBlockReport[]> captor = ArgumentCaptor.forClass(StorageBlockReport[].class);
    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(any(DatanodeRegistration.class), anyString(), captor.capture(), Mockito.<BlockReportContext>anyObject());
    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Test (org.junit.Test)
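
verifyCapturedArguments is a private helper of the test class whose body is not shown here. Judging from the call site (one expected report per RPC across BLOCKS_IN_FILE blocks), the per-call part of such a check could plausibly look like the following sketch (the helper name and scope are assumptions):

private static void verifyReportsPerCall(ArgumentCaptor<StorageBlockReport[]> captor,
        int expectedReportsPerCall) {
    // with splitThreshold == 0, every blockReport RPC should carry exactly
    // one StorageBlockReport, i.e. one storage per call
    for (StorageBlockReport[] reports : captor.getAllValues()) {
        assertEquals(expectedReportsPerCall, reports.length);
    }
}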

Example 65 with NameNode

Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.

The class TestStorageReport, method testStorageReportHasStorageTypeAndState.

/**
   * Ensure that storage type and storage state are propagated
   * in Storage Reports.
   */
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
    // Make sure we are not testing with the default storage type, as that
    // would not be a very good test.
    assertNotSame(storageType, StorageType.DEFAULT);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a heartbeat so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerHeartbeat(dn);
    // Verify that the callback passed in the expected parameters.
    ArgumentCaptor<StorageReport[]> captor = ArgumentCaptor.forClass(StorageReport[].class);
    Mockito.verify(nnSpy).sendHeartbeat(any(DatanodeRegistration.class), captor.capture(), anyLong(), anyLong(), anyInt(), anyInt(), anyInt(), Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean(), Mockito.any(SlowPeerReports.class));
    StorageReport[] reports = captor.getValue();
    for (StorageReport report : reports) {
        assertThat(report.getStorage().getStorageType(), is(storageType));
        assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
    }
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports), VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary), Test (org.junit.Test)
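
A natural follow-on assertion (a sketch, not part of the original test) is that the heartbeat carries exactly one report per configured storage, reusing the MiniDFSCluster accessor already seen in Example 64:

// one StorageReport per storage directory configured on the DataNode
StorageReport[] reports = captor.getValue();
assertEquals("one report per configured storage expected",
        cluster.getStoragesPerDatanode(), reports.length);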

Aggregations

NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 65 usages
Test (org.junit.Test): 44 usages
Configuration (org.apache.hadoop.conf.Configuration): 28 usages
Path (org.apache.hadoop.fs.Path): 22 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 17 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 15 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
File (java.io.File): 7 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 7 usages
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 7 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 6 usages
IOException (java.io.IOException): 5 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 usages
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 4 usages
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 4 usages
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4 usages
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 4 usages