
Example 21 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.

From class TestDnRespectsBlockReportSplitThreshold, method testAlwaysSplit:

/**
   * Test that if splitThreshold is zero, then we always get a separate
   * call per storage.
   */
@Test(timeout = 300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
    startUpCluster(0);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Create a file with a few blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a block report so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerBlockReport(dn);
    ArgumentCaptor<StorageBlockReport[]> captor = ArgumentCaptor.forClass(StorageBlockReport[].class);
    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
            any(DatanodeRegistration.class), anyString(), captor.capture(),
            Mockito.<BlockReportContext>anyObject());
    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Test (org.junit.Test)
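
The test above follows a reusable pattern: spy on the DataNode's RPC translator to the NameNode, trigger the behaviour under test, then capture and inspect the arguments of the resulting calls. Below is a minimal, self-contained sketch of the same spy-and-capture pattern; the Reporter interface and its arguments are hypothetical stand-ins, not part of the Hadoop API.

import static org.mockito.Mockito.*;

import java.util.List;
import org.mockito.ArgumentCaptor;

public class CaptorPatternSketch {
    // Hypothetical stand-in for the DN-to-NN block report RPC.
    interface Reporter {
        void report(String storageId, long[] blockIds);
    }

    public static void main(String[] args) {
        Reporter reporter = mock(Reporter.class);
        // One call per storage, mirroring the splitThreshold == 0 behaviour.
        reporter.report("storage-1", new long[] { 1L, 2L });
        reporter.report("storage-2", new long[] { 3L });

        // Expect exactly one report() call per storage and keep every payload.
        ArgumentCaptor<long[]> captor = ArgumentCaptor.forClass(long[].class);
        verify(reporter, times(2)).report(anyString(), captor.capture());
        List<long[]> payloads = captor.getAllValues();
        System.out.println("captured " + payloads.size() + " reports");
    }
}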

Example 22 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.

From class TestStorageReport, method testStorageReportHasStorageTypeAndState:

/**
   * Ensure that storage type and storage state are propagated
   * in Storage Reports.
   */
@Test
public void testStorageReportHasStorageTypeAndState() throws IOException {
    // Make sure we are not testing with the default type; that would not
    // be a very good test.
    assertNotSame(storageType, StorageType.DEFAULT);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a heartbeat so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerHeartbeat(dn);
    // Verify that the callback passed in the expected parameters.
    ArgumentCaptor<StorageReport[]> captor = ArgumentCaptor.forClass(StorageReport[].class);
    Mockito.verify(nnSpy).sendHeartbeat(
            any(DatanodeRegistration.class), captor.capture(),
            anyLong(), anyLong(), anyInt(), anyInt(), anyInt(),
            Mockito.any(VolumeFailureSummary.class), Mockito.anyBoolean(),
            Mockito.any(SlowPeerReports.class));
    StorageReport[] reports = captor.getValue();
    for (StorageReport report : reports) {
        assertThat(report.getStorage().getStorageType(), is(storageType));
        assertThat(report.getStorage().getState(), is(DatanodeStorage.State.NORMAL));
    }
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports), VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary), Test (org.junit.Test)
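
One Mockito detail this test illustrates: once any argument of a verified call uses a matcher, every argument must, which is why all ten sendHeartbeat parameters are wrapped in any*()/capture() rather than passed as plain values. A condensed sketch of that rule, with a hypothetical HeartbeatSender interface standing in for the real protocol:

import static org.mockito.Mockito.*;

import org.mockito.ArgumentCaptor;

public class MatcherRuleSketch {
    // Hypothetical, much smaller analogue of DatanodeProtocol.sendHeartbeat().
    interface HeartbeatSender {
        void sendHeartbeat(String nodeId, int xceiverCount, boolean requestFullReport);
    }

    public static void main(String[] args) {
        HeartbeatSender sender = mock(HeartbeatSender.class);
        sender.sendHeartbeat("dn-1", 4, false);

        ArgumentCaptor<Integer> countCaptor = ArgumentCaptor.forClass(Integer.class);
        // All three arguments use matchers. Mixing a raw value with matchers,
        // e.g. sendHeartbeat("dn-1", countCaptor.capture(), anyBoolean()),
        // would throw InvalidUseOfMatchersException.
        verify(sender).sendHeartbeat(anyString(), countCaptor.capture(), anyBoolean());
        System.out.println("xceiverCount = " + countCaptor.getValue());
    }
}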

Example 23 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.

From class TestDeleteRace, method testDeleteAndCommitBlockSynchronizationRace:

/**
   * Test race between delete operation and commitBlockSynchronization method.
   * See HDFS-6825.
   * @param hasSnapshot whether to create a snapshot of the root directory
   *                    before triggering the race
   * @throws Exception
   */
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot) throws Exception {
    LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
    ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList = new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>>();
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file", false));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file1", true));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/testdir/testdir1/test-file", false));
    testList.add(new AbstractMap.SimpleImmutableEntry<String, Boolean>("/testdir/testdir1/test-file1", true));
    final Path rootPath = new Path("/");
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap = new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        int stId = 0;
        for (AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
            String testPath = stest.getKey();
            Boolean mkSameDir = stest.getValue();
            LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir + " snapshot: " + hasSnapshot);
            Path fPath = new Path(testPath);
            // Find the topmost non-root parent of the test path.
            Path grandestNonRootParent = fPath;
            while (!grandestNonRootParent.getParent().equals(rootPath)) {
                grandestNonRootParent = grandestNonRootParent.getParent();
            }
            stm = fs.create(fPath);
            LOG.info("test on " + testPath + " created " + fPath);
            // write a half block
            AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
            stm.hflush();
            if (hasSnapshot) {
                SnapshotTestHelper.createSnapshot(fs, rootPath, "st" + String.valueOf(stId));
                ++stId;
            }
            // Look into the block manager on the active node for the block
            // under construction.
            NameNode nn = cluster.getNameNode();
            ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
            DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn, blk);
            LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
            // Find the corresponding DN daemon, and spy on its connection to the
            // active.
            DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
            DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
            if (nnSpy == null) {
                nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
                dnMap.put(primaryDN, nnSpy);
            }
            // Delay the commitBlockSynchronization call
            DelayAnswer delayer = new DelayAnswer(LOG);
            Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
                Mockito.eq(blk),
                Mockito.anyInt(), // new genstamp
                Mockito.anyLong(), // new length
                Mockito.eq(true), // close file
                Mockito.eq(false), // delete block
                (DatanodeID[]) Mockito.anyObject(), // new targets
                (String[]) Mockito.anyObject()); // new target storages
            fs.recoverLease(fPath);
            LOG.info("Waiting for commitBlockSynchronization call from primary");
            delayer.waitForCall();
            LOG.info("Deleting recursively " + grandestNonRootParent);
            fs.delete(grandestNonRootParent, true);
            if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
                LOG.info("Recreate dir " + grandestNonRootParent + " testpath: " + testPath);
                fs.mkdirs(grandestNonRootParent);
            }
            delayer.proceed();
            LOG.info("Now wait for result");
            delayer.waitForResult();
            Throwable t = delayer.getThrown();
            if (t != null) {
                LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
            }
        }
        // end of loop over each fPath
        LOG.info("Now check we can restart");
        cluster.restartNameNodes();
        LOG.info("Restart finished");
    } finally {
        if (stm != null) {
            IOUtils.closeStream(stm);
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), AbstractMap (java.util.AbstractMap), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)
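
GenericTestUtils.DelayAnswer is what makes this race deterministic: it parks the spied commitBlockSynchronization call until the test has performed the racing delete. Conceptually it is two latches wrapped around the real invocation. A rough, hypothetical re-creation of the idea (not Hadoop's actual implementation, which also records the call's result and thrown exception):

import java.util.concurrent.CountDownLatch;

import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

// Hypothetical stand-in for GenericTestUtils.DelayAnswer. Only meaningful on
// a spy, because callRealMethod() needs a real underlying object.
class SimpleDelayAnswer implements Answer<Object> {
    private final CountDownLatch arrived = new CountDownLatch(1);
    private final CountDownLatch released = new CountDownLatch(1);

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        arrived.countDown();                 // tell the test the call is here
        released.await();                    // park until proceed() is called
        return invocation.callRealMethod();  // then let the real RPC happen
    }

    // Test-thread side: block until the stubbed call has actually been made.
    void waitForCall() throws InterruptedException { arrived.await(); }

    // Test-thread side: allow the parked call to continue.
    void proceed() { released.countDown(); }
}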

Example 24 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.

From class TestDiskspaceQuotaUpdate, method testQuotaIssuesWhileCommitting:

/**
   * Test that the cached quota stays correct between the COMMIT
   * and COMPLETE block steps, even if the replication factor is
   * changed during this time.
   */
@Test(timeout = 60000)
public void testQuotaIssuesWhileCommitting() throws Exception {
    // We want a one-DN cluster so that we can force a lack of
    // commit by only instrumenting a single DN; we kill the other 3
    List<MiniDFSCluster.DataNodeProperties> dnprops = new ArrayList<>();
    try {
        for (int i = REPLICATION - 1; i > 0; i--) {
            dnprops.add(cluster.stopDataNode(i));
        }
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(cluster.getDataNodes().get(0), cluster.getNameNode());
        testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 1, (short) 4);
        testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 4, (short) 1);
        // Don't actually change replication; just check that the sizes
        // agree during the commit period
        testQuotaIssuesWhileCommittingHelper(nnSpy, (short) 1, (short) 1);
    } finally {
        for (MiniDFSCluster.DataNodeProperties dnprop : dnprops) {
            cluster.restartDataNode(dnprop);
        }
        cluster.waitActive();
    }
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ArrayList (java.util.ArrayList), Test (org.junit.Test)
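
The helper's mechanics are not shown on this page, but the intent of the comments is clear: with all but one DataNode stopped, the block cannot move from COMMITTED to COMPLETE until the surviving DN's report arrives, so delaying that report through the spy holds the file in the commit window while the quota is checked. A hedged guess at the shape of that interception, reusing the SimpleDelayAnswer sketch from Example 23; the stubbed method and the helper name are assumptions, not code from TestDiskspaceQuotaUpdate:

// Assumption: the helper delays the surviving DN's incremental block report
// (DatanodeProtocol.blockReceivedAndDeleted) so the NameNode keeps the block
// in COMMITTED state while the cached quota is inspected. Assumes
// org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks is imported.
void holdBlockInCommittedState(DatanodeProtocolClientSideTranslatorPB nnSpy)
        throws IOException {
    SimpleDelayAnswer delay = new SimpleDelayAnswer();
    Mockito.doAnswer(delay).when(nnSpy).blockReceivedAndDeleted(
            Mockito.any(DatanodeRegistration.class),
            Mockito.anyString(),
            Mockito.any(StorageReceivedDeletedBlocks[].class));
    // ... create the file, change replication, assert on the quota usage ...
    delay.proceed();
}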

Example 25 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in project hadoop by apache.

From class BlockRecoveryWorker, method getActiveNamenodeForBP:

/**
   * Get the NameNode corresponding to the given block pool.
   *
   * @param bpid Block pool Id
   * @return the active NameNode proxy corresponding to the bpid
   * @throws IOException if unable to get the corresponding NameNode
   */
DatanodeProtocolClientSideTranslatorPB getActiveNamenodeForBP(String bpid) throws IOException {
    BPOfferService bpos = datanode.getBPOfferService(bpid);
    if (bpos == null) {
        throw new IOException("No block pool offer service for bpid=" + bpid);
    }
    DatanodeProtocolClientSideTranslatorPB activeNN = bpos.getActiveNN();
    if (activeNN == null) {
        throw new IOException("Block pool " + bpid + " has not recognized an active NN");
    }
    return activeNN;
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), IOException (java.io.IOException)
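
For context, a sketch of how a recovery task inside BlockRecoveryWorker might consume this method; the helper name and its parameters are illustrative, while getActiveNamenodeForBP and commitBlockSynchronization are the real calls shown and mocked elsewhere on this page:

// Illustrative only: resolve the active NN for the block's pool, then report
// the outcome of block recovery to it.
void finishRecovery(ExtendedBlock block, long newGenStamp, long newLength,
        DatanodeID[] newTargets, String[] newTargetStorages) throws IOException {
    DatanodeProtocolClientSideTranslatorPB nn =
            getActiveNamenodeForBP(block.getBlockPoolId());
    nn.commitBlockSynchronization(block, newGenStamp, newLength,
            true,   // close the file
            false,  // do not delete the block
            newTargets, newTargetStorages);
}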

Aggregations

DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 26 usages
Test (org.junit.Test): 16 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 14 usages
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 7 usages
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 6 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 6 usages
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 6 usages
ArrayList (java.util.ArrayList): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 5 usages
SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports): 5 usages
VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary): 5 usages
DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer): 5 usages
IOException (java.io.IOException): 4 usages
InetSocketAddress (java.net.InetSocketAddress): 4 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4 usages
Path (org.apache.hadoop.fs.Path): 4 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
NNHAStatusHeartbeat (org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat): 4 usages
File (java.io.File): 3 usages