Example 21 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

the class BlockReportTestBase method blockReport_06.

/**
   * The test creates a file and closes it, then starts a second datanode
   * in the cluster. As soon as replication completes, the test runs a
   * block report and checks that no under-replicated blocks are left.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_06() throws Exception {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    final int DN_N1 = DN_N0 + 1;
    writeFile(METHOD_NAME, FILE_SIZE, filePath);
    startDNandWait(filePath, true);
    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertEquals("Wrong number of PendingReplication Blocks", 0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Test(org.junit.Test)
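For orientation, here is a minimal sketch of what the base-class helpers used above boil down to: pull the per-storage block lists from the DataNode's dataset, wrap each one in a StorageBlockReport, and hand the array to the NameNode RPC. This is an illustrative reconstruction under those assumptions, not the verbatim BlockReportTestBase code; cluster, dn, dnR and poolId are the fixtures from the test.

// Sketch (assumed, not upstream code): build one StorageBlockReport per
// storage from the dataset's per-storage block lists, then send them in a
// single RPC, as the test's getBlockReports/sendBlockReports helpers do.
Map<DatanodeStorage, BlockListAsLongs> perStorage =
    dn.getFSDataset().getBlockReports(poolId);
StorageBlockReport[] reports = new StorageBlockReport[perStorage.size()];
int i = 0;
for (Map.Entry<DatanodeStorage, BlockListAsLongs> entry : perStorage.entrySet()) {
    // One StorageBlockReport per storage directory on the DataNode.
    reports[i++] = new StorageBlockReport(entry.getKey(), entry.getValue());
}
// One RPC in the sequence (totalRpcs=1, curRpc=0), no lease, sorted report.
cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
    new BlockReportContext(1, 0, System.nanoTime(), 0L, true));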

Example 22 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

the class TestBPOfferService method testIgnoreDeletionsFromNonActive.

/**
   * Test that DNA_INVALIDATE commands from the standby are ignored.
   */
@Test
public void testIgnoreDeletionsFromNonActive() throws Exception {
    BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
    // Ask to invalidate FAKE_BLOCK when block report hits the
    // standby
    Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, FAKE_BPID,
            new Block[] { FAKE_BLOCK.getLocalBlock() }))
        .when(mockNN2).blockReport(
            Mockito.<DatanodeRegistration>anyObject(),
            Mockito.eq(FAKE_BPID),
            Mockito.<StorageBlockReport[]>anyObject(),
            Mockito.<BlockReportContext>anyObject());
    bpos.start();
    try {
        waitForInitialization(bpos);
        // Should get block reports from both NNs
        waitForBlockReport(mockNN1);
        waitForBlockReport(mockNN2);
    } finally {
        bpos.stop();
        bpos.join();
    }
    // Should ignore the delete command from the standby
    Mockito.verify(mockFSDataset, Mockito.never()).invalidate(Mockito.eq(FAKE_BPID), (Block[]) Mockito.anyObject());
}
Also used : StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Block(org.apache.hadoop.hdfs.protocol.Block) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockCommand(org.apache.hadoop.hdfs.server.protocol.BlockCommand) Test(org.junit.Test)
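By symmetry, the same command from the active NameNode should be applied rather than ignored. Below is a hedged companion sketch, not an upstream test: it reuses the mocks and helpers of the test above and assumes mockNN1 is the NN this fixture reports as active.

// Sketch (assumption: mockNN1 is the active NN in this fixture).
Mockito.doReturn(new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, FAKE_BPID,
        new Block[] { FAKE_BLOCK.getLocalBlock() }))
    .when(mockNN1).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.eq(FAKE_BPID),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
bpos.start();
try {
    waitForInitialization(bpos);
    waitForBlockReport(mockNN1);
    // Deletions ordered by the active NN should reach the dataset.
    Mockito.verify(mockFSDataset, Mockito.timeout(10000)).invalidate(
        Mockito.eq(FAKE_BPID), (Block[]) Mockito.anyObject());
} finally {
    bpos.stop();
    bpos.join();
}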

Example 23 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

the class TestBlockHasMultipleReplicasOnSameDN method testBlockHasMultipleReplicasOnSameDN.

/**
   * Verify NameNode behavior when a given DN reports multiple replicas
   * of a given block.
   */
@Test
public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
    String filename = makeFileName(GenericTestUtils.getMethodName());
    Path filePath = new Path(filename);
    // Write out a file with a few blocks.
    DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS, BLOCK_SIZE, NUM_DATANODES, seed);
    // Get the block list for the file with the block locations.
    LocatedBlocks locatedBlocks = client.getLocatedBlocks(filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
    // Generate a fake block report from one of the DataNodes, such
    // that it reports one copy of each block on each of its storages.
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
    StorageBlockReport[] reports = new StorageBlockReport[cluster.getStoragesPerDatanode()];
    ArrayList<ReplicaInfo> blocks = new ArrayList<>();
    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
        Block localBlock = locatedBlock.getBlock().getLocalBlock();
        blocks.add(new FinalizedReplica(localBlock, null, null));
    }
    Collections.sort(blocks);
    try (FsDatasetSpi.FsVolumeReferences volumes = dn.getFSDataset().getFsVolumeReferences()) {
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
            DatanodeStorage dns = new DatanodeStorage(volumes.get(i).getStorageID());
            reports[i] = new StorageBlockReport(dns, bll);
        }
    }
    // Should not assert!
    cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    // Get the block locations once again.
    locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);
    // Make sure that each block has two replicas, one on each DataNode.
    for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
        DatanodeInfo[] locations = locatedBlock.getLocations();
        assertThat(locations.length, is((int) NUM_DATANODES));
        assertThat(locations[0].getDatanodeUuid(), not(locations[1].getDatanodeUuid()));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) BlockReportContext(org.apache.hadoop.hdfs.server.protocol.BlockReportContext) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) Test(org.junit.Test)
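As an aside, BlockListAsLongs, used above to encode the fake report, is itself iterable, which makes it easy to sanity-check what a StorageBlockReport carries. A small sketch, assuming the blocks list built in the test:

// Sketch: decode-side view of the encoded report built above.
BlockListAsLongs encoded = BlockListAsLongs.encode(blocks);
assertEquals(blocks.size(), encoded.getNumberOfBlocks());
for (BlockListAsLongs.BlockReportReplica replica : encoded) {
    // Each entry carries the block id, length, generation stamp and state.
    System.out.println(replica.getBlockId() + " len=" + replica.getNumBytes()
        + " gs=" + replica.getGenerationStamp() + " state=" + replica.getState());
}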

Example 24 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

the class TestDnRespectsBlockReportSplitThreshold method verifyCapturedArguments.

private void verifyCapturedArguments(ArgumentCaptor<StorageBlockReport[]> captor,
        int expectedReportsPerCall, int expectedTotalBlockCount) {
    List<StorageBlockReport[]> listOfReports = captor.getAllValues();
    int numBlocksReported = 0;
    for (StorageBlockReport[] reports : listOfReports) {
        assertThat(reports.length, is(expectedReportsPerCall));
        for (StorageBlockReport report : reports) {
            BlockListAsLongs blockList = report.getBlocks();
            numBlocksReported += blockList.getNumberOfBlocks();
        }
    }
    // At least the expected number of blocks must have been reported in total.
    assertTrue(numBlocksReported >= expectedTotalBlockCount);
}
Also used : StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) BlockListAsLongs(org.apache.hadoop.hdfs.protocol.BlockListAsLongs)

Example 25 with StorageBlockReport

use of org.apache.hadoop.hdfs.server.protocol.StorageBlockReport in project hadoop by apache.

the class TestDnRespectsBlockReportSplitThreshold method testAlwaysSplit.

/**
   * Test that if splitThreshold is zero, then we always get a separate
   * call per storage.
   */
@Test(timeout = 300000)
public void testAlwaysSplit() throws IOException, InterruptedException {
    startUpCluster(0);
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    // Create a file with a few blocks.
    createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
    // Insert a spy object for the NN RPC.
    DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    // Trigger a block report so there is an interaction with the spy
    // object.
    DataNodeTestUtils.triggerBlockReport(dn);
    ArgumentCaptor<StorageBlockReport[]> captor = ArgumentCaptor.forClass(StorageBlockReport[].class);
    Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(any(DatanodeRegistration.class), anyString(), captor.capture(), Mockito.<BlockReportContext>anyObject());
    verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
}
Also used : DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageBlockReport(org.apache.hadoop.hdfs.server.protocol.StorageBlockReport) Test(org.junit.Test)
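The inverse case is a useful cross-check. Here is a hedged sketch of it, built on the same harness (startUpCluster, createFile, verifyCapturedArguments) as the test above, not verbatim upstream code: with a split threshold above the file's block count, all storages should be batched into a single blockReport RPC.

// Sketch (same harness, assumed threshold semantics): one RPC for all storages.
startUpCluster(BLOCKS_IN_FILE + 1);
DataNode dn = cluster.getDataNodes().get(0);
createFile(GenericTestUtils.getMethodName(), BLOCKS_IN_FILE);
DatanodeProtocolClientSideTranslatorPB nnSpy =
    InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
DataNodeTestUtils.triggerBlockReport(dn);
ArgumentCaptor<StorageBlockReport[]> captor =
    ArgumentCaptor.forClass(StorageBlockReport[].class);
// Exactly one call, containing a report for every storage on the DataNode.
Mockito.verify(nnSpy, times(1)).blockReport(any(DatanodeRegistration.class),
    anyString(), captor.capture(), Mockito.<BlockReportContext>anyObject());
verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);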

Aggregations

StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 28 uses
Test (org.junit.Test): 21 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 15 uses
Path (org.apache.hadoop.fs.Path): 11 uses
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 8 uses
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 8 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 7 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 7 uses
ArrayList (java.util.ArrayList): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 6 uses
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 6 uses
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 4 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 uses
BlockReportResponseProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto): 3 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 uses
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 3 uses
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 3 uses
ServiceException (com.google.protobuf.ServiceException): 2 uses
IOException (java.io.IOException): 2 uses