
Example 1 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

From the class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersions.

@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
        long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
        StorageInfo mockStorageInfo = mock(StorageInfo.class);
        doReturn(nnCTime).when(mockStorageInfo).getCTime();
        DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
        doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
        doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
        doReturn(123).when(mockDnReg).getXferPort();
        doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
        doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
        // Should succeed when software versions are the same.
        doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
        rpcServer.registerDatanode(mockDnReg);
        // Should succeed when software version of DN is above minimum required by NN.
        doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
        rpcServer.registerDatanode(mockDnReg);
        // Should fail when software version of DN is below minimum required by NN.
        doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
        try {
            rpcServer.registerDatanode(mockDnReg);
            fail("Should not have been able to register DN with too-low version.");
        } catch (IncorrectVersionException ive) {
            GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
            LOG.info("Got expected exception", ive);
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), Configuration (org.apache.hadoop.conf.Configuration), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), Test (org.junit.Test)
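
The reject-and-assert pattern at the end of this test can be factored into a small helper. A minimal sketch, reusing only the fixtures and calls already shown above (rpcServer, the Mockito-mocked registration, and GenericTestUtils.assertExceptionContains); the helper name is hypothetical:

private static void expectRegistrationRejected(NamenodeProtocols rpcServer,
        DatanodeRegistration dnReg, String softwareVersion) throws Exception {
    // Hypothetical helper; point the mocked registration at the version under test.
    doReturn(softwareVersion).when(dnReg).getSoftwareVersion();
    try {
        rpcServer.registerDatanode(dnReg);
        fail("Should not have been able to register DN with version " + softwareVersion);
    } catch (IncorrectVersionException ive) {
        GenericTestUtils.assertExceptionContains(
            "The reported DataNode version is too low", ive);
    }
}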

Example 2 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

From the class TestBlockRecovery, method initBlockRecords.

private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
    List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
    BlockRecord blockRecord = new BlockRecord(new DatanodeID(dnR), spyDN, new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), ReplicaState.FINALIZED));
    blocks.add(blockRecord);
    return blocks;
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), BlockRecord (org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord), ArrayList (java.util.ArrayList)
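
The ReplicaRecoveryInfo constructor call above packs four positional values. A minimal standalone sketch with hypothetical literals, mirroring the argument order used in initBlockRecords:

// Argument order mirrors the call above; all values here are hypothetical.
ReplicaRecoveryInfo rri = new ReplicaRecoveryInfo(
    1024L,                   // block id
    512L,                    // number of bytes on disk
    100L,                    // generation stamp
    ReplicaState.FINALIZED); // replica state at the time recovery starts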

Example 3 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

From the class BlockReportTestBase, method blockReport_09.

// Similar to BlockReport_08, but corrupts the GS and length of the
// TEMPORARY replica's block. Expect the same behaviour: the NN should
// simply ignore this block.
@Test(timeout = 300000)
public void blockReport_09() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    final int DN_N1 = DN_N0 + 1;
    final int bytesChkSum = 1024 * 1000;
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
    shutDownCluster();
    startUpCluster();
    try {
        writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
        Block bl = findBlock(filePath, 12 * bytesChkSum);
        BlockChecker bc = new BlockChecker(filePath);
        bc.start();
        waitForTempReplica(bl, DN_N1);
        // all blocks belong to the same file, hence same BP
        DataNode dn = cluster.getDataNodes().get(DN_N1);
        String poolId = cluster.getNamesystem().getBlockPoolId();
        DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
        StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
        sendBlockReports(dnR, poolId, reports);
        printStats();
        assertEquals("Wrong number of PendingReplication blocks", 2, cluster.getNamesystem().getPendingReplicationBlocks());
        try {
            bc.join();
        } catch (InterruptedException e) {
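            // Ignore the interrupt; the join is best-effort cleanup of the checker thread.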
        }
    } finally {
        // return the initial state of the configuration
        resetConfiguration();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Test (org.junit.Test)
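
The two boolean arguments to getBlockReports select the corruption injected into the generated report. A hedged reading, inferred only from how the flags are used across the examples on this page (the exact parameter names live in BlockReportTestBase):

// Treat these flag meanings as an assumption inferred from usage,
// not as documented API.
getBlockReports(dn, poolId, true,  true);   // corrupt a replica's GS and length (blockReport_09)
getBlockReports(dn, poolId, true,  false);  // corrupt a replica's GS only (blockReport_03)
getBlockReports(dn, poolId, false, false);  // report replicas unmodified (blockReport_04)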

Example 4 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

From the class BlockReportTestBase, method blockReport_03.

/**
   * Test writes a file and closes it.
   * The block report is generated with a bad GS for a single block.
   * The block report is forced and the number of corrupt blocks is checked.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_03() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    writeFile(METHOD_NAME, FILE_SIZE, filePath);
    // all blocks belong to the same file, hence same BP
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertThat("Wrong number of corrupt blocks", cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
    assertThat("Wrong number of PendingDeletion blocks", cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Test (org.junit.Test)
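
The assertThat/is pairing in this test is JUnit 4's Hamcrest integration. Since the file's import list is not shown here, the static imports below are stated as an assumption, but they are the standard ones such a test would use:

import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;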

Example 5 with DatanodeRegistration

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.

From the class BlockReportTestBase, method blockReport_04.

/**
   * Test writes a file and closes it.
   * The block report is generated with one extra block.
   * The block report is forced and the number of PendingDeletion blocks
   * is checked.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 300000)
public void blockReport_04() throws IOException {
    final String METHOD_NAME = GenericTestUtils.getMethodName();
    Path filePath = new Path("/" + METHOD_NAME + ".dat");
    DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
    DataNode dn = cluster.getDataNodes().get(DN_N0);
    // all blocks belong to the same file, hence same BP
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // Create a bogus new block which will not be present on the namenode.
    ExtendedBlock b = new ExtendedBlock(poolId, rand.nextLong(), 1024L, rand.nextLong());
    dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertThat("Wrong number of corrupt blocks", cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
    assertThat("Wrong number of PendingDeletion blocks", cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), Test (org.junit.Test)
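
The bogus block in this test is built from four positional arguments. A minimal sketch with hypothetical literal values to make the argument roles explicit, mirroring the constructor call above:

// Pool id, block id, length in bytes, generation stamp; all values hypothetical.
ExtendedBlock bogus = new ExtendedBlock("BP-fake-pool", 9999L, 1024L, 1L);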

Aggregations

DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 48 usages
Test (org.junit.Test): 36 usages
Configuration (org.apache.hadoop.conf.Configuration): 19 usages
Path (org.apache.hadoop.fs.Path): 16 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 12 usages
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 12 usages
ArrayList (java.util.ArrayList): 10 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 10 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 10 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 9 usages
IOException (java.io.IOException): 8 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 8 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 8 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 7 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 6 usages
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 5 usages