
Example 1 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestDecommission, method testDecommissionWithNamenodeRestart.

/**
   * Tests restart of the namenode while datanode hosts are added to the exclude file.
   */
@Test(timeout = 360000)
public void testDecommissionWithNamenodeRestart() throws IOException, InterruptedException {
    LOG.info("Starting test testDecommissionWithNamenodeRestart");
    int numNamenodes = 1;
    int numDatanodes = 1;
    int replicas = 1;
    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
    getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
    startCluster(numNamenodes, numDatanodes);
    Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
    FileSystem fileSys = getCluster().getFileSystem();
    writeFile(fileSys, file1, replicas);
    DFSClient client = getDfsClient(0);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
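    // DatanodeInfo extends DatanodeID, so the live-report entry can be used directly as the node's identifier.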
    DatanodeID excludedDatanodeID = info[0];
    String excludedDatanodeName = info[0].getXferAddr();
    initExcludeHost(excludedDatanodeName);
    //Add a new datanode to cluster
    getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
    numDatanodes += 1;
    assertEquals("Number of datanodes should be 2 ", 2, getCluster().getDataNodes().size());
    //Restart the namenode
    getCluster().restartNameNode();
    DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(getCluster().getNamesystem(), excludedDatanodeID);
    waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
    // Ensure decommissioned datanode is not automatically shutdown
    assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length);
    assertTrue("Checked if block was replicated after decommission.", checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(), numDatanodes) == null);
    cleanupFile(fileSys, file1);
    // Restart the cluster and ensure recommissioned datanodes
    // are allowed to register with the namenode
    shutdownCluster();
    startCluster(numNamenodes, numDatanodes);
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
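For reference, a DatanodeID can also be constructed directly. The sketch below is not taken from the Hadoop test above; it assumes the seven-argument constructor present in Hadoop 2.x/3.x (ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort), and the class name, host, UUID and port values are illustrative only. getXferAddr() yields the "ip:port" string that the decommission test writes into the exclude file.

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdSketch {

    public static void main(String[] args) {
        // Illustrative values; a real cluster assigns these when the datanode registers.
        DatanodeID id = new DatanodeID(
            "127.0.0.1",          // ipAddr
            "localhost",          // hostName
            "datanode-uuid-0001", // datanodeUuid
            9866,                 // xferPort (data transfer)
            9864,                 // infoPort (HTTP)
            9865,                 // infoSecurePort (HTTPS)
            9867);                // ipcPort
        // getXferAddr() returns "ip:xferPort", the same form used in the exclude file above.
        System.out.println(id.getXferAddr());
    }
}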

Example 2 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestBlockRecovery, method initBlockRecords.

private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
    List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
    BlockRecord blockRecord = new BlockRecord(new DatanodeID(dnR), spyDN, new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(), block.getGenerationStamp(), ReplicaState.FINALIZED));
    blocks.add(blockRecord);
    return blocks;
}
Also used : DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) ReplicaRecoveryInfo(org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo) BlockRecord(org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord) ArrayList(java.util.ArrayList)
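The BlockRecord above passes the DatanodeRegistration through the DatanodeID copy constructor, which keeps only the identity fields; DatanodeRegistration extends DatanodeID, so the registration-specific payload (storage info, block keys, software version) is dropped. A minimal, self-contained sketch of that copy constructor, with made-up host, UUID and port values:

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdCopySketch {

    public static void main(String[] args) {
        DatanodeID original = new DatanodeID(
            "10.0.0.1", "dn1.example.com", "uuid-0001", 9866, 9864, 9865, 9867);
        // The same constructor accepts any DatanodeID subclass, e.g. a DatanodeRegistration,
        // and copies only the identity fields into a plain DatanodeID.
        DatanodeID copy = new DatanodeID(original);
        System.out.println(copy.getXferAddr().equals(original.getXferAddr())); // true
    }
}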

Example 3 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestNamenodeRetryCache, method testUpdatePipelineWithFailOver.

/**
   * Make sure a retry call does not hang because of the exception thrown in the
   * first call.
   */
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
    cluster.shutdown();
    nnRpc = null;
    filesystem = null;
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    cluster.waitActive();
    NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
    ExtendedBlock oldBlock = new ExtendedBlock();
    ExtendedBlock newBlock = new ExtendedBlock();
    DatanodeID[] newNodes = new DatanodeID[2];
    String[] newStorages = new String[2];
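    // Placeholder pipeline arguments; the test exercises only the standby/retry behavior, not a real pipeline update.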
    newCall();
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
        fail("Expect StandbyException from the updatePipeline call");
    } catch (StandbyException e) {
        // expected, since in the beginning both nn are in standby state
        GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(), e);
    }
    cluster.transitionToActive(0);
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
    } catch (IOException e) {
    // ignored; the point is that the retry call does not hang.
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) StandbyException(org.apache.hadoop.ipc.StandbyException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) Test(org.junit.Test)

Example 4 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestUpgradeDomainBlockPlacementPolicy, method testPlacementAfterDecommission.

@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
    final long fileSize = DEFAULT_BLOCK_SIZE * 5;
    final String testFile = "/testfile";
    final Path path = new Path(testFile);
    DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize, REPLICATION_FACTOR, 1000L);
    // Decommission some nodes and wait until decommissions have finished.
    refreshDatanodeAdminProperties2();
    GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
            boolean successful = true;
            LocatedBlocks locatedBlocks;
            try {
                locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
            } catch (IOException ioe) {
                return false;
            }
            for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
                Set<DatanodeInfo> locs = new HashSet<>();
                for (DatanodeInfo datanodeInfo : block.getLocations()) {
                    if (datanodeInfo.getAdminState() == DatanodeInfo.AdminStates.NORMAL) {
                        locs.add(datanodeInfo);
                    }
                }
                for (DatanodeID datanodeID : expectedDatanodeIDs) {
                    successful = successful && locs.contains(datanodeID);
                }
            }
            return successful;
        }
    }, 1000, 60000);
    // Verify block placement policy of each block.
    LocatedBlocks locatedBlocks;
    locatedBlocks = cluster.getFileSystem().getClient().getLocatedBlocks(path.toString(), 0, fileSize);
    for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        BlockPlacementStatus status = cluster.getNamesystem().getBlockManager().getBlockPlacementPolicy().verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
        assertTrue(status.isPlacementPolicySatisfied());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashSet(java.util.HashSet) Set(java.util.Set) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) BlockPlacementStatus(org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Test(org.junit.Test)
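The waitFor loop above looks up plain DatanodeID objects in a Set of DatanodeInfo. That works because DatanodeInfo extends DatanodeID and, in recent Hadoop releases, DatanodeID equality is value-based on the node's identity (transfer address and datanode UUID) rather than on object identity or admin state. A small standalone sketch of that assumption; class name and values are illustrative, not from the test:

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdEqualitySketch {

    public static void main(String[] args) {
        // Two distinct objects describing the same node.
        DatanodeID a = new DatanodeID("10.0.0.1", "dn1", "uuid-0001", 9866, 9864, 9865, 9867);
        DatanodeID b = new DatanodeID("10.0.0.1", "dn1", "uuid-0001", 9866, 9864, 9865, 9867);

        Set<DatanodeID> locs = new HashSet<>();
        locs.add(a);
        // Expected to print true if equals()/hashCode() are keyed on the identity fields,
        // which is what the contains() check in the test above relies on.
        System.out.println(locs.contains(b));
    }
}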

Example 5 with DatanodeID

Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.

From the class TestUpgradeDomainBlockPlacementPolicy, method refreshDatanodeAdminProperties.

/**
   * Define admin properties for these datanodes as follows.
   * dn0's upgrade domain is ud5.
   * dn1's upgrade domain is ud2.
   * dn2's upgrade domain is ud3.
   * dn3's upgrade domain is ud1.
   * dn4's upgrade domain is ud2.
   * dn5's upgrade domain is ud4.
   * dn0 and dn5 are decommissioned.
   * Given dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
   * rack2. Then any block's replicas should be on either
   * {dn1, dn2, dn3} or {dn2, dn3, dn4}.
   */
private void refreshDatanodeAdminProperties() throws IOException {
    DatanodeAdminProperties[] datanodes = new DatanodeAdminProperties[hosts.length];
    for (int i = 0; i < hosts.length; i++) {
        datanodes[i] = new DatanodeAdminProperties();
        DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
        datanodes[i].setHostName(datanodeID.getHostName());
        datanodes[i].setPort(datanodeID.getXferPort());
        datanodes[i].setUpgradeDomain(upgradeDomains[i]);
    }
    datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    hostsFileWriter.initIncludeHosts(datanodes);
    cluster.getFileSystem().refreshNodes();
    expectedDatanodeIDs.clear();
    expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
    expectedDatanodeIDs.add(cluster.getDataNodes().get(3).getDatanodeId());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties)
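The DatanodeAdminProperties entries written by hostsFileWriter.initIncludeHosts correspond to the JSON "combined" hosts file format that the namenode reads when the combined host file manager is configured as the hosts provider. The snippet below is a rough illustration only: the field names follow the DatanodeAdminProperties bean (hostName, port, upgradeDomain, adminState), while the hostnames, ports and upgrade domains are made up.

[
  { "hostName": "dn0.example.com", "port": 9866, "upgradeDomain": "ud5",
    "adminState": "DECOMMISSIONED" },
  { "hostName": "dn1.example.com", "port": 9866, "upgradeDomain": "ud2" },
  { "hostName": "dn2.example.com", "port": 9866, "upgradeDomain": "ud3" }
]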

Aggregations

DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 51 usages
Test (org.junit.Test): 36 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 18 usages
Configuration (org.apache.hadoop.conf.Configuration): 13 usages
Path (org.apache.hadoop.fs.Path): 12 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 10 usages
IOException (java.io.IOException): 8 usages
InetSocketAddress (java.net.InetSocketAddress): 8 usages
Peer (org.apache.hadoop.hdfs.net.Peer): 8 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 8 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 8 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 7 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 7 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 6 usages
Socket (java.net.Socket): 5 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 5 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 usages
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 5 usages