Example 76 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDecommission method checkFile.

/**
   * Verify that the number of replicas is as expected for each block in
   * the given file.
   * For blocks with a decommissioned node, verify that their replication
   * is one more than what is specified.
   * For blocks without decommissioned nodes, verify that their replication
   * is equal to what is specified.
   *
   * @param downnode - if null, there is no decommissioned node for this file.
   * @return - null if no failure is found, else an error message string.
   */
private static String checkFile(FileSystem fileSys, Path name, int repl, String downnode, int numDatanodes) throws IOException {
    boolean isNodeDown = (downnode != null);
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    for (LocatedBlock blk : dinfo) {
        // for each block
        int hasdown = 0;
        DatanodeInfo[] nodes = blk.getLocations();
        for (int j = 0; j < nodes.length; j++) {
            // for each replica
            if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
                hasdown++;
                //Downnode must actually be decommissioned
                if (!nodes[j].isDecommissioned()) {
                    return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is given as downnode, " + "but is not decommissioned";
                }
                //Decommissioned node (if any) should only be last node in list.
                if (j != nodes.length - 1) {
                    return "For block " + blk.getBlock() + " decommissioned node " + nodes[j] + " was not last node in list: " + (j + 1) + " of " + nodes.length;
                }
                LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j] + " is decommissioned.");
            } else {
                //Non-downnodes must not be decommissioned
                if (nodes[j].isDecommissioned()) {
                    return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is unexpectedly decommissioned";
                }
            }
        }
        LOG.info("Block " + blk.getBlock() + " has " + hasdown + " decommissioned replica.");
        if (Math.min(numDatanodes, repl + hasdown) != nodes.length) {
            return "Wrong number of replicas for block " + blk.getBlock() + ": " + nodes.length + ", expected " + Math.min(numDatanodes, repl + hasdown);
        }
    }
    return null;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsDataInputStream(org.apache.hadoop.hdfs.client.HdfsDataInputStream)
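
A minimal usage sketch for checkFile, not taken from TestDecommission itself: it assumes the method lives in the same test class, and decommissionNode() is a hypothetical helper that decommissions one datanode and returns its transfer address.

// Hedged usage sketch; assumes this sits inside TestDecommission next to checkFile().
// decommissionNode() is a hypothetical helper, not part of the snippet above.
private void verifyFileAfterDecommission(MiniDFSCluster cluster) throws Exception {
    FileSystem fileSys = cluster.getFileSystem();
    Path name = new Path("/decommission/file1");
    // write a small file with replication factor 3
    DFSTestUtil.createFile(fileSys, name, 1024L, (short) 3, 0L);
    // decommission one datanode and wait for it to finish (hypothetical helper)
    String downnode = decommissionNode(cluster, 0);
    // checkFile() returns null when every block has the expected number of replicas
    assertNull(checkFile(fileSys, name, 3, downnode, cluster.getDataNodes().size()));
}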

Example 77 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestDatanodeReport method testDatanodeReportWithUpgradeDomain.

/**
   * This test verifies that the upgrade domain is set according to the JSON hosts file.
   */
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/datanodeReport");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    final DFSClient client = cluster.getFileSystem().dfs;
    final String ud1 = "ud1";
    final String ud2 = "ud2";
    try {
        //wait until the cluster is up
        cluster.waitActive();
        DatanodeAdminProperties datanode = new DatanodeAdminProperties();
        datanode.setHostName(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
        datanode.setUpgradeDomain(ud1);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud1, all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(null);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(null, all[0].getUpgradeDomain());
        datanode.setUpgradeDomain(ud2);
        hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] { datanode });
        client.refreshNodes();
        all = client.datanodeReport(DatanodeReportType.ALL);
        assertEquals(ud2, all[0].getUpgradeDomain());
    } finally {
        cluster.shutdown();
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter) DatanodeAdminProperties(org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties) Test(org.junit.Test)
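
As a hedged illustration, the JSON hosts file written by initIncludeHosts() above contains entries whose fields mirror the DatanodeAdminProperties setters. The exact layout (one JSON object per line versus a JSON array) and the datanode port differ across Hadoop versions, so the values below are made up.

// Hedged sketch of a single include-file entry; hostName/port/upgradeDomain/adminState
// correspond to DatanodeAdminProperties, and the concrete values are illustrative only.
String sampleIncludeEntry =
    "{\"hostName\": \"dn1.example.com\", \"port\": 9866, "
        + "\"upgradeDomain\": \"ud1\", \"adminState\": \"NORMAL\"}";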

Example 78 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockRecovery method testRaceBetweenReplicaRecoveryAndFinalizeBlock.

/**
   * Test to verify the race between finalizeBlock and lease recovery.
   * 
   * @throws Exception
   */
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
    // Stop the Mocked DN started in startup()
    tearDown();
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitClusterUp();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream out = fs.create(path);
        out.writeBytes("data");
        out.hsync();
        List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
        final LocatedBlock block = blocks.get(0);
        final DataNode dataNode = cluster.getDataNodes().get(0);
        final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
        Thread recoveryThread = new Thread() {

            @Override
            public void run() {
                try {
                    DatanodeInfo[] locations = block.getLocations();
                    final RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(), locations, block.getBlock().getGenerationStamp() + 1);
                    try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
                        Thread.sleep(2000);
                        dataNode.initReplicaRecovery(recoveringBlock);
                    }
                } catch (Exception e) {
                    recoveryInitResult.set(false);
                }
            }
        };
        recoveryThread.start();
        try {
            out.close();
        } catch (IOException e) {
            Assert.assertTrue("Writing should fail", e.getMessage().contains("are bad. Aborting..."));
        } finally {
            recoveryThread.join();
        }
        Assert.assertTrue("Recovery should be initiated successfully", recoveryInitResult.get());
        dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock().getGenerationStamp() + 1, block.getBlock().getBlockId(), block.getBlockSize());
    } finally {
        if (null != cluster) {
            cluster.shutdown();
            cluster = null;
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URISyntaxException(java.net.URISyntaxException) TimeoutException(java.util.concurrent.TimeoutException) RecoveryInProgressException(org.apache.hadoop.hdfs.protocol.RecoveryInProgressException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) AutoCloseableLock(org.apache.hadoop.util.AutoCloseableLock) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)

Example 79 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockRecovery method initRecoveringBlocks.

private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
    Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
    DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
    DatanodeInfo[] locs = new DatanodeInfo[] { new DatanodeInfoBuilder().setNodeID(dn.getDNRegistrationForBP(block.getBlockPoolId())).build(), mockOtherDN };
    RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
    blocks.add(rBlock);
    return blocks;
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeInfoBuilder(org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) ArrayList(java.util.ArrayList)
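
For context, a hedged sketch of how a collection like this maps onto the production protocol: the namenode ships recovering blocks to a datanode inside a BlockRecoveryCommand (org.apache.hadoop.hdfs.server.protocol). This wiring is an illustration only, not how TestBlockRecovery itself drives recovery.

// Hedged sketch: package the recovering blocks the way the namenode would when it
// issues a block-recovery heartbeat command; illustration only.
Collection<RecoveringBlock> recovering = initRecoveringBlocks();
BlockRecoveryCommand cmd = new BlockRecoveryCommand(recovering.size());
for (RecoveringBlock rb : recovering) {
    cmd.add(rb);
}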

Example 80 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockReplacement method testBlockMoveAcrossStorageInSameNode.

@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // create only one datanode in the cluster to verify movement within
    // datanode.
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
        DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // get the current located block
        LocatedBlock locatedBlock = locatedBlocks.get(0);
        ExtendedBlock block = locatedBlock.getBlock();
        DatanodeInfo[] locations = locatedBlock.getLocations();
        assertEquals(1, locations.length);
        StorageType[] storageTypes = locatedBlock.getStorageTypes();
        // current block should be written to DISK
        assertTrue(storageTypes[0] == StorageType.DISK);
        DatanodeInfo source = locations[0];
        // move the block to ARCHIVE by using the same DatanodeInfo for source, proxy and
        // destination, so that the movement happens within the datanode
        assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE, Status.SUCCESS));
        // wait until the namenode has been notified of the move
        Thread.sleep(3000);
        locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // get the current located block again, after the move
        locatedBlock = locatedBlocks.get(0);
        assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
        assertTrue("Block should be moved to ARCHIVE", locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
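
A hedged alternative to the fixed Thread.sleep(3000) above would be to poll the namenode until the relocated block reports the ARCHIVE storage type. GenericTestUtils.waitFor is a real Hadoop test utility, but the Supplier type it expects (Guava versus java.util.function) differs across Hadoop versions, so treat this as a sketch rather than drop-in code.

// Hedged sketch: poll instead of sleeping; dfs and file are the finals declared above.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            LocatedBlock lb = dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
            return lb.getStorageTypes()[0] == StorageType.ARCHIVE;
        } catch (IOException e) {
            return false;
        }
    }
}, 500, 30000);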

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214
Test (org.junit.Test): 103
Path (org.apache.hadoop.fs.Path): 91
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73
IOException (java.io.IOException): 47
FileSystem (org.apache.hadoop.fs.FileSystem): 44
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43
ArrayList (java.util.ArrayList): 39
Configuration (org.apache.hadoop.conf.Configuration): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25
InetSocketAddress (java.net.InetSocketAddress): 20
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20
StorageType (org.apache.hadoop.fs.StorageType): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14