
Example 86 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hbase by apache.

the class TestBlockReorder method testBlockLocationReorder.

/**
   * Test that we can add a hook, and that this hook works when we try to read the file in HDFS.
   */
@Test
public void testBlockLocationReorder() throws Exception {
    Path p = new Path("hello");
    Assert.assertTrue((short) cluster.getDataNodes().size() > 1);
    final int repCount = 2;
    // Let's write the file
    FSDataOutputStream fop = dfs.create(p, (short) repCount);
    final double toWrite = 875.5613;
    fop.writeDouble(toWrite);
    fop.close();
    // Let's check we can read it when everybody's there
    long start = System.currentTimeMillis();
    FSDataInputStream fin = dfs.open(p);
    Assert.assertTrue(toWrite == fin.readDouble());
    long end = System.currentTimeMillis();
    LOG.info("readtime= " + (end - start));
    fin.close();
    Assert.assertTrue((end - start) < 30 * 1000);
    // Let's kill the first location. Note that the first location actually returned can change.
    // First, get the block location, then extract the port.
    FileStatus f = dfs.getFileStatus(p);
    BlockLocation[] lbs;
    do {
        lbs = dfs.getFileBlockLocations(f, 0, 1);
    } while (lbs.length != 1 && lbs[0].getLength() != repCount);
    final String name = lbs[0].getNames()[0];
    Assert.assertTrue(name.indexOf(':') > 0);
    String portS = name.substring(name.indexOf(':') + 1);
    final int port = Integer.parseInt(portS);
    LOG.info("port= " + port);
    int ipcPort = -1;
    // Let's find the DN to kill. cluster.getDataNodes(int) looks up by index, not by port,
    // so we iterate over the datanodes ourselves.
    boolean ok = false;
    final String lookup = lbs[0].getHosts()[0];
    StringBuilder sb = new StringBuilder();
    for (DataNode dn : cluster.getDataNodes()) {
        final String dnName = getHostName(dn);
        sb.append(dnName).append(' ');
        if (lookup.equals(dnName)) {
            ok = true;
            LOG.info("killing datanode " + name + " / " + lookup);
            ipcPort = dn.ipcServer.getListenerAddress().getPort();
            dn.shutdown();
            LOG.info("killed datanode " + name + " / " + lookup);
            break;
        }
    }
    Assert.assertTrue("didn't find the server to kill, was looking for " + lookup + " found " + sb, ok);
    LOG.info("ipc port= " + ipcPort);
    // Add the hook, with an implementation checking that we don't use the port we've just killed.
    Assert.assertTrue(HFileSystem.addLocationsOrderInterceptor(conf, new HFileSystem.ReorderBlocks() {

        @Override
        public void reorderBlocks(Configuration c, LocatedBlocks lbs, String src) {
            for (LocatedBlock lb : lbs.getLocatedBlocks()) {
                if (lb.getLocations().length > 1) {
                    DatanodeInfo[] infos = lb.getLocations();
                    if (infos[0].getHostName().equals(lookup)) {
                        LOG.info("HFileSystem bad host, inverting");
                        DatanodeInfo tmp = infos[0];
                        infos[0] = infos[1];
                        infos[1] = tmp;
                    }
                }
            }
        }
    }));
    final int retries = 10;
    ServerSocket ss = null;
    ServerSocket ssI;
    try {
        // We occupy both ports so that later connection attempts to the dead datanode time out.
        ss = new ServerSocket(port);
        ssI = new ServerSocket(ipcPort);
    } catch (BindException be) {
        LOG.warn("Got bind exception trying to set up socket on " + port + " or " + ipcPort + ", this means that the datanode has not closed the socket or" + " someone else took it. It may happen, skipping this test for this time.", be);
        if (ss != null) {
            ss.close();
        }
        return;
    }
    // So we try 'retries' times; with the reorder in place a read never takes more than a few milliseconds.
    for (int i = 0; i < retries; i++) {
        start = System.currentTimeMillis();
        fin = dfs.open(p);
        Assert.assertTrue(toWrite == fin.readDouble());
        fin.close();
        end = System.currentTimeMillis();
        LOG.info("HFileSystem readtime= " + (end - start));
        Assert.assertFalse("We took too much time to read", (end - start) > 60000);
    }
    ss.close();
    ssI.close();
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BindException(java.net.BindException) ServerSocket(java.net.ServerSocket) BlockLocation(org.apache.hadoop.fs.BlockLocation) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
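
The test refers to cluster, dfs, and conf fields initialized elsewhere in TestBlockReorder. A minimal sketch of such a fixture follows, assuming a plain MiniDFSCluster and HdfsConfiguration rather than the class's actual setup code:

// Sketch of the fixture the test assumes. Field names match the test above;
// the real setup in TestBlockReorder may differ.
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;

@Before
public void setUp() throws Exception {
    conf = new HdfsConfiguration();
    // Three datanodes, so repCount = 2 leaves a spare replica location.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem();
}

@After
public void tearDown() throws Exception {
    if (cluster != null) {
        cluster.shutdown();
    }
}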

Example 87 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method restartDataNode.

/**
   * Restart a datanode, on the same port if requested
   * @param dnprop the datanode to restart
   * @param keepPort whether to use the same port 
   * @return true if restarting is successful
   * @throws IOException
   */
public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException {
    Configuration conf = dnprop.conf;
    String[] args = dnprop.dnArgs;
    SecureResources secureResources = dnprop.secureResources;
    // save cloned config
    Configuration newconf = new HdfsConfiguration(conf);
    if (keepPort) {
        InetSocketAddress addr = dnprop.datanode.getXferAddress();
        conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort());
        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
    }
    final DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
    final DataNodeProperties dnp = new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort());
    dataNodes.add(dnp);
    numDataNodes++;
    setDataNodeStorageCapacities(dataNodes.lastIndexOf(dnp), newDn, storageCap.toArray(new long[][] {}));
    return true;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) SecureResources(org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources)
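
A typical caller pairs this with stopDataNode, restarting the node from the properties that stopDataNode returned. A brief usage sketch, assuming a running MiniDFSCluster (the index is arbitrary):

// Sketch: bounce datanode 0 and bring it back on its original ports.
MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0);
boolean restarted = cluster.restartDataNode(dnprop, true); // keepPort = true
Assert.assertTrue(restarted);
cluster.waitActive(); // wait for the restarted node to re-register with the NameNode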

Example 88 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method injectBlocks.

/**
   * This method is valid only if the data nodes have simulated data.
   * @param dataNodeIndex - index of the data node to inject into; the index is the same as for getDataNodes()
   * @param blocksToInject - the blocks to inject
   * @param bpid - (optional) the block pool id to use for injecting blocks.
   *             If not supplied then it is queried from the in-process NameNode.
   * @throws IOException if the dataset is not a SimulatedFSDataset,
   *             or if any of the blocks already exist in the data node
   */
public void injectBlocks(int dataNodeIndex, Iterable<Block> blocksToInject, String bpid) throws IOException {
    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
        throw new IndexOutOfBoundsException();
    }
    final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
    final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
    if (!(dataSet instanceof SimulatedFSDataset)) {
        throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
    }
    if (bpid == null) {
        bpid = getNamesystem().getBlockPoolId();
    }
    SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
    sdataset.injectBlocks(bpid, blocksToInject);
    dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) SimulatedFSDataset(org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset) IOException(java.io.IOException)
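
injectBlocks only works when the cluster was started with the simulated dataset, which is enabled via SimulatedFSDataset.setFactory before the cluster is built. A sketch of that setup; the block id, length, and generation stamp below are arbitrary:

// Sketch: start a cluster backed by SimulatedFSDataset, then inject a block.
Configuration conf = new HdfsConfiguration();
SimulatedFSDataset.setFactory(conf); // without this, injectBlocks throws IOException
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();

List<Block> blocks = new ArrayList<>();
blocks.add(new Block(1001L, 4096L, 1L)); // blockId, numBytes, generationStamp
cluster.injectBlocks(0, blocks, null);   // null bpid: queried from the NameNode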

Example 89 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method corruptBlockOnDataNodesHelper.

private int corruptBlockOnDataNodesHelper(ExtendedBlock block, boolean deleteBlockFile) throws IOException {
    int blocksCorrupted = 0;
    for (DataNode dn : getDataNodes()) {
        try {
            MaterializedReplica replica = getFsDatasetTestUtils(dn).getMaterializedReplica(block);
            if (deleteBlockFile) {
                replica.deleteData();
            } else {
                replica.corruptData();
            }
            blocksCorrupted++;
        } catch (ReplicaNotFoundException e) {
        // Ignore: this datanode does not hold a replica of the block.
        }
    }
    return blocksCorrupted;
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) MaterializedReplica(org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException)
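
This helper is private; tests reach it through the public wrappers corruptBlockOnDataNodes (corrupt the replica data) and corruptBlockOnDataNodesByDeletingBlockFile (delete it). A usage sketch, assuming cluster is a running MiniDFSCluster, fs its file system, and a file already written with replication 2 (DFSTestUtil is Hadoop's test utility class):

// Sketch: corrupt every replica of a file's first block and check the count.
Path path = new Path("/corruption-test");                  // hypothetical file
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
int corrupted = cluster.corruptBlockOnDataNodes(block);    // deleteBlockFile = false
Assert.assertEquals(2, corrupted);                         // one per replica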

Example 90 with DataNode

use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

the class MiniDFSCluster method stopDataNodeForUpgrade.

/**
   * Shut down a particular datanode for an upgrade.
   * @param i node index
   * @return null if the node index is out of range, else the properties of the
   * removed node
   */
public synchronized DataNodeProperties stopDataNodeForUpgrade(int i) throws IOException {
    if (i < 0 || i >= dataNodes.size()) {
        return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName() + " from a total of " + (dataNodes.size() + 1) + " datanodes.");
    dn.shutdownDatanode(true);
    numDataNodes--;
    return dnprop;
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode)
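
Unlike a plain stopDataNode, this variant calls shutdownDatanode(true), signaling an upgrade-style shutdown so the datanode notifies the NameNode before going down. A sketch of a rolling-restart pattern built on it, assuming a running multi-node MiniDFSCluster:

// Sketch: take node 0 down for upgrade, then bring it back on the same ports.
MiniDFSCluster.DataNodeProperties props = cluster.stopDataNodeForUpgrade(0);
if (props != null) {                      // null means the index was out of range
    cluster.restartDataNode(props, true); // keepPort = true
    cluster.waitActive();
}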

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165
Test (org.junit.Test): 110
Path (org.apache.hadoop.fs.Path): 78
Configuration (org.apache.hadoop.conf.Configuration): 60
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28
FileSystem (org.apache.hadoop.fs.FileSystem): 27
IOException (java.io.IOException): 24
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
ArrayList (java.util.ArrayList): 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17
File (java.io.File): 15
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12