
Example 6 with FsVolumeImpl

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in the Apache Hadoop project.

The class MiniDFSClusterWithNodeGroup, method startDataNodes.

public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] nodeGroups, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig) throws IOException {
    assert storageCapacities == null || simulatedCapacities == null;
    assert storageTypes == null || storageTypes.length == numDataNodes;
    assert storageCapacities == null || storageCapacities.length == numDataNodes;
    if (operation == StartupOption.RECOVER) {
        return;
    }
    // Either respect an existing datanode hostname setting or force the loopback name.
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }
    int curDatanodesNum = dataNodes.size();
    // For the mini-cluster, the default initial delay for block reports (BRs) is 0.
    if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // Sanity-check that the per-datanode argument arrays are long enough.
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (nodeGroups != null && numDataNodes > nodeGroups.length) {
        throw new IllegalArgumentException("The length of nodeGroups [" + nodeGroups.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }
    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null : new String[] { operation.getName() };
    DataNode[] dns = new DataNode[numDataNodes];
    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i - curDatanodesNum]);
            dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: " + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            if (nodeGroups == null) {
                LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        // save config
        Configuration newconf = new HdfsConfiguration(dnConf);
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }
        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        // Since HDFS keys its topology on IP:port, add the IP:port-to-rack mapping
        // for this DataNode as well.
        String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
        if (racks != null) {
            int port = dn.getXferAddress().getPort();
            if (nodeGroups == null) {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
        dns[i - curDatanodesNum] = dn;
    }
    curDatanodesNum += numDataNodes;
    this.numDataNodes += numDataNodes;
    waitActive();
    if (storageCapacities != null) {
        // Capacities are indexed relative to the datanodes started by this call;
        // both dns and storageCapacities hold exactly numDataNodes entries.
        for (int i = 0; i < numDataNodes; ++i) {
            try (FsDatasetSpi.FsVolumeReferences volumes = dns[i].getFSDataset().getFsVolumeReferences()) {
                assert volumes.size() == storagesPerDatanode;
                for (int j = 0; j < volumes.size(); ++j) {
                    FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
                    volume.setCapacityForTesting(storageCapacities[i][j]);
                }
            }
        }
    }
}
Also used : Configuration (org.apache.hadoop.conf.Configuration) FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) IOException (java.io.IOException) FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) SecureResources (org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources)
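
The distinctive part of this example is the topology registration: with node groups enabled, the "rack" recorded in StaticMapping (org.apache.hadoop.net.StaticMapping) is the rack path concatenated with the node group, and the same mapping is repeated for the DataNode's transfer address, because HDFS resolves topology by IP:port. Below is a minimal, hedged sketch of that registration step; the host, rack, node group, and address values are illustrative and not taken from the Hadoop source.

// Illustrative values; real tests pass these in via the racks/nodeGroups/hosts arrays.
String host = "host0.foo.com";
String rack = "/d1/r1";
String nodeGroup = "/ng1";
// Hostname-to-topology mapping: the node group is appended to the rack path.
StaticMapping.addNodeToRack(host, rack + nodeGroup);
// After the DataNode is instantiated, the same mapping is added for its ip:port,
// since HDFS performs topology resolution on the transfer address.
String ipAndPort = "127.0.0.1:50010"; // illustrative transfer address
StaticMapping.addNodeToRack(ipAndPort, rack + nodeGroup);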

Example 7 with FsVolumeImpl

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in the Apache Hadoop project.

The class MiniDFSCluster, method setDataNodeStorageCapacities.

private synchronized void setDataNodeStorageCapacities(final int curDnIdx, final DataNode curDn, long[][] storageCapacities) throws IOException {
    if (storageCapacities == null || storageCapacities.length == 0) {
        return;
    }
    try {
        waitDataNodeFullyStarted(curDn);
    } catch (TimeoutException | InterruptedException e) {
        throw new IOException(e);
    }
    try (FsDatasetSpi.FsVolumeReferences volumes = curDn.getFSDataset().getFsVolumeReferences()) {
        assert storageCapacities[curDnIdx].length == storagesPerDatanode;
        assert volumes.size() == storagesPerDatanode;
        int j = 0;
        for (FsVolumeSpi fvs : volumes) {
            FsVolumeImpl volume = (FsVolumeImpl) fvs;
            LOG.info("setCapacityForTesting " + storageCapacities[curDnIdx][j] + " for [" + volume.getStorageType() + "]" + volume.getStorageID());
            volume.setCapacityForTesting(storageCapacities[curDnIdx][j]);
            j++;
        }
    }
    DataNodeTestUtils.triggerHeartbeat(curDn);
}
Also used : FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) IOException(java.io.IOException) TimeoutException(java.util.concurrent.TimeoutException)
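
setDataNodeStorageCapacities shows the FsVolumeImpl pattern that recurs throughout these examples: open the FsVolumeReferences in a try-with-resources block, cast each FsVolumeSpi to the concrete FsVolumeImpl, and adjust the capacity it reports. A minimal, hedged sketch of that pattern as a standalone helper follows; the class and method names are illustrative and not part of the Hadoop source.

import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;

final class VolumeCapacityTestUtil {
    /** Illustrative helper: cap every volume of the given DataNode at capacityBytes. */
    static void capAllVolumes(DataNode dn, long capacityBytes) throws IOException {
        // FsVolumeReferences is AutoCloseable; closing it releases the volume references.
        try (FsDatasetSpi.FsVolumeReferences volumes =
                 dn.getFSDataset().getFsVolumeReferences()) {
            for (FsVolumeSpi fvs : volumes) {
                // The test-only capacity override lives on the concrete FsVolumeImpl.
                ((FsVolumeImpl) fvs).setCapacityForTesting(capacityBytes);
            }
        }
    }
}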

Example 8 with FsVolumeImpl

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in the Apache Hadoop project.

The class TestStorageMover, method setVolumeFull.

private void setVolumeFull(DataNode dn, StorageType type) {
    try (FsDatasetSpi.FsVolumeReferences refs = dn.getFSDataset().getFsVolumeReferences()) {
        for (FsVolumeSpi fvs : refs) {
            FsVolumeImpl volume = (FsVolumeImpl) fvs;
            if (volume.getStorageType() == type) {
                LOG.info("setCapacity to 0 for [" + volume.getStorageType() + "]" + volume.getStorageID());
                volume.setCapacityForTesting(0);
            }
        }
    } catch (IOException e) {
        LOG.error("Unexpected exception by closing FsVolumeReference", e);
    }
}
Also used : FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) IOException(java.io.IOException)
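
A typical way to use setVolumeFull, sketched below under the assumption that the code lives inside the same test class (the method is private there, and the class already imports MiniDFSCluster, DataNode, and org.apache.hadoop.fs.StorageType): mark every ARCHIVE volume in the cluster as full so that later block allocations must fall back to the remaining storage types. The helper name is illustrative.

// Assumes the enclosing TestStorageMover-style class and an already started cluster.
private void setAllArchiveVolumesFull(MiniDFSCluster cluster) {
    for (DataNode dn : cluster.getDataNodes()) {
        // Force every ARCHIVE volume on this DataNode to report zero capacity.
        setVolumeFull(dn, StorageType.ARCHIVE);
    }
}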

Example 9 with FsVolumeImpl

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in the Apache Hadoop project.

The class TestMiniDFSCluster, method verifyStorageCapacity.

private void verifyStorageCapacity(final MiniDFSCluster cluster, final long[] capacities) throws IOException {
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    /* verify capacity */
    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
        final DataNode dnNode = cluster.getDataNodes().get(i);
        try (FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences()) {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            assertEquals(capacities[0], source.getCapacity());
            assertEquals(capacities[1], dest.getCapacity());
        }
    }
}
Also used : FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)
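
An invocation sketch for verifyStorageCapacity, assumed to run inside the same test class since the helper is private there. The Builder calls storagesPerDatanode(...) and storageCapacities(...) are assumptions about the MiniDFSCluster test API; the helper itself only needs a running cluster whose volumes were created with the given capacities.

// Hedged sketch; Builder methods other than numDataNodes(...) and build() are assumed.
Configuration conf = new HdfsConfiguration();
long[] capacities = { 2L * 1024 * 1024 * 1024, 4L * 1024 * 1024 * 1024 };
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .storagesPerDatanode(capacities.length)   // assumed Builder method
    .storageCapacities(capacities)            // assumed Builder overload taking long[]
    .build();
try {
    cluster.waitActive();
    verifyStorageCapacity(cluster, capacities);
} finally {
    cluster.shutdown();
}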

Example 10 with FsVolumeImpl

Use of org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl in the Apache Hadoop project.

The class TestDirectoryScanner, method testExceptionHandlingWhileDirectoryScan.

/**
   * Test the behavior of exception handling during directory scan operation.
   * Directory scanner shouldn't abort the scan on every directory just because
   * one had an error.
   */
@Test(timeout = 60000)
public void testExceptionHandlingWhileDirectoryScan() throws Exception {
    cluster = new MiniDFSCluster.Builder(CONF).build();
    try {
        cluster.waitActive();
        bpid = cluster.getNamesystem().getBlockPoolId();
        fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
        client = cluster.getFileSystem().getClient();
        CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY, 1);
        DataNode dataNode = cluster.getDataNodes().get(0);
        // Add files with 2 blocks
        createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH * 2, false);
        // Inject error on #getFinalizedDir() so that ReportCompiler#call() will
        // hit exception while preparing the block info report list.
        List<FsVolumeSpi> volumes = new ArrayList<>();
        Iterator<FsVolumeSpi> iterator = fds.getFsVolumeReferences().iterator();
        while (iterator.hasNext()) {
            FsVolumeImpl volume = (FsVolumeImpl) iterator.next();
            FsVolumeImpl spy = Mockito.spy(volume);
            Mockito.doThrow(new IOException("Error while getFinalizedDir")).when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
            volumes.add(spy);
        }
        FsVolumeReferences volReferences = new FsVolumeReferences(volumes);
        FsDatasetSpi<? extends FsVolumeSpi> spyFds = Mockito.spy(fds);
        Mockito.doReturn(volReferences).when(spyFds).getFsVolumeReferences();
        scanner = new DirectoryScanner(dataNode, spyFds, CONF);
        scanner.setRetainDiffs(true);
        scanner.reconcile();
    } finally {
        if (scanner != null) {
            scanner.shutdown();
            scanner = null;
        }
        cluster.shutdown();
    }
}
Also used : FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) ArrayList(java.util.ArrayList) FsVolumeSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) IOException(java.io.IOException) Test(org.junit.Test)
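
The fault-injection core of this test is Mockito partial stubbing: each real FsVolumeImpl is wrapped in a spy so that every method keeps its real behavior except getFinalizedDir, which is made to throw. Distilled into a hedged fragment, assuming a single FsVolumeImpl named volume is in scope:

// The spy keeps real FsVolumeImpl behavior for all calls except the stubbed one.
FsVolumeImpl spy = Mockito.spy(volume);
// Reuse the volume's first block pool id as the argument expected by the stub.
Mockito.doThrow(new IOException("Injected getFinalizedDir failure"))
    .when(spy).getFinalizedDir(volume.getBlockPoolList()[0]);
// Hand the spy (not the real volume) to the component under test, e.g. through a
// spied FsDatasetSpi whose getFsVolumeReferences() returns it, as the test above does.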

Aggregations

FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 11 usages
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 9 usages
File (java.io.File): 4 usages
IOException (java.io.IOException): 4 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 4 usages
Path (org.apache.hadoop.fs.Path): 3 usages
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 3 usages
Test (org.junit.Test): 3 usages
Random (java.util.Random): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 2 usages
ArrayList (java.util.ArrayList): 1 usage
TimeoutException (java.util.concurrent.TimeoutException): 1 usage
StorageType (org.apache.hadoop.fs.StorageType): 1 usage
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 usage
SecureResources (org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources): 1 usage
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 1 usage