Example 1 with SecureResources

use of org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources in project hadoop by apache.

In the class MiniDFSClusterWithNodeGroup, the method startDataNodes:

public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] nodeGroups, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig) throws IOException {
    assert storageCapacities == null || simulatedCapacities == null;
    assert storageTypes == null || storageTypes.length == numDataNodes;
    assert storageCapacities == null || storageCapacities.length == numDataNodes;
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }
    int curDatanodesNum = dataNodes.size();
    // in a minicluster, the default initial delay for block reports (BRs) is 0
    if (conf.get(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // validate that the argument arrays cover all requested datanodes
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (nodeGroups != null && numDataNodes > nodeGroups.length) {
        throw new IllegalArgumentException("The length of nodeGroups [" + nodeGroups.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }
    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null : new String[] { operation.getName() };
    DataNode[] dns = new DataNode[numDataNodes];
    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            // storageTypes is sized per batch (see the assert above), so the
            // index must be offset by curDatanodesNum
            String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i - curDatanodesNum]);
            dnConf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: " + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            if (nodeGroups == null) {
                LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with hostname : " + name + " to serverGroup " + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        // save config
        Configuration newconf = new HdfsConfiguration(dnConf);
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }
        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        //since HDFS does things based on IP:port, we need to add the mapping
        //for IP:port to rackId
        String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
        if (racks != null) {
            int port = dn.getXferAddress().getPort();
            if (nodeGroups == null) {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum]);
            } else {
                LOG.info("Adding node with IP:port : " + ipAddr + ":" + port + " to nodeGroup " + nodeGroups[i - curDatanodesNum] + " and rack " + racks[i - curDatanodesNum]);
                StaticMapping.addNodeToRack(ipAddr + ":" + port, racks[i - curDatanodesNum] + nodeGroups[i - curDatanodesNum]);
            }
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
        dns[i - curDatanodesNum] = dn;
    }
    this.numDataNodes += numDataNodes;
    waitActive();
    if (storageCapacities != null) {
        // dns and storageCapacities hold only this batch, so both are indexed
        // 0..numDataNodes-1 rather than by the cluster-wide datanode number
        for (int i = 0; i < numDataNodes; ++i) {
            try (FsDatasetSpi.FsVolumeReferences volumes = dns[i].getFSDataset().getFsVolumeReferences()) {
                assert volumes.size() == storagesPerDatanode;
                for (int j = 0; j < volumes.size(); ++j) {
                    FsVolumeImpl volume = (FsVolumeImpl) volumes.get(j);
                    volume.setCapacityForTesting(storageCapacities[i][j]);
                }
            }
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) IOException(java.io.IOException) FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) SecureResources(org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources)
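
To see how this variant is driven, here is a minimal, hypothetical test sketch that brings up a two-datanode cluster with explicit racks and node groups. The builder wiring and the static setNodeGroups registration follow the pattern of existing node-group tests; the class name NodeGroupClusterSketch and the topology values are illustrative, not taken from the listing above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup;

public class NodeGroupClusterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        String[] racks = { "/rack1", "/rack1" };
        String[] nodeGroups = { "/nodegroup1", "/nodegroup2" };
        // Node groups are registered statically before the cluster is built;
        // the startDataNodes override above then pairs each datanode with its
        // rack and node group. (Real tests also switch the topology and block
        // placement policy to their node-group-aware implementations.)
        MiniDFSClusterWithNodeGroup.setNodeGroups(nodeGroups);
        MiniDFSCluster cluster = new MiniDFSClusterWithNodeGroup(
            new MiniDFSCluster.Builder(conf)
                .numDataNodes(2)
                .racks(racks));
        try {
            cluster.waitActive();
            // ... exercise the cluster ...
        } finally {
            cluster.shutdown();
        }
    }
}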

Example 2 with SecureResources

use of org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources in project hadoop by apache.

In the class MiniDFSCluster, the method startDataNodes:

/**
   * Modify the config and start up additional DataNodes.  The info port for
   * DataNodes is guaranteed to use a free port.
   *  
   *  Data nodes can run with the name node in the mini cluster or
   *  a real name node. For example, running with a real name node is useful
   *  when running simulated data nodes with a real name node.
   *  If the mini cluster's name node is null, assume that the conf has been
   *  set with the right address:port of the name node.
   *
   * @param conf the base configuration to use in starting the DataNodes.  This
   *          will be modified as necessary.
   * @param numDataNodes Number of DataNodes to start; may be zero
   * @param storageTypes per-datanode array of storage types for each volume;
   *          may be null
   * @param manageDfsDirs if true, the data directories for DataNodes will be
   *          created and {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} will be
   *          set in the conf
   * @param operation the operation with which to start the DataNodes.  If null
   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
   * @param racks array of strings indicating the rack that each DataNode is on
   * @param hosts array of strings indicating the hostnames for each DataNode
   * @param storageCapacities per-datanode array of per-volume capacities;
   *          may be null
   * @param simulatedCapacities array of capacities of the simulated data nodes
   * @param setupHostsFile add new nodes to dfs hosts files
   * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
   * @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
   * @param dnConfOverlays An array of {@link Configuration} objects that will overlay the
   *              global MiniDFSCluster Configuration for the corresponding DataNode.
   * @throws IllegalStateException if NameNode has been shutdown
   */
public synchronized void startDataNodes(Configuration conf, int numDataNodes, StorageType[][] storageTypes, boolean manageDfsDirs, StartupOption operation, String[] racks, String[] hosts, long[][] storageCapacities, long[] simulatedCapacities, boolean setupHostsFile, boolean checkDataNodeAddrConfig, boolean checkDataNodeHostConfig, Configuration[] dnConfOverlays) throws IOException {
    assert storageCapacities == null || simulatedCapacities == null;
    assert storageTypes == null || storageTypes.length == numDataNodes;
    assert storageCapacities == null || storageCapacities.length == numDataNodes;
    if (operation == StartupOption.RECOVER) {
        return;
    }
    if (checkDataNodeHostConfig) {
        conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    } else {
        conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
    }
    int curDatanodesNum = dataNodes.size();
    // in a minicluster, the default initial delay for block reports (BRs) is 0
    if (conf.get(DFS_BLOCKREPORT_INITIAL_DELAY_KEY) == null) {
        conf.setLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 0);
    }
    // validate that the argument arrays cover all requested datanodes
    if (racks != null && numDataNodes > racks.length) {
        throw new IllegalArgumentException("The length of racks [" + racks.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (hosts != null && numDataNodes > hosts.length) {
        throw new IllegalArgumentException("The length of hosts [" + hosts.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    // Generate some hostnames if required
    if (racks != null && hosts == null) {
        hosts = new String[numDataNodes];
        for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
            hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
        }
    }
    if (simulatedCapacities != null && numDataNodes > simulatedCapacities.length) {
        throw new IllegalArgumentException("The length of simulatedCapacities [" + simulatedCapacities.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    if (dnConfOverlays != null && numDataNodes > dnConfOverlays.length) {
        throw new IllegalArgumentException("The length of dnConfOverlays [" + dnConfOverlays.length + "] is less than the number of datanodes [" + numDataNodes + "].");
    }
    String[] dnArgs = (operation == null || operation != StartupOption.ROLLBACK) ? null : new String[] { operation.getName() };
    DataNode[] dns = new DataNode[numDataNodes];
    for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
        Configuration dnConf = new HdfsConfiguration(conf);
        if (dnConfOverlays != null) {
            dnConf.addResource(dnConfOverlays[i]);
        }
        // Set up datanode address
        setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
        if (manageDfsDirs) {
            String dirs = makeDataNodeDirs(i, storageTypes == null ? null : storageTypes[i - curDatanodesNum]);
            dnConf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
            conf.set(DFS_DATANODE_DATA_DIR_KEY, dirs);
        }
        if (simulatedCapacities != null) {
            SimulatedFSDataset.setFactory(dnConf);
            dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, simulatedCapacities[i - curDatanodesNum]);
        }
        LOG.info("Starting DataNode " + i + " with " + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": " + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
        if (hosts != null) {
            dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
            LOG.info("Starting DataNode " + i + " with hostname set to: " + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
        }
        if (racks != null) {
            String name = hosts[i - curDatanodesNum];
            LOG.info("Adding node with hostname : " + name + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(name, racks[i - curDatanodesNum]);
        }
        // save config
        Configuration newconf = new HdfsConfiguration(dnConf);
        if (hosts != null) {
            NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
        }
        SecureResources secureResources = null;
        if (UserGroupInformation.isSecurityEnabled() && conf.get(DFS_DATA_TRANSFER_PROTECTION_KEY) == null) {
            try {
                secureResources = SecureDataNodeStarter.getSecureResources(dnConf);
            } catch (Exception ex) {
                ex.printStackTrace();
            }
        }
        final int maxRetriesOnSasl = conf.getInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT);
        int numRetries = 0;
        DataNode dn = null;
        while (true) {
            try {
                dn = DataNode.instantiateDataNode(dnArgs, dnConf, secureResources);
                break;
            } catch (IOException e) {
                // When security is enabled, rapidly starting multiple DataNodes
                // with the same principal can be rejected by the KDC as a
                // replay attack; back off briefly and retry.
                if (UserGroupInformation.isSecurityEnabled() && numRetries < maxRetriesOnSasl) {
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException ie) {
                        Thread.currentThread().interrupt();
                        break;
                    }
                    ++numRetries;
                    continue;
                }
                throw e;
            }
        }
        if (dn == null)
            throw new IOException("Cannot start DataNode in " + dnConf.get(DFS_DATANODE_DATA_DIR_KEY));
        //since HDFS does things based on host|ip:port, we need to add the
        //mapping for the service to rackId
        String service = SecurityUtil.buildTokenService(dn.getXferAddress()).toString();
        if (racks != null) {
            LOG.info("Adding node with service : " + service + " to rack " + racks[i - curDatanodesNum]);
            StaticMapping.addNodeToRack(service, racks[i - curDatanodesNum]);
        }
        dn.runDatanodeDaemon();
        dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs, secureResources, dn.getIpcPort()));
        dns[i - curDatanodesNum] = dn;
    }
    this.numDataNodes += numDataNodes;
    waitActive();
    setDataNodeStorageCapacities(curDatanodesNum, numDataNodes, dns, storageCapacities);
    /* memorize storage capacities */
    if (storageCapacities != null) {
        storageCap.addAll(Arrays.asList(storageCapacities));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) TimeoutException(java.util.concurrent.TimeoutException) ServiceFailedException(org.apache.hadoop.ha.ServiceFailedException) ReplicaNotFoundException(org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) SecureResources(org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources)
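
The dnConfOverlays parameter is the interesting addition here: each new datanode gets the shared conf plus its own overlay, layered in via dnConf.addResource(dnConfOverlays[i]). Below is a minimal sketch, assuming a test context; the class name ConfOverlaySketch and the choice of dfs.datanode.balance.bandwidthPerSec as the per-node setting are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ConfOverlaySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .build();
        try {
            // One overlay per new datanode; each overlay carries only the
            // settings that should differ from the shared conf.
            Configuration[] overlays = new Configuration[2];
            for (int i = 0; i < overlays.length; i++) {
                overlays[i] = new Configuration(false);
                overlays[i].setLong("dfs.datanode.balance.bandwidthPerSec",
                    (i + 1) * 1024L * 1024L);
            }
            // storageTypes, racks, hosts, and capacities are all left null;
            // no hosts file, no addr/host config checks
            cluster.startDataNodes(conf, 2, null, true, null, null, null,
                null, null, false, false, false, overlays);
            cluster.waitActive();
        } finally {
            cluster.shutdown();
        }
    }
}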

Example 3 with SecureResources

use of org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources in project hadoop by apache.

In the class MiniDFSCluster, the method restartDataNode:

/**
   * Restart a datanode, on the same port if requested
   * @param dnprop the datanode to restart
   * @param keepPort whether to use the same port 
   * @return true if restarting is successful
   * @throws IOException
   */
public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException {
    Configuration conf = dnprop.conf;
    String[] args = dnprop.dnArgs;
    SecureResources secureResources = dnprop.secureResources;
    // save cloned config
    Configuration newconf = new HdfsConfiguration(conf);
    if (keepPort) {
        InetSocketAddress addr = dnprop.datanode.getXferAddress();
        conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort());
        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
    }
    final DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
    final DataNodeProperties dnp = new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort());
    dataNodes.add(dnp);
    numDataNodes++;
    setDataNodeStorageCapacities(dataNodes.lastIndexOf(dnp), newDn, storageCap.toArray(new long[][] {}));
    return true;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) SecureResources(org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources)
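
In practice this pairs with stopDataNode: a test captures the returned DataNodeProperties, does whatever it needs while the node is down, then hands the properties back to restartDataNode. A minimal sketch, assuming a running cluster; the helper name bounceFirstDataNode is illustrative.

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public class RestartSketch {
    // Bounce the first datanode and bring it back on the same ports, which
    // restartDataNode(dnprop, true) arranges by copying the old transfer
    // and IPC addresses into the saved config.
    static void bounceFirstDataNode(MiniDFSCluster cluster) throws Exception {
        DataNodeProperties dnprop = cluster.stopDataNode(0);
        // ... the node is down here; e.g. tamper with its block files ...
        cluster.restartDataNode(dnprop, true); // keepPort = true
        cluster.waitActive(); // wait for the node to re-register
    }
}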

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 3 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 3 uses
SecureResources (org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources): 3 uses
IOException (java.io.IOException): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 1 use
URISyntaxException (java.net.URISyntaxException): 1 use
TimeoutException (java.util.concurrent.TimeoutException): 1 use
ServiceFailedException (org.apache.hadoop.ha.ServiceFailedException): 1 use
ReplicaNotFoundException (org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException): 1 use
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 1 use
FsVolumeImpl (org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl): 1 use