
Example 11 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class BlockReaderLocalLegacy, method getBlockPathInfo.

private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
        ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
        Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
        StorageType storageType) throws IOException {
    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
    BlockLocalPathInfo pathinfo;
    ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node, conf, timeout, connectToDnViaHostname);
    try {
        // make RPC to local datanode to find local pathnames of blocks
        pathinfo = proxy.getBlockLocalPathInfo(blk, token);
        // We can't cache the path of a replica on transient storage: if the
        // replica is evicted it moves to a different path, and the cached path
        // is invalidated.  Therefore, our only option is to skip caching.
        if (pathinfo != null && !storageType.isTransient()) {
            LOG.debug("Cached location of block {} as {}", blk, pathinfo);
            localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
        }
    } catch (IOException e) {
        // Reset proxy on error
        localDatanodeInfo.resetDatanodeProxy();
        throw e;
    }
    return pathinfo;
}
Also used : BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), IOException (java.io.IOException), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
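
For reference, a minimal, hedged sketch of driving the same RPC by hand is shown below. The datanode IPC address is a placeholder, the proxy factory is the same DFSUtilClient helper shown in Example 14 below, and a real ExtendedBlock and block token would normally come from a LocatedBlock returned by the namenode.

import java.net.InetSocketAddress;
import javax.net.SocketFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class BlockLocalPathInfoSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder datanode IPC address; substitute a real host:port.
        InetSocketAddress dnAddr = NetUtils.createSocketAddr("127.0.0.1:9867");
        SocketFactory socketFactory =
            NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class);
        ClientDatanodeProtocol proxy =
            DFSUtilClient.createClientDatanodeProtocolProxy(
                dnAddr, UserGroupInformation.getCurrentUser(), conf, socketFactory);
        // With a real ExtendedBlock blk and Token<BlockTokenIdentifier> token
        // (both taken from a LocatedBlock), the call mirrors the method above:
        // BlockLocalPathInfo pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    }
}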

Example 12 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class CancelCommand, method cancelPlan.

/**
   * Cancels a running plan.
   *
   * @param planData - Plan data.
   * @throws IOException
   */
private void cancelPlan(String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        dataNode.cancelDiskBalancePlan(planHash);
    } catch (DiskBalancerException ex) {
        LOG.error("Cancelling plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used : NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
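
Note how the cancel identifier is derived: it is just the hex digest of the plan JSON (DigestUtils.shaHex yields a SHA-1 hex string), so a caller that still holds the submitted plan can recompute the value that cancelPlanUsingHash in Example 13 below expects. A minimal sketch; readPlanJson is a hypothetical helper:

import org.apache.commons.codec.digest.DigestUtils;

// planJson is assumed to be the exact JSON string originally submitted.
String planJson = readPlanJson();                 // hypothetical helper
String planHash = DigestUtils.shaHex(planJson);   // same digest as in cancelPlan above
// dataNode.cancelDiskBalancePlan(planHash);      // see Example 13 for the RPC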

Example 13 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class CancelCommand, method cancelPlanUsingHash.

/**
   * Cancels a running plan.
   * @param nodeAddress - Address of the data node.
   * @param hash - Hex digest of the plan (computed with DigestUtils.shaHex, as
   *             in cancelPlan above), which can be read from the datanode
   *             using the query status command.
   * @throws IOException
   */
private void cancelPlanUsingHash(String nodeAddress, String hash) throws IOException {
    Preconditions.checkNotNull(nodeAddress);
    Preconditions.checkNotNull(hash);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
    try {
        dataNode.cancelDiskBalancePlan(hash);
    } catch (DiskBalancerException ex) {
        LOG.error("Cancelling plan on  {} failed. Result: {}, Message: {}", nodeAddress, ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used : ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
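
In practice the hash comes from the datanode itself. A hedged sketch, assuming a proxy already obtained via getDataNodeProxy: queryDiskBalancerPlan is the RPC behind the query status command, and its DiskBalancerWorkStatus carries the plan ID that cancelDiskBalancePlan expects.

import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;

DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
String hash = status.getPlanID();   // the value to pass to cancelDiskBalancePlan
dataNode.cancelDiskBalancePlan(hash);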

Example 14 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class Command, method getDataNodeProxy.

/**
   * Creates a connection to a datanode (copied from DFSAdmin.java).
   *
   * @param datanode - datanode address as host:port.
   * @return ClientDatanodeProtocol proxy to the datanode.
   * @throws IOException
   */
public ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException {
    InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
    // For datanode proxy the server principal should be DN's one.
    getConf().set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, getConf().get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
    // Create the client
    ClientDatanodeProtocol dnProtocol = DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), getConf(), NetUtils.getSocketFactory(getConf(), ClientDatanodeProtocol.class));
    return dnProtocol;
}
Also used : InetSocketAddress (java.net.InetSocketAddress), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
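
A hedged usage sketch of the proxy this method returns. The address is a placeholder host:port, and the getDiskBalancerSetting call with DiskBalancerConstants.DISKBALANCER_VOLUME_NAME is the same one Example 15 below relies on; error handling is elided.

// Inside a Command subclass.
String dnAddress = "dn1.example.com:9867";   // placeholder host:ipcPort
ClientDatanodeProtocol dn = getDataNodeProxy(dnAddress);
String volumesJson = dn.getDiskBalancerSetting(
    DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);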

Example 15 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class Command, method populatePathNames.

/**
   * Reads the physical paths of the disks we are balancing. The paths are only
   * used to make the disk balancer output human-friendly; they are not used in
   * the balancing itself.
   *
   * @param node - Disk Balancer Node.
   * @throws IOException
   */
protected void populatePathNames(DiskBalancerDataNode node) throws IOException {
    // A "file:" cluster URI means the cluster was loaded from a local
    // snapshot, so there is no datanode to send the RPC to.
    if (getClusterURI().getScheme().startsWith("file")) {
        return;
    }
    String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
    ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
    String volumeNameJson = dnClient.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
    @SuppressWarnings("unchecked") Map<String, String> volumeMap = READER.readValue(volumeNameJson);
    for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
        for (DiskBalancerVolume vol : set.getVolumes()) {
            if (volumeMap.containsKey(vol.getUuid())) {
                vol.setPath(volumeMap.get(vol.getUuid()));
            }
        }
    }
}
Also used : DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
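
READER is declared outside this excerpt. A plausible definition, assuming Jackson and a JSON object mapping volume UUIDs to paths (e.g. {"<uuid>": "/data/disk1"}), would be:

import java.util.HashMap;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

// Assumed declaration; the actual field lives in Command.java.
private static final ObjectReader READER =
    new ObjectMapper().readerFor(HashMap.class);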

Aggregations

ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 22 usages
IOException (java.io.IOException): 7 usages
InetSocketAddress (java.net.InetSocketAddress): 5 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4 usages
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 4 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
Server (org.apache.hadoop.ipc.Server): 3 usages
Test (org.junit.Test): 3 usages
LinkedList (java.util.LinkedList): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 2 usages
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 2 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2 usages