use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
the class BlockReaderLocalLegacy method getBlockPathInfo.
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname,
    StorageType storageType) throws IOException {
  LocalDatanodeInfo localDatanodeInfo =
      getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi,
      node, conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    // A replica on transient storage can be evicted and moved to a
    // different medium, which would leave any cached path
    // invalidated. Therefore, our only option is to skip caching.
    if (pathinfo != null && !storageType.isTransient()) {
      LOG.debug("Cached location of block {} as {}", blk, pathinfo);
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    // Reset proxy on error
    localDatanodeInfo.resetDatanodeProxy();
    throw e;
  }
  return pathinfo;
}
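The BlockLocalPathInfo returned here is what enables the legacy short-circuit read: it names the block file (and its checksum file) on the local disk. A minimal sketch of how a caller might consume it; the LocalBlockOpener helper is hypothetical, not part of Hadoop:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;

// Hypothetical helper: opens the block file named by the path info
// directly, bypassing the datanode's data transfer protocol.
class LocalBlockOpener {
  static FileInputStream openLocalBlock(BlockLocalPathInfo pathinfo)
      throws IOException {
    // getBlockPath() is the datanode-local file name of the block data;
    // getMetaPath() names the matching checksum file.
    File blockFile = new File(pathinfo.getBlockPath());
    return new FileInputStream(blockFile);
  }
}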
use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
the class CancelCommand method cancelPlan.
/**
* Cancels a running plan.
*
* @param planData - Plan data.
* @throws IOException
*/
private void cancelPlan(String planData) throws IOException {
  Preconditions.checkNotNull(planData);
  NodePlan plan = NodePlan.parseJson(planData);
  String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
  Preconditions.checkNotNull(dataNodeAddress);
  ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
  String planHash = DigestUtils.shaHex(planData);
  try {
    dataNode.cancelDiskBalancePlan(planHash);
  } catch (DiskBalancerException ex) {
    LOG.error("Cancelling plan on {} failed. Result: {}, Message: {}",
        plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
    throw ex;
  }
}
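cancelDiskBalancePlan identifies the plan by a digest of its JSON body, so the cancel request must hash exactly the bytes that were originally submitted. A minimal sketch of a caller inside CancelCommand; the plan-file location is an assumption for illustration:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

// Hypothetical caller: re-read the plan JSON that was submitted earlier
// and cancel it on its datanode.
void cancelSubmittedPlan() throws IOException {
  String planData = new String(
      Files.readAllBytes(Paths.get("/tmp/plan.json")),
      StandardCharsets.UTF_8);
  // cancelPlan() hashes these exact bytes with DigestUtils.shaHex, so the
  // JSON must match the submitted plan byte-for-byte.
  cancelPlan(planData);
}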
use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
the class CancelCommand method cancelPlanUsingHash.
/**
* Cancels a running plan.
* @param nodeAddress - Address of the data node.
* @param hash - Sha512 hash of the plan, which can be read from datanode
* using query status command.
* @throws IOException
*/
private void cancelPlanUsingHash(String nodeAddress, String hash)
    throws IOException {
  Preconditions.checkNotNull(nodeAddress);
  Preconditions.checkNotNull(hash);
  ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
  try {
    dataNode.cancelDiskBalancePlan(hash);
  } catch (DiskBalancerException ex) {
    LOG.error("Cancelling plan on {} failed. Result: {}, Message: {}",
        nodeAddress, ex.getResult().toString(), ex.getMessage());
    throw ex;
  }
}
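As the javadoc notes, the hash can be read back from the datanode itself. A minimal sketch inside CancelCommand that queries the running plan's ID and cancels by it; queryDiskBalancerPlan and DiskBalancerWorkStatus belong to the same protocol, but this pairing is illustrative:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;

// Hedged sketch: cancel whatever plan is currently running on a datanode.
// nodeAddress is host:ipcPort.
void cancelCurrentPlan(String nodeAddress) throws IOException {
  ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
  // queryDiskBalancerPlan() reports the status of the running plan,
  // including the plan ID (the hash expected by cancelDiskBalancePlan).
  DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
  cancelPlanUsingHash(nodeAddress, status.getPlanID());
}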
use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
the class Command method getDataNodeProxy.
/**
* Copied from DFSAdmin.java. -- Creates a connection to dataNode.
*
* @param datanode - dataNode.
* @return ClientDataNodeProtocol
* @throws IOException
*/
public ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // For a datanode proxy, the server principal should be the DN's own.
  getConf().set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      getConf().get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol =
      DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(),
          getConf(),
          NetUtils.getSocketFactory(getConf(), ClientDatanodeProtocol.class));
  return dnProtocol;
}
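With the proxy in hand, any ClientDatanodeProtocol RPC can be issued against that datanode. A minimal sketch inside a Command subclass using the read-only getBalancerBandwidth() call; the host:port address is illustrative:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

// Hedged sketch: build a proxy and issue a simple RPC against it.
void printBalancerBandwidth() throws IOException {
  ClientDatanodeProtocol dn = getDataNodeProxy("dn1.example.com:9867");
  // getBalancerBandwidth() returns the datanode's current bandwidth cap
  // for the balancer, in bytes per second.
  System.out.println("Balancer bandwidth: " + dn.getBalancerBandwidth());
}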
use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
the class Command method populatePathNames.
/**
* Reads the Physical path of the disks we are balancing. This is needed to
* make the disk balancer human friendly and not used in balancing.
*
* @param node - Disk Balancer Node.
*/
protected void populatePathNames(DiskBalancerDataNode node)
    throws IOException {
  // If the cluster is a local file system, there is no need to
  // invoke an RPC call to the dataNode.
  if (getClusterURI().getScheme().startsWith("file")) {
    return;
  }
  String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
  ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
  String volumeNameJson = dnClient.getDiskBalancerSetting(
      DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
  @SuppressWarnings("unchecked")
  Map<String, String> volumeMap = READER.readValue(volumeNameJson);
  for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
    for (DiskBalancerVolume vol : set.getVolumes()) {
      if (volumeMap.containsKey(vol.getUuid())) {
        vol.setPath(volumeMap.get(vol.getUuid()));
      }
    }
  }
}
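READER is a Jackson ObjectReader declared elsewhere in Command; the datanode returns the DISKBALANCER_VOLUME_NAME setting as a JSON object mapping volume UUIDs to physical paths. A minimal standalone sketch of that deserialization, with an illustrative payload and reader setup:

import java.io.IOException;
import java.util.Map;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

class VolumeMapDemo {
  // Mirrors the kind of reader Command uses: readerFor(Map.class)
  // deserializes a JSON object into a Map of strings.
  private static final ObjectReader READER =
      new ObjectMapper().readerFor(Map.class);

  public static void main(String[] args) throws IOException {
    // Illustrative payload: volume UUID -> physical path on the datanode.
    String volumeNameJson =
        "{\"volume-uuid-1\":\"/data/disk1\",\"volume-uuid-2\":\"/data/disk2\"}";
    Map<String, String> volumeMap = READER.readValue(volumeNameJson);
    System.out.println(volumeMap);
  }
}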