Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class QueryCommand, method execute.
/**
 * Executes the client call.
 *
 * @param cmd - CommandLine
 */
@Override
public void execute(CommandLine cmd) throws Exception {
  LOG.info("Executing \"query plan\" command.");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
  verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
  String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
  Preconditions.checkNotNull(nodeName);
  nodeName = nodeName.trim();
  String nodeAddress = nodeName;

  // If the string is not in host:port format, append the default IPC port.
  if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
    int defaultIPC = NetUtils.createSocketAddr(
        getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
    nodeAddress = nodeName + ":" + defaultIPC;
    LOG.debug("Using default datanode port: {}", nodeAddress);
  }

  ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
  try {
    DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
    System.out.printf("Plan File: %s%nPlan ID: %s%nResult: %s%n",
        workStatus.getPlanFile(), workStatus.getPlanID(),
        workStatus.getResult().toString());
    if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
      System.out.printf("%s", workStatus.currentStateString());
    }
  } catch (DiskBalancerException ex) {
    // Pass the Throwable as its own argument so the stack trace is logged.
    LOG.error("Query plan failed.", ex);
    throw ex;
  }
}
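The interesting detail above is the host:port normalization. Below is a minimal, self-contained sketch of the same check; NodeAddressUtil and the example port 9867 are hypothetical, since the real code derives the fallback port from DFS_DATANODE_IPC_ADDRESS_DEFAULT at runtime.

import java.util.regex.Pattern;

// Hypothetical helper illustrating the normalization done in QueryCommand.execute.
public final class NodeAddressUtil {

  // Same shape the snippet checks for: a host followed by a 2-5 digit port.
  private static final Pattern HOST_PORT = Pattern.compile("[^:]+:[0-9]{2,5}");

  // Appends defaultPort unless nodeName already carries a port.
  static String normalize(String nodeName, int defaultPort) {
    String trimmed = nodeName.trim();
    return HOST_PORT.matcher(trimmed).matches() ? trimmed
        : trimmed + ":" + defaultPort;
  }

  public static void main(String[] args) {
    // 9867 is only an example; use your cluster's datanode IPC port.
    System.out.println(normalize("dn1.example.com", 9867));       // dn1.example.com:9867
    System.out.println(normalize("dn1.example.com:50020", 9867)); // unchanged
  }
}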
Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class DFSAdmin, method refreshNamenodes.
private int refreshNamenodes(String[] argv, int i) throws IOException {
  String datanode = argv[i];
  ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
  // Ask the datanode to re-read its namenode list from configuration.
  refreshProtocol.refreshNamenodes();
  return 0;
}
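This is the code path behind the `hdfs dfsadmin -refreshNamenodes <datanodehost:port>` command. A standalone sketch of the same call, outside DFSAdmin, might look as follows; RefreshNamenodesExample is a hypothetical class name, and the proxy creation reuses the calls shown in getDataNodeProxy further below.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical standalone caller; assumes a reachable datanode IPC address in args[0].
public final class RefreshNamenodesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    InetSocketAddress addr = NetUtils.createSocketAddr(args[0]); // e.g. "dn1:9867"
    ClientDatanodeProtocol dn = DFSUtilClient.createClientDatanodeProtocolProxy(
        addr, UserGroupInformation.getCurrentUser(), conf,
        NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
    dn.refreshNamenodes(); // DN reloads its namenode list from configuration
  }
}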
Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class DFSAdmin, method getDatanodeInfo.
private int getDatanodeInfo(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
  try {
    DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
    System.out.println(dnInfo.getDatanodeLocalReport());
  } catch (IOException ioe) {
    throw new IOException("Datanode unreachable. " + ioe, ioe);
  }
  return 0;
}
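Since getDatanodeInfo answers only when the datanode's IPC server is up, it doubles as a liveness probe, which is how `hdfs dfsadmin -getDatanodeInfo` is typically used during rolling restarts. A hedged sketch of a poll loop built on it follows; DatanodeWait and waitForDatanode are hypothetical names.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

// Hypothetical helper: block until a (re)starting datanode answers getDatanodeInfo.
public final class DatanodeWait {
  static void waitForDatanode(ClientDatanodeProtocol proxy, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (true) {
      try {
        proxy.getDatanodeInfo(); // succeeds once the DN's IPC server is serving again
        return;
      } catch (IOException e) {
        if (System.currentTimeMillis() > deadline) {
          throw new IOException("Datanode unreachable after " + timeoutMs + " ms", e);
        }
        Thread.sleep(1000); // back off briefly before the next probe
      }
    }
  }
}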
Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class DFSAdmin, method getDataNodeProxy.
private ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();
  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  // Create the client
  ClientDatanodeProtocol dnProtocol = DFSUtilClient.createClientDatanodeProtocolProxy(
      datanodeAddr, getUGI(), conf,
      NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
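One subtlety: the method mutates the Configuration returned by getConf(), so the datanode service-principal override persists in that shared object for any later call that reuses it. A copy-first variant avoids that; the following is a sketch under the assumption of the same surrounding DFSAdmin context (getConf, getUGI, and the key constants as above).

// Sketch of a defensive variant; identifiers match the snippet above.
private ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Copy so the principal override does not leak into the shared Configuration.
  Configuration conf = new Configuration(getConf());
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
  return DFSUtilClient.createClientDatanodeProtocolProxy(
      datanodeAddr, getUGI(), conf,
      NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
}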
Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class DFSAdmin, method triggerBlockReport.
public int triggerBlockReport(String[] argv) throws IOException {
  List<String> args = new LinkedList<String>();
  for (int j = 1; j < argv.length; j++) {
    args.add(argv[j]);
  }
  boolean incremental = StringUtils.popOption("-incremental", args);
  String hostPort = StringUtils.popFirstNonOption(args);
  if (hostPort == null) {
    System.err.println("You must specify a host:port pair.");
    return 1;
  }
  if (!args.isEmpty()) {
    System.err.print("Can't understand arguments: " +
        Joiner.on(" ").join(args) + "\n");
    return 1;
  }
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(hostPort);
  try {
    dnProxy.triggerBlockReport(
        new BlockReportOptions.Factory().setIncremental(incremental).build());
  } catch (IOException e) {
    System.err.println("triggerBlockReport error: " + e);
    return 1;
  }
  System.out.println("Triggering " +
      (incremental ? "an incremental " : "a full ") +
      "block report on " + hostPort + ".");
  return 0;
}
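The BlockReportOptions builder is the only datanode-side knob here. A minimal sketch of reusing it from other code follows; BlockReportTrigger is a hypothetical name, and the import path for BlockReportOptions is assumed to be org.apache.hadoop.hdfs.client.

import java.io.IOException;
import org.apache.hadoop.hdfs.client.BlockReportOptions; // import path assumed
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

// Hypothetical helper: ask one datanode for an immediate block report.
public final class BlockReportTrigger {
  static void trigger(ClientDatanodeProtocol dnProxy, boolean incremental)
      throws IOException {
    dnProxy.triggerBlockReport(new BlockReportOptions.Factory()
        .setIncremental(incremental) // true => report only recently changed blocks
        .build());
  }
}

An incremental report covers only recent block changes, while a full report re-sends every replica on the datanode and is correspondingly heavier on the namenode.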