Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class NameNodeProxiesClient, method createProxyWithClientProtocol.
/**
* Creates the namenode proxy with the ClientProtocol. This will handle
* creation of either HA- or non-HA-enabled proxy objects, depending upon
* if the provided URI is a configured logical URI.
*
* @param conf the configuration containing the required IPC
* properties, client failover configurations, etc.
* @param nameNodeUri the URI pointing either to a specific NameNode
* or to a logical nameservice.
* @param fallbackToSimpleAuth set to true or false during calls to indicate
* if a secure client falls back to simple auth
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
* @see NameNodeProxies#createProxy(Configuration, URI, Class)
*/
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
    Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<ClientProtocol> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, ClientProtocol.class,
          true, fallbackToSimpleAuth);
  if (failoverProxyProvider == null) {
    // Non-HA case: the URI names a single NameNode, so build a direct proxy.
    InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
    Text dtService = SecurityUtil.buildTokenService(nnAddr);
    ClientProtocol proxy = createNonHAProxyWithClientProtocol(nnAddr, conf,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
    return new ProxyAndInfo<>(proxy, dtService, nnAddr);
  } else {
    // HA case: the failover proxy provider selects the active NameNode.
    return createHAProxy(conf, nameNodeUri, ClientProtocol.class,
        failoverProxyProvider);
  }
}
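A minimal caller sketch (the hdfs://nameservice1 logical URI is a hypothetical HA nameservice; imports are omitted as in the snippets above):

Configuration conf = new HdfsConfiguration();
URI nnUri = URI.create("hdfs://nameservice1"); // hypothetical logical URI
AtomicBoolean fellBackToSimpleAuth = new AtomicBoolean(false);
ProxyAndInfo<ClientProtocol> proxyInfo =
    NameNodeProxiesClient.createProxyWithClientProtocol(conf, nnUri,
        fellBackToSimpleAuth);
ClientProtocol namenode = proxyInfo.getProxy();
// Identifies which delegation tokens apply to this proxy.
Text dtService = proxyInfo.getDelegationTokenService();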
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class DfsServlet, method createNameNodeProxy.
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // If we are running in the NameNode, use it directly rather than via RPC.
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
      NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
      ClientProtocol.class).getProxy();
}
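A hypothetical servlet handler built on this method might look like the following (the subclass, path, and response format are illustrative, not part of DfsServlet):

// Illustrative only: a DfsServlet subclass that checks a path over RPC.
@Override
public void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws IOException {
  ClientProtocol nn = createNameNodeProxy();
  HdfsFileStatus status = nn.getFileInfo("/user/example"); // hypothetical path
  resp.getWriter().println(status != null ? "exists" : "missing");
}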
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class DFSAdmin, method setSafeMode.
/**
* Safe mode maintenance command.
* Usage: hdfs dfsadmin -safemode [enter | leave | get | wait | forceExit]
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if the filesystem does not exist.
*/
public void setSafeMode(String[] argv, int idx) throws IOException {
  if (idx != argv.length - 1) {
    printUsage("-safemode");
    return;
  }
  HdfsConstants.SafeModeAction action;
  boolean waitExitSafe = false;
  if ("leave".equalsIgnoreCase(argv[idx])) {
    action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
  } else if ("enter".equalsIgnoreCase(argv[idx])) {
    action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
  } else if ("get".equalsIgnoreCase(argv[idx])) {
    action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
  } else if ("wait".equalsIgnoreCase(argv[idx])) {
    // "wait" queries the current state, then polls until safe mode is off.
    action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
    waitExitSafe = true;
  } else if ("forceExit".equalsIgnoreCase(argv[idx])) {
    action = HdfsConstants.SafeModeAction.SAFEMODE_FORCE_EXIT;
  } else {
    printUsage("-safemode");
    return;
  }
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    // In an HA setup, apply the action to every NameNode in the nameservice.
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      ClientProtocol haNn = proxy.getProxy();
      boolean inSafeMode = haNn.setSafeMode(action, false);
      if (waitExitSafe) {
        inSafeMode = waitExitSafeMode(haNn, inSafeMode);
      }
      System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF")
          + " in " + proxy.getAddress());
    }
  } else {
    boolean inSafeMode = dfs.setSafeMode(action);
    if (waitExitSafe) {
      inSafeMode = waitExitSafeMode(dfs, inSafeMode);
    }
    System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
  }
}
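The waitExitSafeMode helper is not shown in this excerpt. A minimal sketch of the polling it implies, with the five-second interval as an assumption:

// Sketch only: poll the NameNode until it reports safe mode is off.
private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
    throws IOException {
  while (inSafeMode) {
    try {
      Thread.sleep(5000); // the poll interval here is an assumption
    } catch (InterruptedException e) {
      throw new IOException("Interrupted while waiting to exit safe mode", e);
    }
    // SAFEMODE_GET queries the state without changing it.
    inSafeMode = nn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET,
        false);
  }
  return inSafeMode;
}

A matching overload taking a DistributedFileSystem would poll dfs.setSafeMode(...) the same way for the non-HA branch.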
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class DFSAdmin, method setBalancerBandwidth.
/**
* Command to ask the namenode to set the balancer bandwidth for all of the
* datanodes.
* Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurs while setting the bandwidth.
*/
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
  long bandwidth;
  int exitCode = -1;
  try {
    bandwidth = Long.parseLong(argv[idx]);
  } catch (NumberFormatException nfe) {
    System.err.println("NumberFormatException: " + nfe.getMessage());
    System.err.println("Usage: hdfs dfsadmin"
        + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
    return exitCode;
  }
  if (bandwidth < 0) {
    System.err.println("Bandwidth should be a non-negative integer");
    return exitCode;
  }
  FileSystem fs = getFS();
  if (!(fs instanceof DistributedFileSystem)) {
    // Not an HDFS file system; nothing to configure.
    System.err.println("FileSystem is " + fs.getUri());
    return exitCode;
  }
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    // In an HA setup, push the new bandwidth to every NameNode in the
    // nameservice.
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().setBalancerBandwidth(bandwidth);
      System.out.println("Balancer bandwidth is set to " + bandwidth
          + " for " + proxy.getAddress());
    }
  } else {
    dfs.setBalancerBandwidth(bandwidth);
    System.out.println("Balancer bandwidth is set to " + bandwidth);
  }
  exitCode = 0;
  return exitCode;
}
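Since DFSAdmin is a Hadoop Tool, the command can also be driven programmatically; a minimal sketch (the bandwidth value is illustrative):

public static void main(String[] args) throws Exception {
  // Mirrors the CLI: hdfs dfsadmin -setBalancerBandwidth <bytes per second>
  DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());
  int rc = ToolRunner.run(admin,
      new String[] { "-setBalancerBandwidth", "104857600" }); // 100 MB/s
  System.exit(rc);
}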
Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
The class DFSAdmin, method metaSave.
/**
* Dumps DFS data structures into specified file.
* Usage: hdfs dfsadmin -metasave filename
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
* the file or path.
*/
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      // The dump is written on the NameNode side, into its log directory.
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname
          + " in the log directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname
        + " in the log directory of namenode " + dfs.getUri());
  }
  return 0;
}
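The same call is available directly on ClientProtocol; a minimal sketch, reusing the proxyInfo from the first example (the file name is hypothetical):

// metaSave writes the dump into the NameNode's log directory, not locally.
ClientProtocol namenode = proxyInfo.getProxy();
namenode.metaSave("metasave-report.txt"); // hypothetical file name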