use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
the class TestReplication method checkFile.
/* check that at least two nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
  Configuration conf = fileSys.getConf();
  ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(),
      ClientProtocol.class).getProxy();
  waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
  LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
  FileStatus stat = fileSys.getFileStatus(name);
  BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
  // verify that rack locations match
  assertTrue(blockLocations.length == locations.locatedBlockCount());
  for (int i = 0; i < blockLocations.length; i++) {
    LocatedBlock blk = locations.get(i);
    DatanodeInfo[] datanodes = blk.getLocations();
    String[] topologyPaths = blockLocations[i].getTopologyPaths();
    assertTrue(topologyPaths.length == datanodes.length);
    for (int j = 0; j < topologyPaths.length; j++) {
      boolean found = false;
      for (int k = 0; k < racks.length; k++) {
        if (topologyPaths[j].startsWith(racks[k])) {
          found = true;
          break;
        }
      }
      assertTrue(found);
    }
  }
  boolean isOnSameRack = true, isNotOnSameRack = true;
  for (LocatedBlock blk : locations.getLocatedBlocks()) {
    DatanodeInfo[] datanodes = blk.getLocations();
    if (datanodes.length <= 1)
      break;
    if (datanodes.length == 2) {
      isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(
          datanodes[1].getNetworkLocation()));
      break;
    }
    isOnSameRack = false;
    isNotOnSameRack = false;
    for (int i = 0; i < datanodes.length - 1; i++) {
      LOG.info("datanode " + i + ": " + datanodes[i]);
      boolean onRack = false;
      for (int j = i + 1; j < datanodes.length; j++) {
        if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
          onRack = true;
        }
      }
      if (onRack) {
        isOnSameRack = true;
      }
      if (!onRack) {
        isNotOnSameRack = true;
      }
      if (isOnSameRack && isNotOnSameRack)
        break;
    }
    if (!isOnSameRack || !isNotOnSameRack)
      break;
  }
  assertTrue(isOnSameRack);
  assertTrue(isNotOnSameRack);
}
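The waitForBlockReplication helper called above is not reproduced in this snippet. As a rough sketch of how such a wait could be built on the same ClientProtocol#getBlockLocations call, the following polls until every block reports enough replica locations; the method name, sleep interval, and absence of a timeout are assumptions for illustration, not the test's actual helper:

// Hypothetical sketch: poll ClientProtocol#getBlockLocations until every block
// of the file reports at least `expected` replica locations.
private static void awaitReplication(ClientProtocol namenode, String path, int expected)
    throws IOException, InterruptedException {
  while (true) {
    boolean allReplicated = true;
    LocatedBlocks blocks = namenode.getBlockLocations(path, 0, Long.MAX_VALUE);
    for (LocatedBlock blk : blocks.getLocatedBlocks()) {
      if (blk.getLocations().length < expected) {
        allReplicated = false;   // this block is still under-replicated
        break;
      }
    }
    if (allReplicated) {
      return;                    // every block has reached the expected replication
    }
    Thread.sleep(500);           // back off before asking the NameNode again
  }
}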
use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
the class TestHAStateTransitions method testIsAtLeastOneActive.
/**
* This test also serves to test
* {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration, String)} and
* {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration, String, String)}
* by virtue of the fact that it wouldn't work properly if the proxies
* returned were not for the correct NNs.
*/
@Test(timeout = 300000)
public void testIsAtLeastOneActive() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .build();
  try {
    Configuration conf = new HdfsConfiguration();
    HATestUtil.setFailoverConfigurations(cluster, conf);
    List<ClientProtocol> namenodes = HAUtil.getProxiesForAllNameNodesInNameservice(
        conf, HATestUtil.getLogicalHostname(cluster));
    assertEquals(2, namenodes.size());
    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToActive(0);
    assertTrue(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToStandby(0);
    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToActive(1);
    assertTrue(HAUtil.isAtLeastOneActive(namenodes));
    cluster.transitionToStandby(1);
    assertFalse(HAUtil.isAtLeastOneActive(namenodes));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
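HAUtil#isAtLeastOneActive itself is not shown on this page. One plausible way to implement such a check over a list of ClientProtocol proxies is to issue a cheap read RPC against each NameNode and treat a StandbyException as "not active"; the sketch below is an assumption for illustration, not the actual HAUtil code:

// Hypothetical sketch: probe each ClientProtocol proxy with a cheap RPC and
// interpret a StandbyException as "this NameNode is not currently active".
static boolean atLeastOneActive(List<ClientProtocol> namenodes) throws IOException {
  for (ClientProtocol nn : namenodes) {
    try {
      nn.getFileInfo("/");        // any RPC a standby NameNode rejects would do
      return true;                // the call succeeded, so this NameNode is active
    } catch (RemoteException re) {
      if (!(re.unwrapRemoteException(StandbyException.class) instanceof StandbyException)) {
        throw re;                 // a genuine failure rather than a standby refusal
      }
      // StandbyException: keep probing the remaining NameNodes
    }
  }
  return false;
}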
use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
the class TestIsMethodSupported method testClientNamenodeProtocol.
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientProtocol cp = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      ClientProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
  RpcClientUtil.isMethodSupported(cp, ClientNamenodeProtocolPB.class,
      RPC.RpcKind.RPC_PROTOCOL_BUFFER,
      RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
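RpcClientUtil.isMethodSupported returns a boolean, which the snippet above does not check. Callers that need to branch on protocol capability can act on the result; a small illustrative addition (the assertion is not part of the original test):

// Sketch: act on the boolean result instead of discarding it.
boolean supported = RpcClientUtil.isMethodSupported(cp,
    ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
    RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
assertTrue("expected the NameNode to support ClientNamenodeProtocol#mkdirs", supported);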
use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
the class DFSAdmin method saveNamespace.
/**
* Command to ask the namenode to save the namespace.
* Usage: hdfs dfsadmin -saveNamespace [-beforeShutdown]
* @see ClientProtocol#saveNamespace(long, long)
*/
public int saveNamespace(String[] argv) throws IOException {
  final DistributedFileSystem dfs = getDFS();
  final Configuration dfsConf = dfs.getConf();
  long timeWindow = 0;
  long txGap = 0;
  if (argv.length > 1 && "-beforeShutdown".equals(argv[1])) {
    final long checkpointPeriod = dfsConf.getTimeDuration(
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT, TimeUnit.SECONDS);
    final long checkpointTxnCount = dfsConf.getLong(
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
    final int toleratePeriodNum = dfsConf.getInt(
        DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_KEY,
        DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_DEFAULT);
    timeWindow = checkpointPeriod * toleratePeriodNum;
    txGap = checkpointTxnCount * toleratePeriodNum;
    System.out.println("Do checkpoint if necessary before stopping "
        + "namenode. The time window is " + timeWindow + " seconds, and the "
        + "transaction gap is " + txGap);
  }
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
      if (saved) {
        System.out.println("Save namespace successful for " + proxy.getAddress());
      } else {
        System.out.println("No extra checkpoint has been made for " + proxy.getAddress());
      }
    }
  } else {
    boolean saved = dfs.saveNamespace(timeWindow, txGap);
    if (saved) {
      System.out.println("Save namespace successful");
    } else {
      System.out.println("No extra checkpoint has been made");
    }
  }
  return 0;
}
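Outside DFSAdmin, the same ClientProtocol#saveNamespace(long, long) call can be issued against a proxy directly, but the NameNode has to be in safe mode for the image to be written. A minimal hypothetical sketch, assuming conf and fsUri identify the target file system and reusing the proxy factory from the first snippet on this page:

// Hypothetical sketch: call saveNamespace on a ClientProtocol proxy directly.
// A timeWindow and txGap of 0 request an unconditional save; the NameNode
// must be in safe mode or the call will fail.
ClientProtocol nn = NameNodeProxies.createProxy(conf, fsUri, ClientProtocol.class).getProxy();
nn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);   // enter safe mode
try {
  boolean saved = nn.saveNamespace(0, 0);
  System.out.println(saved ? "Namespace saved" : "No checkpoint was needed");
} finally {
  nn.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false); // restore normal operation
}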
use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.
the class DFSAdmin method refreshNodes.
/**
* Command to ask the namenode to reread the hosts and excluded hosts
* file.
* Usage: hdfs dfsadmin -refreshNodes
* @exception IOException
*/
public int refreshNodes() throws IOException {
  int exitCode = -1;
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " + proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;
  return exitCode;
}
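The HA and non-HA branches above follow the same shape as saveNamespace; for reuse, the pattern can be folded into a small helper. A hypothetical sketch built only from calls already shown on this page (the helper's name and placement are assumptions):

// Hypothetical helper: re-read the host/exclude lists on every NameNode behind
// the given file system, mirroring DFSAdmin's HA and non-HA branches above.
static void refreshAllNameNodes(DistributedFileSystem dfs) throws IOException {
  Configuration conf = dfs.getConf();
  URI uri = dfs.getUri();
  if (HAUtilClient.isLogicalUri(conf, uri)) {
    // HA: the URI authority is a nameservice id; contact every NameNode behind it.
    String nsId = uri.getHost();
    for (ProxyAndInfo<ClientProtocol> proxy :
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class)) {
      proxy.getProxy().refreshNodes();
    }
  } else {
    // Non-HA: a single NameNode, reachable through the file system handle itself.
    dfs.refreshNodes();
  }
}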