Example 1 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

From the class TestReplication, method checkFile.

/* Check that at least two replicas share a rack, and that the replicas do not all end up on the same rack. */
private void checkFile(FileSystem fileSys, Path name, int repl) throws IOException {
    Configuration conf = fileSys.getConf();
    ClientProtocol namenode = NameNodeProxies.createProxy(conf, fileSys.getUri(), ClientProtocol.class).getProxy();
    waitForBlockReplication(name.toString(), namenode, Math.min(numDatanodes, repl), -1);
    LocatedBlocks locations = namenode.getBlockLocations(name.toString(), 0, Long.MAX_VALUE);
    FileStatus stat = fileSys.getFileStatus(name);
    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat, 0L, Long.MAX_VALUE);
    // verify that rack locations match
    assertEquals(locations.locatedBlockCount(), blockLocations.length);
    for (int i = 0; i < blockLocations.length; i++) {
        LocatedBlock blk = locations.get(i);
        DatanodeInfo[] datanodes = blk.getLocations();
        String[] topologyPaths = blockLocations[i].getTopologyPaths();
        assertEquals(datanodes.length, topologyPaths.length);
        for (int j = 0; j < topologyPaths.length; j++) {
            boolean found = false;
            for (int k = 0; k < racks.length; k++) {
                if (topologyPaths[j].startsWith(racks[k])) {
                    found = true;
                    break;
                }
            }
            assertTrue(found);
        }
    }
    // Verify the placement invariant: at least one pair of replicas shares
    // a rack, and the replicas do not all sit on a single rack.
    boolean isOnSameRack = true, isNotOnSameRack = true;
    for (LocatedBlock blk : locations.getLocatedBlocks()) {
        DatanodeInfo[] datanodes = blk.getLocations();
        if (datanodes.length <= 1)
            break;
        if (datanodes.length == 2) {
            // With exactly two replicas, only the "different racks" half of
            // the invariant can be checked.
            isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(datanodes[1].getNetworkLocation()));
            break;
        }
        isOnSameRack = false;
        isNotOnSameRack = false;
        // Compare each replica's rack against every later replica's rack.
        for (int i = 0; i < datanodes.length - 1; i++) {
            LOG.info("datanode " + i + ": " + datanodes[i]);
            boolean onRack = false;
            for (int j = i + 1; j < datanodes.length; j++) {
                if (datanodes[i].getNetworkLocation().equals(datanodes[j].getNetworkLocation())) {
                    onRack = true;
                }
            }
            if (onRack) {
                isOnSameRack = true;
            }
            if (!onRack) {
                isNotOnSameRack = true;
            }
            if (isOnSameRack && isNotOnSameRack)
                break;
        }
        if (!isOnSameRack || !isNotOnSameRack)
            break;
    }
    assertTrue(isOnSameRack);
    assertTrue(isNotOnSameRack);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) BlockLocation(org.apache.hadoop.fs.BlockLocation) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
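
The helper waitForBlockReplication is not shown in the snippet. A minimal sketch of how such a poller could be written against ClientProtocol, assuming a plain sleep loop (illustrative only, not the test's actual code; a maxWaitSec of -1, as passed above, means wait indefinitely):

// Illustrative sketch: poll the namenode until every block of the file
// reports at least `expected` replica locations, or the deadline passes.
private static void waitForBlockReplication(String file, ClientProtocol namenode, int expected, long maxWaitSec) throws IOException {
    long deadline = System.currentTimeMillis() + maxWaitSec * 1000L;
    while (true) {
        LocatedBlocks blocks = namenode.getBlockLocations(file, 0, Long.MAX_VALUE);
        boolean fullyReplicated = true;
        for (LocatedBlock blk : blocks.getLocatedBlocks()) {
            if (blk.getLocations().length < expected) {
                fullyReplicated = false;
                break;
            }
        }
        if (fullyReplicated) {
            return;
        }
        // A non-positive maxWaitSec means wait forever, as in the call above.
        if (maxWaitSec > 0 && System.currentTimeMillis() > deadline) {
            throw new IOException("Timed out waiting for replication of " + file);
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for replication", ie);
        }
    }
}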

Example 2 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

From the class TestHAStateTransitions, method testIsAtLeastOneActive.

/**
   * This test also serves to test
   * {@link HAUtil#getProxiesForAllNameNodesInNameservice(Configuration, String)} and
   * {@link DFSUtil#getRpcAddressesForNameserviceId(Configuration, String, String)}
   * by virtue of the fact that it wouldn't work properly if the proxies
   * returned were not for the correct NNs.
   */
@Test(timeout = 300000)
public void testIsAtLeastOneActive() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    try {
        Configuration conf = new HdfsConfiguration();
        HATestUtil.setFailoverConfigurations(cluster, conf);
        List<ClientProtocol> namenodes = HAUtil.getProxiesForAllNameNodesInNameservice(conf, HATestUtil.getLogicalHostname(cluster));
        assertEquals(2, namenodes.size());
        // Both namenodes start in standby state, so none is active yet.
        assertFalse(HAUtil.isAtLeastOneActive(namenodes));
        cluster.transitionToActive(0);
        assertTrue(HAUtil.isAtLeastOneActive(namenodes));
        cluster.transitionToStandby(0);
        assertFalse(HAUtil.isAtLeastOneActive(namenodes));
        cluster.transitionToActive(1);
        assertTrue(HAUtil.isAtLeastOneActive(namenodes));
        cluster.transitionToStandby(1);
        assertFalse(HAUtil.isAtLeastOneActive(namenodes));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)
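
For context, isAtLeastOneActive can be implemented by probing each proxy with a cheap read call and treating a StandbyException as "not active". A sketch along those lines (details may differ from the actual HAUtil implementation):

// Sketch: a namenode counts as active if a read RPC succeeds; a standby
// rejects the call with a StandbyException wrapped in a RemoteException.
// Any other failure is propagated to the caller.
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes) throws IOException {
    for (ClientProtocol namenode : namenodes) {
        try {
            namenode.getFileInfo("/");
            return true;
        } catch (RemoteException re) {
            IOException cause = re.unwrapRemoteException();
            if (!(cause instanceof StandbyException)) {
                throw re;
            }
            // Expected for a standby namenode; try the next one.
        }
    }
    return false;
}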

Example 3 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

From the class TestIsMethodSupported, method testClientNamenodeProtocol.

@Test
public void testClientNamenodeProtocol() throws IOException {
    ClientProtocol cp = NameNodeProxies.createNonHAProxy(conf, nnAddress, ClientProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
    RpcClientUtil.isMethodSupported(cp, ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs");
}
Also used : ClientNamenodeProtocolPB(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) Test(org.junit.Test)
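
The boolean result also supports a quick negative probe. For illustration only, reusing the cp proxy from the test above (the method name below is deliberately bogus and is not part of ClientProtocol):

// Probing for a method that does not exist on the protocol should
// return false rather than throw.
boolean supported = RpcClientUtil.isMethodSupported(cp,
        ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RPC.getProtocolVersion(ClientNamenodeProtocolPB.class),
        "noSuchMethod");
assertFalse(supported);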

Example 4 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

From the class DFSAdmin, method saveNamespace.

/**
   * Command to ask the namenode to save the namespace.
   * Usage: hdfs dfsadmin -saveNamespace [-beforeShutdown]
   * @see ClientProtocol#saveNamespace(long, long)
   */
public int saveNamespace(String[] argv) throws IOException {
    final DistributedFileSystem dfs = getDFS();
    final Configuration dfsConf = dfs.getConf();
    long timeWindow = 0;
    long txGap = 0;
    if (argv.length > 1 && "-beforeShutdown".equals(argv[1])) {
        final long checkpointPeriod = dfsConf.getTimeDuration(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT, TimeUnit.SECONDS);
        final long checkpointTxnCount = dfsConf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
        final int toleratePeriodNum = dfsConf.getInt(DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_KEY, DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_DEFAULT);
        timeWindow = checkpointPeriod * toleratePeriodNum;
        txGap = checkpointTxnCount * toleratePeriodNum;
        System.out.println("Do checkpoint if necessary before stopping " + "namenode. The time window is " + timeWindow + " seconds, and the " + "transaction gap is " + txGap);
    }
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
            if (saved) {
                System.out.println("Save namespace successful for " + proxy.getAddress());
            } else {
                System.out.println("No extra checkpoint has been made for " + proxy.getAddress());
            }
        }
    } else {
        boolean saved = dfs.saveNamespace(timeWindow, txGap);
        if (saved) {
            System.out.println("Save namespace successful");
        } else {
            System.out.println("No extra checkpoint has been made");
        }
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)
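
Stripped of the HA fan-out and the -beforeShutdown bookkeeping, the non-HA path reduces to one call on the filesystem. A minimal sketch, assuming the common case where an unconditional saveNamespace requires the namenode to be in safe mode first:

// Minimal sketch of the non-HA path: enter safe mode, persist the
// namespace, then leave safe mode. saveNamespace(0, 0) checkpoints
// unconditionally and expects the namenode to be in safe mode.
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
try {
    dfs.saveNamespace(0, 0);
} finally {
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
}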

Example 5 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

From the class DFSAdmin, method refreshNodes.

/**
   * Command to ask the namenode to reread the hosts and excluded-hosts
   * files.
   * Usage: hdfs dfsadmin -refreshNodes
   * @exception IOException
   */
public int refreshNodes() throws IOException {
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().refreshNodes();
            System.out.println("Refresh nodes successful for " + proxy.getAddress());
        }
    } else {
        dfs.refreshNodes();
        System.out.println("Refresh nodes successful");
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)
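
Examples 4 and 5 repeat the same HA fan-out: resolve the nameservice ID from the logical URI, obtain one ClientProtocol proxy per namenode, and apply the operation to each. A sketch of that shared pattern factored into a helper (the NameNodeOp interface and forEachNameNode method are hypothetical, introduced here only for illustration):

// Hypothetical helper capturing the fan-out shared by saveNamespace and
// refreshNodes above: run `op` once per namenode when HA is enabled, or
// once against the single namenode otherwise.
interface NameNodeOp {
    void run(ClientProtocol namenode, String label) throws IOException;
}

static void forEachNameNode(DistributedFileSystem dfs, NameNodeOp op) throws IOException {
    Configuration conf = dfs.getConf();
    URI uri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(conf, uri)) {
        String nsId = uri.getHost();
        for (ProxyAndInfo<ClientProtocol> proxy :
                HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, ClientProtocol.class)) {
            op.run(proxy.getProxy(), proxy.getAddress().toString());
        }
    } else {
        op.run(dfs.getClient().getNamenode(), uri.toString());
    }
}

With such a helper, refreshNodes would reduce to a one-line lambda that calls namenode.refreshNodes() and prints the label.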

Aggregations

ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol): 21 usages
Configuration (org.apache.hadoop.conf.Configuration): 14 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 usages
URI (java.net.URI): 8 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 usages
ProxyAndInfo (org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo): 7 usages
IOException (java.io.IOException): 5 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 usages
Path (org.apache.hadoop.fs.Path): 4 usages
Test (org.junit.Test): 4 usages
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3 usages
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3 usages
Field (java.lang.reflect.Field): 2 usages
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2 usages
InetSocketAddress (java.net.InetSocketAddress): 2 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 2 usages
StorageType (org.apache.hadoop.fs.StorageType): 2 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB): 2 usages