
Example 1 with NamenodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol in project hadoop by apache.

the class TestGetBlocks method testGetBlocks.

/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
    final Configuration CONF = new HdfsConfiguration();
    final short REPLICATION_FACTOR = (short) 2;
    final int DEFAULT_BLOCK_SIZE = 1024;
    CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    CONF.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR).build();
    try {
        cluster.waitActive();
        // the third block will not be visible to getBlocks
        long fileLen = 2 * DEFAULT_BLOCK_SIZE + 1;
        DFSTestUtil.createFile(cluster.getFileSystem(), new Path("/tmp.txt"), fileLen, REPLICATION_FACTOR, 0L);
        // get blocks & data nodes
        List<LocatedBlock> locatedBlocks;
        DatanodeInfo[] dataNodes = null;
        boolean notWritten;
        do {
            final DFSClient dfsclient = new DFSClient(DFSUtilClient.getNNAddress(CONF), CONF);
            locatedBlocks = dfsclient.getNamenode().getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
            assertEquals(3, locatedBlocks.size());
            notWritten = false;
            for (int i = 0; i < 2; i++) {
                dataNodes = locatedBlocks.get(i).getLocations();
                if (dataNodes.length != REPLICATION_FACTOR) {
                    notWritten = true;
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
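                        // ignore the interrupt; the loop simply polls again
                        // until both blocks reach full replication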
                    }
                    break;
                }
            }
        } while (notWritten);
        // get RPC client to namenode
        InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
        NamenodeProtocol namenode = NameNodeProxies.createProxy(CONF, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();
        // get blocks of size fileLen from dataNodes[0]
        BlockWithLocations[] locs;
        locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
        assertEquals(2, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        assertEquals(2, locs[1].getStorageIDs().length);
        // get blocks of size BlockSize from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 1 from dataNodes[0]
        locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
        assertEquals(1, locs.length);
        assertEquals(2, locs[0].getStorageIDs().length);
        // get blocks of size 0 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], 0);
        // get blocks of size -1 from dataNodes[0]
        getBlocksWithException(namenode, dataNodes[0], -1);
        // get blocks from a non-existent datanode
        DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
        getBlocksWithException(namenode, info, 2);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), InetSocketAddress (java.net.InetSocketAddress), BlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol), Test (org.junit.Test)
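
The helper getBlocksWithException called above is not part of this snippet. A minimal sketch of its likely shape, assuming the server-side size check surfaces as an org.apache.hadoop.ipc.RemoteException at the RPC boundary (the body and assertion message are assumptions, not the project's actual code):

private static void getBlocksWithException(NamenodeProtocol namenode, DatanodeInfo datanode, long size) throws IOException {
    boolean threw = false;
    try {
        namenode.getBlocks(datanode, size);
    } catch (RemoteException e) {
        // assumed: the NameNode-side IllegalArgumentException arrives wrapped
        // in a RemoteException when it crosses the RPC boundary
        threw = true;
    }
    assertTrue("expected getBlocks to fail for size " + size, threw);
}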

Example 2 with NamenodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol in project hadoop by apache.

the class NameNodeProxies method createNNProxyWithNamenodeProtocol.

private static NamenodeProtocol createNNProxyWithNamenodeProtocol(InetSocketAddress address, Configuration conf, UserGroupInformation ugi, boolean withRetries) throws IOException {
    NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(address, conf, ugi, NamenodeProtocolPB.class, 0);
    if (withRetries) {
        // create the proxy with retries
        RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200, TimeUnit.MILLISECONDS);
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
        methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
        methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
        NamenodeProtocol translatorProxy = new NamenodeProtocolTranslatorPB(proxy);
        return (NamenodeProtocol) RetryProxy.create(NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
    } else {
        return new NamenodeProtocolTranslatorPB(proxy);
    }
}
Also used: HashMap (java.util.HashMap), NamenodeProtocolTranslatorPB (org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB), NamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB), RetryPolicy (org.apache.hadoop.io.retry.RetryPolicy), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol)
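
For context, a usage sketch showing how a caller typically reaches this factory: requesting a NamenodeProtocol proxy through the public NameNodeProxies.createProxy entry point, exactly as Example 1 does. The host and port here are illustrative, not prescribed:

// illustrative caller: resolve the NameNode URI and ask for a retrying proxy;
// getBlocks and getAccessKeys calls made through it back off exponentially,
// per the policy map built in the factory above
Configuration conf = new HdfsConfiguration();
InetSocketAddress addr = new InetSocketAddress("localhost", 8020); // illustrative
NamenodeProtocol namenode = NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(addr), NamenodeProtocol.class).getProxy();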

Example 3 with NamenodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol in project hadoop by apache.

the class BootstrapStandby method doRun.

private int doRun() throws IOException {
    // find the active NN
    NamenodeProtocol proxy = null;
    NamespaceInfo nsInfo = null;
    boolean isUpgradeFinalized = false;
    RemoteNameNodeInfo proxyInfo = null;
    for (int i = 0; i < remoteNNs.size(); i++) {
        proxyInfo = remoteNNs.get(i);
        InetSocketAddress otherIpcAddress = proxyInfo.getIpcAddress();
        proxy = createNNProtocolProxy(otherIpcAddress);
        try {
            // Get the namespace from any active NN. If you just formatted the primary NN and are
            // bootstrapping the other NNs from that layout, it will only contact the single NN.
            // However, if the cluster is already running and you are adding a NN later (e.g.
            // replacing a failed NN), then this will bootstrap from any node in the cluster.
            nsInfo = proxy.versionRequest();
            isUpgradeFinalized = proxy.isUpgradeFinalized();
            break;
        } catch (IOException ioe) {
            LOG.warn("Unable to fetch namespace information from remote NN at " + otherIpcAddress + ": " + ioe.getMessage());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Full exception trace", ioe);
            }
        }
    }
    if (nsInfo == null) {
        LOG.fatal("Unable to fetch namespace information from any remote NN. Possible NameNodes: " + remoteNNs);
        return ERR_CODE_FAILED_CONNECT;
    }
    if (!checkLayoutVersion(nsInfo)) {
        LOG.fatal("Layout version on remote node (" + nsInfo.getLayoutVersion() + ") does not match " + "this node's layout version (" + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ")");
        return ERR_CODE_INVALID_VERSION;
    }
    System.out.println(
        "=====================================================\n"
            + "About to bootstrap Standby ID " + nnId + " from:\n"
            + "           Nameservice ID: " + nsId + "\n"
            + "        Other Namenode ID: " + proxyInfo.getNameNodeID() + "\n"
            + "  Other NN's HTTP address: " + proxyInfo.getHttpAddress() + "\n"
            + "  Other NN's IPC  address: " + proxyInfo.getIpcAddress() + "\n"
            + "             Namespace ID: " + nsInfo.getNamespaceID() + "\n"
            + "            Block pool ID: " + nsInfo.getBlockPoolID() + "\n"
            + "               Cluster ID: " + nsInfo.getClusterID() + "\n"
            + "           Layout version: " + nsInfo.getLayoutVersion() + "\n"
            + "       isUpgradeFinalized: " + isUpgradeFinalized + "\n"
            + "=====================================================");
    NNStorage storage = new NNStorage(conf, dirsToFormat, editUrisToFormat);
    if (!isUpgradeFinalized) {
        // the remote NameNode is in upgrade state, this NameNode should also
        // create the previous directory. First prepare the upgrade and rename
        // the current dir to previous.tmp.
        LOG.info("The active NameNode is in Upgrade. " + "Prepare the upgrade for the standby NameNode as well.");
        if (!doPreUpgrade(storage, nsInfo)) {
            return ERR_CODE_ALREADY_FORMATTED;
        }
    } else if (!format(storage, nsInfo)) {
        // prompt the user to format storage
        return ERR_CODE_ALREADY_FORMATTED;
    }
    // download the fsimage from active namenode
    int download = downloadImage(storage, proxy, proxyInfo);
    if (download != 0) {
        return download;
    }
    // finish the upgrade: rename previous.tmp to previous
    if (!isUpgradeFinalized) {
        doUpgrade(storage);
    }
    return 0;
}
Also used: NNStorage (org.apache.hadoop.hdfs.server.namenode.NNStorage), InetSocketAddress (java.net.InetSocketAddress), IOException (java.io.IOException), NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol)
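
The createNNProtocolProxy helper used above is private to BootstrapStandby and not shown. A plausible sketch, assuming it delegates to NameNodeProxies.createNonHAProxy (the same entry point Example 4 uses) with the login user and retries enabled:

// assumed reconstruction: open a non-HA NamenodeProtocol proxy to one remote
// NameNode's IPC address, with client-side retries turned on
private NamenodeProtocol createNNProtocolProxy(InetSocketAddress otherIpcAddr) throws IOException {
    return NameNodeProxies.createNonHAProxy(getConf(), otherIpcAddr, NamenodeProtocol.class, UserGroupInformation.getLoginUser(), true).getProxy();
}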

Example 4 with NamenodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol in project hadoop by apache.

the class TestIsMethodSupported method testNamenodeProtocol.

@Test
public void testNamenodeProtocol() throws IOException {
    NamenodeProtocol np = NameNodeProxies.createNonHAProxy(conf, nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(), true).getProxy();
    boolean exists = RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
    assertTrue(exists);
    exists = RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
    assertFalse(exists);
}
Also used: ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB), NamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol), Test (org.junit.Test)
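
A sketch of the defensive pattern this test exercises: probe for an RPC method before invoking it, so a client can degrade gracefully against an older NameNode. The LOG handle and the fallback branch are illustrative assumptions:

// illustrative guard: only call rollEditLog if the remote side advertises it
if (RpcClientUtil.isMethodSupported(np, NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER, RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog")) {
    np.rollEditLog();
} else {
    LOG.warn("rollEditLog is not supported by the remote NameNode"); // LOG is assumed
}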

Example 5 with NamenodeProtocol

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol in project hadoop by apache.

the class TestCheckpoint method testMultipleSecondaryNNsAgainstSameNN2.

/**
   * Test case where two secondary namenodes are checkpointing the same
   * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
   * since that test runs against two distinct NNs.
   * 
   * This case tests the following interleaving:
   * - 2NN A) calls rollEdits()
   * - 2NN B) calls rollEdits()
   * - 2NN A) paused at getRemoteEditLogManifest()
   * - 2NN B) calls getRemoteEditLogManifest() (returns up to txid 4)
   * - 2NN B) uploads checkpoint fsimage_4
   * - 2NN A) allowed to proceed, also returns up to txid 4
   * - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully
   * 
   * It verifies that one of the two gets an error that it's uploading a
   * duplicate checkpoint, and the other one succeeds.
   */
@Test
public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary1 = null, secondary2 = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true).build();
        // Start 2NNs
        secondary1 = startSecondaryNameNode(conf, 1);
        secondary2 = startSecondaryNameNode(conf, 2);
        // Make the first 2NN's checkpoint process delayable - we can pause it
        // right before it calls getRemoteEditLogManifest.
        // The method to set up a spy on an RPC protocol is a little bit involved
        // since we can't spy directly on a proxy object. This sets up a mock
        // which delegates all its calls to the original object, instead.
        final NamenodeProtocol origNN = secondary1.getNameNode();
        final Answer<Object> delegator = new GenericTestUtils.DelegateAnswer(origNN);
        NamenodeProtocol spyNN = Mockito.mock(NamenodeProtocol.class, delegator);
        DelayAnswer delayer = new DelayAnswer(LOG) {

            @Override
            protected Object passThrough(InvocationOnMock invocation) throws Throwable {
                return delegator.answer(invocation);
            }
        };
        secondary1.setNameNode(spyNN);
        Mockito.doAnswer(delayer).when(spyNN).getEditLogManifest(Mockito.anyLong());
        // Set up a thread to do a checkpoint from the first 2NN
        DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);
        checkpointThread.start();
        // Wait for the first checkpointer to be about to call getEditLogManifest
        delayer.waitForCall();
        // Now make the second checkpointer run an entire checkpoint
        secondary2.doCheckpoint();
        // NN should have now received fsimage_4
        NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
        assertEquals(4, storage.getMostRecentCheckpointTxId());
        // Let the first one finish
        delayer.proceed();
        // Letting the first node continue, it should try to upload the
        // same image, and gracefully ignore it, while logging an
        // error message.
        checkpointThread.join();
        checkpointThread.propagateExceptions();
        // primary should still consider fsimage_4 the latest
        assertEquals(4, storage.getMostRecentCheckpointTxId());
        // Now have second one checkpoint one more time just to make sure that
        // the NN isn't left in a broken state
        secondary2.doCheckpoint();
        assertEquals(6, storage.getMostRecentCheckpointTxId());
        // Should have accepted both checkpoints
        assertNNHasCheckpoints(cluster, ImmutableList.of(4, 6));
        // Let the first one also go again on its own to make sure it can
        // continue at next checkpoint
        secondary1.setNameNode(origNN);
        secondary1.doCheckpoint();
        // NN should have received new checkpoint
        assertEquals(8, storage.getMostRecentCheckpointTxId());
        // Validate invariant that files named the same are the same.
        assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
        // Validate that the NN received checkpoints at expected txids
        // (i.e that both checkpoints went through)
        assertNNHasCheckpoints(cluster, ImmutableList.of(6, 8));
    } finally {
        cleanup(secondary1);
        secondary1 = null;
        cleanup(secondary2);
        secondary2 = null;
        cleanup(cluster);
        cluster = null;
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Test (org.junit.Test)
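
The delegate-mock trick above, a Mockito mock whose default Answer forwards every call to the real proxy, is worth isolating. A hand-rolled sketch of what GenericTestUtils.DelegateAnswer plausibly does, using plain reflection; this is an assumption, not Hadoop's actual implementation:

// forward every mock invocation to the real delegate via reflection,
// unwrapping InvocationTargetException so callers see the original error
final Object delegate = origNN; // the real RPC proxy
Answer<Object> forward = new Answer<Object>() {

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        try {
            return invocation.getMethod().invoke(delegate, invocation.getArguments());
        } catch (java.lang.reflect.InvocationTargetException e) {
            // rethrow the underlying cause so test assertions observe the
            // same exception type the real proxy would throw
            throw e.getCause();
        }
    }
};
NamenodeProtocol spy = Mockito.mock(NamenodeProtocol.class, forward);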

Aggregations

NamenodeProtocol (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol): 5 uses
Test (org.junit.Test): 3 uses
InetSocketAddress (java.net.InetSocketAddress): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
NamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB): 2 uses
IOException (java.io.IOException): 1 use
HashMap (java.util.HashMap): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 use
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1 use
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 use
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 1 use
ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB): 1 use
NamenodeProtocolTranslatorPB (org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB): 1 use
NNStorage (org.apache.hadoop.hdfs.server.namenode.NNStorage): 1 use
BlockWithLocations (org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations): 1 use
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 1 use
RetryPolicy (org.apache.hadoop.io.retry.RetryPolicy): 1 use
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 1 use
DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer): 1 use