
Example 1 with StandbyException

Use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.

From the class TestStandbyCheckpoints, method testStandbyExceptionThrownDuringCheckpoint.

/**
   * Make sure that clients receive StandbyExceptions even while a
   * checkpoint is in progress on the SBN (during which the
   * StandbyCheckpointer thread holds the FSNS lock). Regression test
   * for HDFS-4591.
   */
@Test(timeout = 300000)
public void testStandbyExceptionThrownDuringCheckpoint() throws Exception {
    // Set it up so that we know when the SBN checkpoint starts and ends.
    FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nns[1]);
    DelayAnswer answerer = new DelayAnswer(LOG);
    Mockito.doAnswer(answerer).when(spyImage1).saveNamespace(Mockito.any(FSNamesystem.class), Mockito.eq(NameNodeFile.IMAGE), Mockito.any(Canceler.class));
    // Perform some edits and wait for a checkpoint to start on the SBN.
    doEdits(0, 1000);
    nns[0].getRpcServer().rollEditLog();
    answerer.waitForCall();
    assertTrue("SBN is not performing checkpoint but it should be.", answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
    // Make sure that the lock has actually been taken by the checkpointing
    // thread.
    ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
    try {
        // Perform an RPC to the SBN and make sure it throws a StandbyException.
        nns[1].getRpcServer().getFileInfo("/");
        fail("Should have thrown StandbyException, but instead succeeded.");
    } catch (StandbyException se) {
        GenericTestUtils.assertExceptionContains("is not supported", se);
    }
    // Make sure new incremental block reports are processed during
    // checkpointing on the SBN.
    assertEquals(0, cluster.getNamesystem(1).getPendingDataNodeMessageCount());
    doCreate();
    Thread.sleep(1000);
    assertTrue(cluster.getNamesystem(1).getPendingDataNodeMessageCount() > 0);
    // Make sure that the checkpoint is still going on, implying that the client
    // RPC to the SBN happened during the checkpoint.
    assertTrue("SBN should have still been checkpointing.", answerer.getFireCount() == 1 && answerer.getResultCount() == 0);
    answerer.proceed();
    answerer.waitForResult();
    assertTrue("SBN should have finished checkpointing.", answerer.getFireCount() == 1 && answerer.getResultCount() == 1);
}
Also used : StandbyException(org.apache.hadoop.ipc.StandbyException) Canceler(org.apache.hadoop.hdfs.util.Canceler) DelayAnswer(org.apache.hadoop.test.GenericTestUtils.DelayAnswer) Test(org.junit.Test)
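
The DelayAnswer coordination is the interesting trick here: the spied saveNamespace() call is held open so the checkpointer keeps the FSNS lock while the test probes the RPC server. Below is a minimal sketch of that idiom, a simplified stand-in for Hadoop's GenericTestUtils.DelayAnswer rather than its real implementation.

import java.util.concurrent.CountDownLatch;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class BlockingAnswer implements Answer<Object> {

    private final CountDownLatch entered = new CountDownLatch(1);
    private final CountDownLatch mayProceed = new CountDownLatch(1);

    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
        entered.countDown();   // tell the test the spied call has started
        mayProceed.await();    // hold the call (and any locks it owns) open
        return invocation.callRealMethod();
    }

    // Test side: block until the spied method is in progress.
    public void waitForCall() throws InterruptedException {
        entered.await();
    }

    // Test side: let the held call run to completion.
    public void proceed() {
        mayProceed.countDown();
    }
}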

Example 2 with StandbyException

Use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.

From the class TestNamenodeRetryCache, method testUpdatePipelineWithFailOver.

/**
   * Make sure a retried call does not hang because of the exception
   * thrown by the first call.
   */
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
    cluster.shutdown();
    nnRpc = null;
    filesystem = null;
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
    cluster.waitActive();
    NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
    ExtendedBlock oldBlock = new ExtendedBlock();
    ExtendedBlock newBlock = new ExtendedBlock();
    DatanodeID[] newNodes = new DatanodeID[2];
    String[] newStorages = new String[2];
    newCall();
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
        fail("Expect StandbyException from the updatePipeline call");
    } catch (StandbyException e) {
        // expected: at this point both NNs are still in standby state
        GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(), e);
    }
    cluster.transitionToActive(0);
    try {
        ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
    } catch (IOException e) {
        // Ignore the exception; the retried call just must not hang.
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) StandbyException(org.apache.hadoop.ipc.StandbyException) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) Test(org.junit.Test)
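
On the client side, the state this test sets up (every NameNode still standby) is normally handled by retrying until a failover completes. Here is a minimal, hand-rolled sketch of that loop, assuming the caller wraps the RPC in a Callable; Hadoop's real clients use failover proxy providers and RetryPolicies instead.

import java.util.concurrent.Callable;
import org.apache.hadoop.ipc.StandbyException;

public final class StandbyRetry {

    // Retry a NameNode call a bounded number of times while it keeps
    // answering with StandbyException (e.g. while a failover is pending).
    public static <T> T callWithRetry(Callable<T> call, int attempts, long sleepMs)
            throws Exception {
        StandbyException last = null;
        for (int i = 0; i < attempts; i++) {
            try {
                return call.call();
            } catch (StandbyException se) {
                last = se;              // target is still standby; back off
                Thread.sleep(sleepMs);
            }
        }
        if (last == null) {
            throw new IllegalArgumentException("attempts must be positive");
        }
        throw last;
    }
}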

Example 3 with StandbyException

Use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.

From the class FSNamesystem, method startFileInt.

private HdfsFileStatus startFileInt(String src, PermissionStatus permissions, String holder, String clientMachine, EnumSet<CreateFlag> flag, boolean createParent, short replication, long blockSize, CryptoProtocolVersion[] supportedVersions, boolean logRetryCache) throws IOException {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        StringBuilder builder = new StringBuilder();
        builder.append("DIR* NameSystem.startFile: src=").append(src).append(", holder=").append(holder).append(", clientMachine=").append(clientMachine).append(", createParent=").append(createParent).append(", replication=").append(replication).append(", createFlag=").append(flag).append(", blockSize=").append(blockSize).append(", supportedVersions=").append(Arrays.toString(supportedVersions));
        NameNode.stateChangeLog.debug(builder.toString());
    }
    if (!DFSUtil.isValidName(src) || FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src) && !FSDirectory.isReservedRawName(src) && !FSDirectory.isReservedInodesName(src))) {
        throw new InvalidPathException(src);
    }
    FSPermissionChecker pc = getPermissionChecker();
    INodesInPath iip = null;
    // Skip the edit-log sync until we do something that might create edits.
    boolean skipSync = true;
    HdfsFileStatus stat = null;
    BlocksMapUpdateInfo toRemoveBlocks = null;
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
        checkOperation(OperationCategory.WRITE);
        checkNameNodeSafeMode("Cannot create file" + src);
        iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, src, flag, createParent);
        if (!FSDirErasureCodingOp.hasErasureCodingPolicy(this, iip)) {
            blockManager.verifyReplication(src, replication, clientMachine);
        }
        if (blockSize < minBlockSize) {
            throw new IOException("Specified block size is less than configured" + " minimum value (" + DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY + "): " + blockSize + " < " + minBlockSize);
        }
        FileEncryptionInfo feInfo = null;
        if (provider != null) {
            EncryptionKeyInfo ezInfo = FSDirEncryptionZoneOp.getEncryptionKeyInfo(this, iip, supportedVersions);
            // Re-resolve the path below in case the file and/or its
            // encryption zone mutated while the key info was fetched.
            if (ezInfo != null) {
                checkOperation(OperationCategory.WRITE);
                iip = FSDirWriteFileOp.resolvePathForStartFile(dir, pc, iip.getPath(), flag, createParent);
                feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(dir, iip, ezInfo);
            }
        }
        // following might generate edits
        skipSync = false;
        toRemoveBlocks = new BlocksMapUpdateInfo();
        dir.writeLock();
        try {
            stat = FSDirWriteFileOp.startFile(this, iip, permissions, holder, clientMachine, flag, createParent, replication, blockSize, feInfo, toRemoveBlocks, logRetryCache);
        } catch (IOException e) {
            skipSync = e instanceof StandbyException;
            throw e;
        } finally {
            dir.writeUnlock();
        }
    } finally {
        writeUnlock("create");
        // Edits may have been logged; they must be sync'ed even when an exception was thrown.
        if (!skipSync) {
            getEditLog().logSync();
            if (toRemoveBlocks != null) {
                removeBlocks(toRemoveBlocks);
                toRemoveBlocks.clear();
            }
        }
    }
    return stat;
}
Also used : BlocksMapUpdateInfo(org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo) StandbyException(org.apache.hadoop.ipc.StandbyException) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) EncryptionKeyInfo(org.apache.hadoop.hdfs.server.namenode.FSDirEncryptionZoneOp.EncryptionKeyInfo) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvalidPathException(org.apache.hadoop.fs.InvalidPathException)
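
The skipSync dance above is easy to miss, so here is the idiom condensed on its own. The doEditGeneratingWork()/logSync() members are hypothetical stand-ins, not the real FSEditLog surface; the point is that a standby rejects the call before any edit is logged, so the potentially blocking sync can be skipped safely.

import java.io.IOException;
import org.apache.hadoop.ipc.StandbyException;

abstract class SkipSyncPattern {

    abstract void doEditGeneratingWork() throws IOException; // hypothetical
    abstract void logSync();                                 // hypothetical

    void run() throws IOException {
        boolean skipSync = true;  // nothing has been logged yet
        try {
            skipSync = false;     // from here on, edits may exist
            doEditGeneratingWork();
        } catch (IOException e) {
            // A standby rejects the call before writing any edit.
            skipSync = e instanceof StandbyException;
            throw e;
        } finally {
            if (!skipSync) {
                logSync();        // must run even when an exception escaped
            }
        }
    }
}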

Example 4 with StandbyException

Use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenFileNotFoundException.

@Test
public void testHedgingWhenFileNotFoundException() throws Exception {
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong())).thenThrow(new RemoteException("java.io.FileNotFoundException", "File does not exist!"));
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong())).thenThrow(new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L);
        Assert.fail("Should fail since the active namenode throws" + " FileNotFoundException!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            Exception rEx = ((RemoteException) ex).unwrapRemoteException();
            if (rEx instanceof StandbyException) {
                continue;
            }
            Assert.assertTrue(rEx instanceof FileNotFoundException);
        }
    }
    Mockito.verify(active).getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong());
    Mockito.verify(standby).getBlockLocations(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong());
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) StandbyException(org.apache.hadoop.ipc.StandbyException) FileNotFoundException(java.io.FileNotFoundException) RemoteException(org.apache.hadoop.ipc.RemoteException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) IOException(java.io.IOException) EOFException(java.io.EOFException) Test(org.junit.Test)
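
One detail this test leans on: over the wire a StandbyException arrives as a RemoteException carrying only the exception's class name, and unwrapRemoteException() reflectively rebuilds the typed exception so an instanceof check works. A standalone demonstration mirroring the mock setup above (the main() wrapper is just for illustration):

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class UnwrapDemo {
    public static void main(String[] args) {
        // The same wire form the test mocks: class name plus message.
        RemoteException re = new RemoteException(
                "org.apache.hadoop.ipc.StandbyException", "Standby NameNode");
        // Reflectively re-instantiates the named exception type.
        IOException typed = re.unwrapRemoteException();
        System.out.println(typed instanceof StandbyException); // prints true
    }
}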

Example 5 with StandbyException

Use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.

From the class TestDelegationTokensWithHA, method testDelegationTokenDuringNNFailover.

/**
   * Test that the correct exception (StandbyException or RetriableException)
   * is thrown during NN failover.
   */
@Test(timeout = 300000)
public void testDelegationTokenDuringNNFailover() throws Exception {
    EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
    // stop the editLogTailer of nn1
    editLogTailer.stop();
    Configuration conf = (Configuration) Whitebox.getInternalState(editLogTailer, "conf");
    nn1.getNamesystem().setEditLogTailerForTests(new EditLogTailerForTest(nn1.getNamesystem(), conf));
    // create token
    final Token<DelegationTokenIdentifier> token = getDelegationToken(fs, "JobTracker");
    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
    byte[] tokenId = token.getIdentifier();
    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
    // Ensure that it's present in the nn0 secret manager and can
    // be renewed directly from there.
    LOG.info("A valid token should have non-null password, " + "and should be renewed successfully");
    assertTrue(null != dtSecretManager.retrievePassword(identifier));
    dtSecretManager.renewToken(token, "JobTracker");
    // transition nn0 to standby
    cluster.transitionToStandby(0);
    try {
        cluster.getNameNodeRpc(0).renewDelegationToken(token);
        fail("StandbyException is expected since nn0 is in standby state");
    } catch (StandbyException e) {
        GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(), e);
    }
    new Thread() {

        @Override
        public void run() {
            try {
                cluster.transitionToActive(1);
            } catch (Exception e) {
                LOG.error("Transition nn1 to active failed", e);
            }
        }
    }.start();
    Thread.sleep(1000);
    try {
        nn1.getNamesystem().verifyToken(token.decodeIdentifier(), token.getPassword());
        fail("RetriableException/StandbyException is expected since nn1 is in transition");
    } catch (IOException e) {
        assertTrue(e instanceof StandbyException || e instanceof RetriableException);
        LOG.info("Got expected exception", e);
    }
    catchup = true;
    synchronized (this) {
        this.notifyAll();
    }
    Configuration clientConf = dfs.getConf();
    doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
    doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) DelegationTokenIdentifier(org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier) IOException(java.io.IOException) DataInputStream(java.io.DataInputStream) ByteArrayInputStream(java.io.ByteArrayInputStream) StandbyException(org.apache.hadoop.ipc.StandbyException) RetriableException(org.apache.hadoop.ipc.RetriableException) Test(org.junit.Test)
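
The two exception types the test expects mark two distinct failover phases: StandbyException from a node settled in standby, RetriableException while the new active is still catching up on edits. A client can treat both as "try again shortly". A minimal sketch follows, with the TokenCall interface as a hypothetical stand-in for the renew RPC used in the test.

import java.io.IOException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;

final class FailoverAwareRenewer {

    interface TokenCall {              // hypothetical stand-in for the RPC
        long renew() throws IOException;
    }

    static long renewWithBackoff(TokenCall call, int attempts) throws Exception {
        IOException last = null;
        for (int i = 0; i < attempts; i++) {
            try {
                return call.renew();
            } catch (StandbyException | RetriableException e) {
                last = e;                      // failover in progress
                Thread.sleep(1000L * (i + 1)); // linear backoff
            }
        }
        if (last == null) {
            throw new IllegalArgumentException("attempts must be positive");
        }
        throw last;
    }
}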

Aggregations

StandbyException (org.apache.hadoop.ipc.StandbyException): 11
IOException (java.io.IOException): 8
Test (org.junit.Test): 8
RemoteException (org.apache.hadoop.ipc.RemoteException): 6
EOFException (java.io.EOFException): 3
FileNotFoundException (java.io.FileNotFoundException): 3
Configuration (org.apache.hadoop.conf.Configuration): 3
NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 3
ByteArrayInputStream (java.io.ByteArrayInputStream): 2
DataInputStream (java.io.DataInputStream): 2
ConnectException (java.net.ConnectException): 2
URISyntaxException (java.net.URISyntaxException): 2
DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier): 2
MultiException (org.apache.hadoop.io.retry.MultiException): 2
AccessControlException (org.apache.hadoop.security.AccessControlException): 2
InvocationTargetException (java.lang.reflect.InvocationTargetException): 1
InetSocketAddress (java.net.InetSocketAddress): 1
MalformedURLException (java.net.MalformedURLException): 1
HashMap (java.util.HashMap): 1
Map (java.util.Map): 1