
Example 26 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

The class TestEditLogJournalFailures, method testMultipleRedundantFailedEditsDirOnSetReadyToFlush.

@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush() throws IOException {
    // Set up 4 name/edits dirs.
    shutDownMiniCluster();
    Configuration conf = getConf();
    String[] nameDirs = new String[4];
    for (int i = 0; i < nameDirs.length; i++) {
        File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
        nameDir.mkdirs();
        nameDirs[i] = nameDir.getAbsolutePath();
    }
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, StringUtils.join(nameDirs, ","));
    // Keep running unless fewer than 2 edits dirs remain.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
    setUpMiniCluster(conf, false);
    // All journals active.
    assertTrue(doAnEdit());
    // The NN has not terminated (no ExitException thrown)
    // Invalidate 1/4 of the redundant journals.
    invalidateEditsDirAtIndex(0, false, false);
    assertTrue(doAnEdit());
    // The NN has not terminated (no ExitException thrown)
    // Invalidate 2/4 of the redundant journals.
    invalidateEditsDirAtIndex(1, false, false);
    assertTrue(doAnEdit());
    // The NN has not terminated (no ExitException thrown)
    // Invalidate 3/4 of the redundant journals.
    invalidateEditsDirAtIndex(2, false, false);
    try {
        doAnEdit();
        fail("A failure of more than the minimum number of redundant journals " + "should have halted the NN");
    } catch (RemoteException re) {
        assertTrue(re.getClassName().contains("ExitException"));
        GenericTestUtils.assertExceptionContains("Could not sync enough journals to persistent storage due to " + "setReadyToFlush failed for too many journals. " + "Unsynced transactions: 1", re);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RemoteException(org.apache.hadoop.ipc.RemoteException) File(java.io.File) Test(org.junit.Test)
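
The catch block above relies on RemoteException carrying the class name of the exception raised on the NameNode, which is why the test asserts on re.getClassName(). As a hedged illustration (not part of the example), a minimal helper showing the same inspection on the client side; the method name rethrowUnwrapped is hypothetical:

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical helper: inspect a RemoteException returned over Hadoop IPC and
// re-throw the underlying exception type where it can be reconstructed.
static void rethrowUnwrapped(RemoteException re) throws IOException {
    // getClassName() reports the class of the exception thrown on the server,
    // which is what the assertions in the tests above check.
    if (re.getClassName().contains("ExitException")) {
        // The NameNode aborted while servicing the request.
    }
    // unwrapRemoteException() rebuilds the original exception when its class is
    // available on the client classpath; otherwise it returns the wrapper itself.
    throw re.unwrapRemoteException();
}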

Example 27 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

The class TestEditLogJournalFailures, method testSingleRequiredFailedEditsDirOnSetReadyToFlush.

@Test
public void testSingleRequiredFailedEditsDirOnSetReadyToFlush() throws IOException {
    // Set one of the edits dirs to be required.
    String[] editsDirs = cluster.getConfiguration(0).getTrimmedStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
    shutDownMiniCluster();
    Configuration conf = getConf();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, editsDirs[0]);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 0);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
    setUpMiniCluster(conf, true);
    assertTrue(doAnEdit());
    // Invalidate the one required edits journal.
    invalidateEditsDirAtIndex(0, false, false);
    JournalAndStream nonRequiredJas = getJournalAndStream(1);
    EditLogFileOutputStream nonRequiredSpy = spyOnStream(nonRequiredJas);
    // The NN has not terminated (no ExitException thrown),
    // and the other (non-required) stream is still active.
    assertTrue(nonRequiredJas.isActive());
    try {
        doAnEdit();
        fail("A single failure of a required journal should have halted the NN");
    } catch (RemoteException re) {
        assertTrue(re.getClassName().contains("ExitException"));
        GenericTestUtils.assertExceptionContains("setReadyToFlush failed for required journal", re);
    }
    // Since the required directory failed setReadyToFlush, and that
    // directory was listed prior to the non-required directory,
    // we should not call setReadyToFlush on the non-required
    // directory. Regression test for HDFS-2874.
    Mockito.verify(nonRequiredSpy, Mockito.never()).setReadyToFlush();
    assertFalse(nonRequiredJas.isActive());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) JournalAndStream(org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
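
The test drives the failure through helpers such as spyOnStream() and invalidateEditsDirAtIndex(), whose bodies are not shown on this page. A hedged sketch of how a Mockito spy can be made to fail setReadyToFlush(); the helper name and the injected message are illustrative, not the test's actual code:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.mockito.Mockito;

// Hypothetical helper: wrap a real edit log stream in a spy whose
// setReadyToFlush() always throws, simulating a failed journal without
// touching the underlying edits file.
static EditLogFileOutputStream failOnSetReadyToFlush(EditLogFileOutputStream real)
        throws IOException {
    EditLogFileOutputStream spy = Mockito.spy(real);
    Mockito.doThrow(new IOException("injected setReadyToFlush failure"))
           .when(spy).setReadyToFlush();
    return spy;
}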

Example 28 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

The class CacheDirectiveIterator, method makeRequest.

@Override
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey) throws IOException {
    BatchedEntries<CacheDirectiveEntry> entries;
    try (TraceScope ignored = tracer.newScope("listCacheDirectives")) {
        entries = namenode.listCacheDirectives(prevKey, filter);
    } catch (IOException e) {
        if (e.getMessage().contains("Filtering by ID is unsupported")) {
            // Retry case for old servers, do the filtering client-side
            long id = filter.getId();
            filter = removeIdFromFilter(filter);
            // Using id - 1 as prevId should get us a window containing the id
            // This is somewhat brittle, since it depends on directives being
            // returned in order of ascending ID.
            entries = namenode.listCacheDirectives(id - 1, filter);
            for (int i = 0; i < entries.size(); i++) {
                CacheDirectiveEntry entry = entries.get(i);
                if (entry.getInfo().getId().equals(id)) {
                    return new SingleEntry(entry);
                }
            }
            throw new RemoteException(InvalidRequestException.class.getName(), "Did not find requested id " + id);
        }
        throw e;
    }
    Preconditions.checkNotNull(entries);
    return entries;
}
Also used : TraceScope(org.apache.htrace.core.TraceScope) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException)
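
The RemoteException constructed above names InvalidRequestException as the remote class. For illustration only, a caller could turn that wrapper back into the exception type it names with the varargs unwrap method; the helper name toClientException is hypothetical:

import java.io.IOException;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.ipc.RemoteException;

// Hypothetical helper: unwrap to InvalidRequestException when the remote class
// name matches; otherwise the RemoteException itself is returned unchanged.
static IOException toClientException(RemoteException re) {
    return re.unwrapRemoteException(InvalidRequestException.class);
}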

Example 29 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

The class TestDFSClientRetries, method testIdempotentAllocateBlockAndClose.

/**
   * Test that getAdditionalBlock() and close() are idempotent. This allows
   * a client to safely retry a call and still produce a correct
   * file. See HDFS-3031.
   */
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
    final String src = "/testIdempotentAllocateBlock";
    Path file = new Path(src);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Make addBlock() get called twice, as if it were retried
        // due to an IPC issue.
        doAnswer(new Answer<LocatedBlock>() {

            private int getBlockCount(LocatedBlock ret) throws IOException {
                LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
                assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
                return lb.getLocatedBlocks().size();
            }

            @Override
            public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
                LOG.info("Called addBlock: " + Arrays.toString(invocation.getArguments()));
                // First call: wrap NotReplicatedYetException in a RemoteException,
                // as the RPC layer does.
                final LocatedBlock ret;
                try {
                    ret = (LocatedBlock) invocation.callRealMethod();
                } catch (NotReplicatedYetException e) {
                    throw new RemoteException(e.getClass().getName(), e.getMessage());
                }
                final int blockCount = getBlockCount(ret);
                // Retrying should result in a new block at the end of the file
                // (abandoning the old one), and it should not throw
                // NotReplicatedYetException.
                final LocatedBlock ret2;
                try {
                    ret2 = (LocatedBlock) invocation.callRealMethod();
                } catch (NotReplicatedYetException e) {
                    throw new AssertionError("Unexpected exception", e);
                }
                final int blockCount2 = getBlockCount(ret2);
                // The retried RPC should not have added an extra block to the file.
                assertEquals(blockCount, blockCount2);
                return ret2;
            }
        }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any(), Mockito.<EnumSet<AddBlockFlag>>any());
        doAnswer(new Answer<Boolean>() {

            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                // complete() may return false a few times before it returns
                // true. We want to wait until it returns true, and then
                // make it retry one more time after that.
                LOG.info("Called complete:");
                if (!(Boolean) invocation.callRealMethod()) {
                    LOG.info("Complete call returned false, not faking a retry RPC");
                    return false;
                }
                // We got a successful close. Call it again to check idempotence.
                try {
                    boolean ret = (Boolean) invocation.callRealMethod();
                    LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
                    return ret;
                } catch (Throwable t) {
                    LOG.error("Idempotent retry threw exception", t);
                    throw t;
                }
            }
        }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
        OutputStream stm = client.create(file.toString(), true);
        try {
            AppendTestUtil.write(stm, 0, 10000);
            stm.close();
            stm = null;
        } finally {
            IOUtils.cleanup(LOG, stm);
        }
        // Make sure the mock was actually properly injected.
        Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(), Mockito.anyLong(), Mockito.<String[]>any(), Mockito.<EnumSet<AddBlockFlag>>any());
        Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
        AppendTestUtil.check(fs, file, 10000);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FileSystem(org.apache.hadoop.fs.FileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) Matchers.anyBoolean(org.mockito.Matchers.anyBoolean) NotReplicatedYetException(org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException) Test(org.junit.Test)
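
The idempotence this test verifies is what makes generic retry wrappers safe to place in front of these NameNode calls. A hedged sketch using Hadoop's retry utilities; the policy values are illustrative and are not what DFSClient actually configures:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

// Hypothetical helper: wrap a ClientProtocol proxy so failed calls are retried
// a few times with a fixed sleep. Retrying is only safe because addBlock() and
// complete() are idempotent, which is what the test above checks.
static ClientProtocol withRetries(ClientProtocol namenode) {
    RetryPolicy policy =
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
    return (ClientProtocol) RetryProxy.create(ClientProtocol.class, namenode, policy);
}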

Example 30 with RemoteException

Use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.

The class TestAuditLogger, method testBrokenLogger.

/**
   * Tests that a broken audit logger causes requests to fail.
   */
@Test
public void testBrokenLogger() throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY, BrokenAuditLogger.class.getName());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitClusterUp();
        FileSystem fs = cluster.getFileSystem();
        long time = System.currentTimeMillis();
        fs.setTimes(new Path("/"), time, time);
        fail("Expected exception due to broken audit logger.");
    } catch (RemoteException re) {
        // Expected.
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
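
BrokenAuditLogger is a test-only class whose implementation is not shown on this page. For illustration, a minimal logger in the same spirit, assuming the org.apache.hadoop.hdfs.server.namenode.AuditLogger interface; the class name and message are hypothetical, not the test's actual code:

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

// Hypothetical stand-in for BrokenAuditLogger: every audited operation fails,
// so the setTimes() call above surfaces on the client as a RemoteException.
public class AlwaysFailingAuditLogger implements AuditLogger {

    @Override
    public void initialize(Configuration conf) {
        // Nothing to configure.
    }

    @Override
    public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
            String cmd, String src, String dst, FileStatus stat) {
        throw new RuntimeException("injected audit logger failure");
    }
}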

Aggregations

RemoteException (org.apache.hadoop.ipc.RemoteException) - 99 uses
IOException (java.io.IOException) - 53 uses
Test (org.junit.Test) - 39 uses
Path (org.apache.hadoop.fs.Path) - 36 uses
Configuration (org.apache.hadoop.conf.Configuration) - 20 uses
FileNotFoundException (java.io.FileNotFoundException) - 19 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) - 13 uses
FileSystem (org.apache.hadoop.fs.FileSystem) - 12 uses
InterruptedIOException (java.io.InterruptedIOException) - 10 uses
AccessControlException (org.apache.hadoop.security.AccessControlException) - 10 uses
ServerName (org.apache.hadoop.hbase.ServerName) - 9 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) - 8 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) - 8 uses
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException) - 7 uses
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) - 7 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) - 7 uses
EOFException (java.io.EOFException) - 6 uses
ArrayList (java.util.ArrayList) - 6 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) - 6 uses
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException) - 6 uses