Example 6 with TestContext

Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.

From the class TestPipelinesFailover, method testPipelineRecoveryStress.

/**
   * Stress test for pipeline/lease recovery. Starts a number of
   * threads, each of which creates a file and has another client
   * break the lease. While these threads run, failover proceeds
   * back and forth between two namenodes.
   */
@Test(timeout = STRESS_RUNTIME * 3)
public void testPipelineRecoveryStress() throws Exception {
    // The following section of code is to help debug HDFS-6694 about
    // this test that fails from time to time due to "too many open files".
    //
    LOG.info("HDFS-6694 Debug Data BEGIN");
    String[][] scmds = new String[][] { { "/bin/sh", "-c", "ulimit -a" }, { "hostname" }, { "ifconfig", "-a" } };
    for (String[] scmd : scmds) {
        String scmd_str = StringUtils.join(" ", scmd);
        try {
            ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
            sce.execute();
            LOG.info("'" + scmd_str + "' output:\n" + sce.getOutput());
        } catch (IOException e) {
            LOG.warn("Error when running '" + scmd_str + "'", e);
        }
    }
    LOG.info("HDFS-6694 Debug Data END");
    HAStressTestHarness harness = new HAStressTestHarness();
    // Disable permissions so that another user can recover the lease.
    harness.conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    // This test triggers rapid NN failovers.  The client retry policy uses an
    // exponential backoff.  This can quickly lead to long sleep times and even
    // timeout the whole test.  Cap the sleep time at 1s to prevent this.
    harness.conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 1000);
    final MiniDFSCluster cluster = harness.startCluster();
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        FileSystem fs = harness.getFailoverFs();
        DistributedFileSystem fsAsOtherUser = createFsAsOtherUser(cluster, harness.conf);
        TestContext testers = new TestContext();
        for (int i = 0; i < STRESS_NUM_THREADS; i++) {
            Path p = new Path("/test-" + i);
            testers.addThread(new PipelineTestThread(testers, fs, fsAsOtherUser, p));
        }
        // Start a separate thread which will make sure that replication
        // happens quickly by triggering deletion reports and replication
        // work calculation frequently.
        harness.addReplicationTriggerThread(500);
        harness.addFailoverThread(5000);
        harness.startThreads();
        testers.startThreads();
        testers.waitFor(STRESS_RUNTIME);
        testers.stop();
        harness.stopThreads();
    } finally {
        System.err.println("===========================\n\n\n\n");
        harness.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ShellCommandExecutor(org.apache.hadoop.util.Shell.ShellCommandExecutor) TestContext(org.apache.hadoop.test.MultithreadedTestUtil.TestContext) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
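
The PipelineTestThread worker registered above is not included in this excerpt. As a rough, hypothetical sketch (the real class in TestPipelinesFailover may differ), such a worker can be built on MultithreadedTestUtil.RepeatingTestThread: each iteration writes to the file through the failover client, then lets the second client, running as another user, force lease recovery. Imports for RepeatingTestThread, FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), and IOUtils (org.apache.hadoop.io.IOUtils) are assumed.

// Hypothetical sketch only, not the actual PipelineTestThread implementation.
static class LeaseBreakerThread extends MultithreadedTestUtil.RepeatingTestThread {
    private final FileSystem fs;
    private final DistributedFileSystem fsAsOtherUser;
    private final Path path;

    LeaseBreakerThread(TestContext ctx, FileSystem fs,
            DistributedFileSystem fsAsOtherUser, Path path) {
        super(ctx);
        this.fs = fs;
        this.fsAsOtherUser = fsAsOtherUser;
        this.path = path;
    }

    @Override
    public void doAnAction() throws Exception {
        // Open a writer so the file has an active lease and a write pipeline.
        FSDataOutputStream stm = fs.create(path, true);
        try {
            stm.write(new byte[1024]);
            stm.hflush();
            // A different user forces lease recovery, breaking the writer's lease.
            fsAsOtherUser.recoverLease(path);
        } finally {
            // The original writer's lease may already be gone; closeStream ignores the error.
            IOUtils.closeStream(stm);
        }
        fs.delete(path, true);
    }
}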

Example 7 with TestContext

Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.

From the class TestHAStateTransitions, method testTransitionSynchronization.

/**
   * Regression test for HDFS-2693: when doing state transitions, we need to
   * lock the FSNamesystem so that we don't end up doing any writes while it's
   * "in between" states.
   * This test case starts up several client threads which do mutation operations
   * while flipping a NN back and forth from active to standby.
   */
@Test(timeout = 120000)
public void testTransitionSynchronization() throws Exception {
    Configuration conf = new Configuration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0).build();
    try {
        cluster.waitActive();
        ReentrantReadWriteLock spyLock = NameNodeAdapter.spyOnFsLock(cluster.getNameNode(0).getNamesystem());
        Mockito.doAnswer(new GenericTestUtils.SleepAnswer(50)).when(spyLock).writeLock();
        final FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        TestContext ctx = new TestContext();
        for (int i = 0; i < 50; i++) {
            final int finalI = i;
            ctx.addThread(new RepeatingTestThread(ctx) {

                @Override
                public void doAnAction() throws Exception {
                    Path p = new Path("/test-" + finalI);
                    fs.mkdirs(p);
                    fs.delete(p, true);
                }
            });
        }
        ctx.addThread(new RepeatingTestThread(ctx) {

            @Override
            public void doAnAction() throws Exception {
                cluster.transitionToStandby(0);
                Thread.sleep(50);
                cluster.transitionToActive(0);
            }
        });
        ctx.startThreads();
        ctx.waitFor(20000);
        ctx.stop();
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) TestContext(org.apache.hadoop.test.MultithreadedTestUtil.TestContext) GenericTestUtils(org.apache.hadoop.test.GenericTestUtils) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock) IOException(java.io.IOException) RepeatingTestThread(org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)

Example 8 with TestContext

Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.

From the class TestMultithreadedTestUtil, method testThreadFails.

@Test
public void testThreadFails() throws Exception {
    TestContext ctx = new TestContext();
    ctx.addThread(new TestingThread(ctx) {

        @Override
        public void doWork() throws Exception {
            fail(FAIL_MSG);
        }
    });
    ctx.startThreads();
    long st = Time.now();
    try {
        ctx.waitFor(30000);
        fail("waitFor did not throw");
    } catch (RuntimeException rte) {
        // expected
        assertEquals(FAIL_MSG, rte.getCause().getMessage());
    }
    long et = Time.now();
    // Test shouldn't have waited the full 30 seconds, since
    // the thread throws faster than that
    assertTrue("Test took " + (et - st) + "ms", et - st < 5000);
}
Also used : TestContext(org.apache.hadoop.test.MultithreadedTestUtil.TestContext) TestingThread(org.apache.hadoop.test.MultithreadedTestUtil.TestingThread) IOException(java.io.IOException) Test(org.junit.Test)

Example 9 with TestContext

Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.

From the class TestMultithreadedTestUtil, method testNoErrors.

@Test
public void testNoErrors() throws Exception {
    final AtomicInteger threadsRun = new AtomicInteger();
    TestContext ctx = new TestContext();
    for (int i = 0; i < 3; i++) {
        ctx.addThread(new TestingThread(ctx) {

            @Override
            public void doWork() throws Exception {
                threadsRun.incrementAndGet();
            }
        });
    }
    assertEquals(0, threadsRun.get());
    ctx.startThreads();
    long st = Time.now();
    ctx.waitFor(30000);
    long et = Time.now();
    // All threads should have run
    assertEquals(3, threadsRun.get());
    // Test shouldn't have waited the full 30 seconds, since
    // the threads exited faster than that.
    assertTrue("Test took " + (et - st) + "ms", et - st < 5000);
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestContext(org.apache.hadoop.test.MultithreadedTestUtil.TestContext) TestingThread(org.apache.hadoop.test.MultithreadedTestUtil.TestingThread) IOException(java.io.IOException) Test(org.junit.Test)

Example 10 with TestContext

Use of org.apache.hadoop.test.MultithreadedTestUtil.TestContext in project hadoop by apache.

From the class RPCCallBenchmark, method setupClientTestContext.

private TestContext setupClientTestContext(final MyOptions opts) throws IOException, InterruptedException {
    if (opts.clientThreads <= 0) {
        return null;
    }
    // Set up a separate proxy for each client thread,
    // rather than making them share TCP pipes.
    int numProxies = opts.clientThreads;
    final RpcServiceWrapper[] proxies = new RpcServiceWrapper[numProxies];
    for (int i = 0; i < numProxies; i++) {
        proxies[i] = UserGroupInformation.createUserForTesting("proxy-" + i, new String[] {}).doAs(new PrivilegedExceptionAction<RpcServiceWrapper>() {

            @Override
            public RpcServiceWrapper run() throws Exception {
                return createRpcClient(opts);
            }
        });
    }
    // Create an echo message of the desired length
    final StringBuilder msgBuilder = new StringBuilder(opts.msgSize);
    for (int c = 0; c < opts.msgSize; c++) {
        msgBuilder.append('x');
    }
    final String echoMessage = msgBuilder.toString();
    // Create the clients in a test context
    TestContext ctx = new TestContext();
    for (int i = 0; i < opts.clientThreads; i++) {
        final RpcServiceWrapper proxy = proxies[i % numProxies];
        ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {

            @Override
            public void doAnAction() throws Exception {
                proxy.doEcho(echoMessage);
                callCount.incrementAndGet();
            }
        });
    }
    return ctx;
}
Also used : TestContext(org.apache.hadoop.test.MultithreadedTestUtil.TestContext) MultithreadedTestUtil(org.apache.hadoop.test.MultithreadedTestUtil) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) IOException(java.io.IOException) ParseException(org.apache.commons.cli.ParseException)
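
The TestContext returned by setupClientTestContext is not started inside this method; a caller drives it. A minimal, illustrative driver using only the lifecycle calls shown in the other examples (msecsToRun is a made-up variable for this sketch, not taken from the actual RPCCallBenchmark source):

// Illustrative only: how a caller might drive the returned client context.
TestContext ctx = setupClientTestContext(opts);
if (ctx != null) {
    // Launch one repeating echo thread per client.
    ctx.startThreads();
    // Blocks for the run time; the repeating threads never finish on their own,
    // and any failure from a client thread is rethrown here.
    ctx.waitFor(msecsToRun);
    // Stop the repeating client threads.
    ctx.stop();
}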

Aggregations

TestContext (org.apache.hadoop.test.MultithreadedTestUtil.TestContext): 11
Test (org.junit.Test): 8
IOException (java.io.IOException): 7
FileSystem (org.apache.hadoop.fs.FileSystem): 4
Path (org.apache.hadoop.fs.Path): 4
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
TestingThread (org.apache.hadoop.test.MultithreadedTestUtil.TestingThread): 3
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
RepeatingTestThread (org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread): 2
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 1
ArrayList (java.util.ArrayList): 1
ReentrantReadWriteLock (java.util.concurrent.locks.ReentrantReadWriteLock): 1
ParseException (org.apache.commons.cli.ParseException): 1
AlwaysSucceedFencer (org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer): 1
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1
MiniDFSNNTopology (org.apache.hadoop.hdfs.MiniDFSNNTopology): 1
Server (org.apache.hadoop.ipc.RPC.Server): 1
GenericTestUtils (org.apache.hadoop.test.GenericTestUtils): 1
MultithreadedTestUtil (org.apache.hadoop.test.MultithreadedTestUtil): 1
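
Taken together, the examples show the full TestContext lifecycle: register workers with addThread (a one-shot TestingThread or a looping RepeatingTestThread), start them with startThreads, block with waitFor (which rethrows any worker failure wrapped in a RuntimeException, as in Example 8), and finally call stop. A minimal self-contained sketch using only the calls demonstrated above:

import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.test.MultithreadedTestUtil;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;

public class TestContextLifecycleSketch {
    public static void main(String[] args) throws Exception {
        final AtomicLong iterations = new AtomicLong();
        TestContext ctx = new TestContext();

        // One-shot worker: doWork() runs exactly once, then the thread exits.
        ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
            @Override
            public void doWork() throws Exception {
                iterations.incrementAndGet();
            }
        });

        // Looping worker: doAnAction() is called repeatedly until stop().
        ctx.addThread(new MultithreadedTestUtil.RepeatingTestThread(ctx) {
            @Override
            public void doAnAction() throws Exception {
                iterations.incrementAndGet();
            }
        });

        ctx.startThreads();
        // Returns after roughly 2 seconds because the repeating worker never
        // finishes; if either worker had thrown, the failure would surface here.
        ctx.waitFor(2000);
        ctx.stop();

        System.out.println("iterations = " + iterations.get());
    }
}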