
Example 46 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in the Apache Hadoop project.

From the class TestBlockReaderFactory, method testMultipleWaitersOnShortCircuitCache.

/**
   * Test the case where we have multiple threads waiting on the
   * ShortCircuitCache delivering a certain ShortCircuitReplica.
   *
   * In this case, there should only be one call to
   * createShortCircuitReplicaInfo.  This one replica should be shared
   * by all threads.
   */
@Test(timeout = 60000)
public void testMultipleWaitersOnShortCircuitCache() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicBoolean creationIsBlocked = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            Uninterruptibles.awaitUninterruptibly(latch);
            if (!creationIsBlocked.compareAndSet(true, false)) {
                Assert.fail("there were multiple calls to createShortCircuitReplicaInfo. Only one was expected.");
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testMultipleWaitersOnShortCircuitCache", sockDir);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int SEED = 0xFADED;
    final int NUM_THREADS = 10;
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                byte[] contents = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
                Assert.assertFalse(creationIsBlocked.get());
                byte[] expected = DFSTestUtil.calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
                Assert.assertTrue(Arrays.equals(contents, expected));
            } catch (Throwable e) {
                LOG.error("readerRunnable error", e);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    // give the reader threads time to block on the latch
    Thread.sleep(500);
    latch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
Also used: org.apache.hadoop.fs.Path, org.apache.hadoop.hdfs.MiniDFSCluster, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder, java.util.concurrent.CountDownLatch, org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache, org.apache.hadoop.hdfs.DistributedFileSystem, org.apache.hadoop.net.unix.TemporarySocketDirectory, java.util.concurrent.atomic.AtomicBoolean, org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo, org.junit.Test
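
The core CountDownLatch pattern in the test above is a single-count gate shared by many waiters: every thread blocks in await(), and one countDown() releases them all at once. A minimal, self-contained sketch of just that pattern (class and variable names here are illustrative, not from the Hadoop test):

import java.util.concurrent.CountDownLatch;

public class MultipleWaitersDemo {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch gate = new CountDownLatch(1);
        final int numThreads = 10;
        Thread[] threads = new Thread[numThreads];
        for (int i = 0; i < numThreads; i++) {
            final int id = i;
            threads[i] = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        // every waiter blocks here until the gate opens
                        gate.await();
                        System.out.println("thread " + id + " released");
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                }
            });
            threads[i].start();
        }
        // give the threads time to reach await(), as the test does
        Thread.sleep(500);
        // a single countDown() drops the count to zero and releases all waiters
        gate.countDown();
        for (Thread t : threads) {
            t.join();
        }
    }
}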

Example 47 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in the Apache Hadoop project.

From the class TestDNFencing, method testRBWReportArrivesAfterEdits.

/**
   * Another regression test for HDFS-2742. This tests the following sequence:
   * - DN does a block report while file is open. This BR contains
   *   the block in RBW state.
   * - The block report is delayed in reaching the standby.
   * - The file is closed.
   * - The standby processes the OP_ADD and OP_CLOSE operations before
   *   the RBW block report arrives.
   * - The standby should not mark the block as corrupt.
   */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
    final CountDownLatch brFinished = new CountDownLatch(1);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {

        @Override
        protected Object passThrough(InvocationOnMock invocation) throws Throwable {
            try {
                return super.passThrough(invocation);
            } finally {
                // inform the test that our block report went through.
                brFinished.countDown();
            }
        }
    };
    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
        AppendTestUtil.write(out, 0, 10);
        out.hflush();
        DataNode dn = cluster.getDataNodes().get(0);
        DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn2);
        Mockito.doAnswer(delayer).when(spy).blockReport(Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(), Mockito.<StorageBlockReport[]>anyObject(), Mockito.<BlockReportContext>anyObject());
        dn.scheduleAllBlockReport(0);
        delayer.waitForCall();
    } finally {
        IOUtils.closeStream(out);
    }
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    delayer.proceed();
    brFinished.await();
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
Also used: org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB, org.mockito.invocation.InvocationOnMock, org.apache.hadoop.hdfs.server.datanode.DataNode, org.apache.hadoop.hdfs.server.protocol.StorageBlockReport, org.apache.hadoop.test.GenericTestUtils.DelayAnswer, org.apache.hadoop.fs.FSDataOutputStream, java.util.concurrent.CountDownLatch, org.junit.Test
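
The brFinished latch above is the usual "wait for an asynchronous callback" idiom: the callback counts down in a finally block, so the waiter is released even if the wrapped call throws, and the test thread blocks in await(). A minimal sketch of the same idiom (the executor and the simulated work are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class CallbackLatchDemo {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch done = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(new Runnable() {

            @Override
            public void run() {
                try {
                    // simulate the delayed work (the block report in the test)
                    Thread.sleep(200);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                } finally {
                    // count down in finally so the waiter is released even on failure
                    done.countDown();
                }
            }
        });
        // block until the asynchronous work has signalled completion
        done.await();
        executor.shutdown();
    }
}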

Example 48 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in the Apache Hadoop project.

From the class TestRPC, method testRPCInterrupted.

@Test(timeout = 30000)
public void testRPCInterrupted() throws Exception {
    Server server;
    RPC.Builder builder = newServerBuilder(conf).setNumHandlers(5).setVerbose(true).setSecretManager(null);
    server = setupTestServer(builder);
    int numConcurrentRPC = 200;
    final CyclicBarrier barrier = new CyclicBarrier(numConcurrentRPC);
    final CountDownLatch latch = new CountDownLatch(numConcurrentRPC);
    final AtomicBoolean leaderRunning = new AtomicBoolean(true);
    final AtomicReference<Throwable> error = new AtomicReference<>();
    Thread leaderThread = null;
    try {
        for (int i = 0; i < numConcurrentRPC; i++) {
            final int num = i;
            final TestRpcService proxy = getClient(addr, conf);
            Thread rpcThread = new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        barrier.await();
                        // thread 0 is the leader: it loops until it is
                        // interrupted; the other threads loop while the
                        // leader is still running
                        while (num == 0 || leaderRunning.get()) {
                            proxy.slowPing(null, newSlowPingRequest(false));
                        }
                        // one more ping after the leader has stopped, to
                        // verify the interrupt did not break other clients
                        proxy.slowPing(null, newSlowPingRequest(false));
                    } catch (Exception e) {
                        if (num == 0) {
                            leaderRunning.set(false);
                        } else {
                            error.set(e);
                        }
                        LOG.error("thread " + num, e);
                    } finally {
                        latch.countDown();
                    }
                }
            });
            rpcThread.start();
            if (leaderThread == null) {
                leaderThread = rpcThread;
            }
        }
        // let threads get past the barrier
        Thread.sleep(1000);
        // stop a single thread
        while (leaderRunning.get()) {
            leaderThread.interrupt();
        }
        latch.await();
        // should not cause any other thread to get an error
        assertTrue("rpc got exception " + error.get(), error.get() == null);
    } finally {
        server.stop();
    }
}
Also used: java.util.concurrent.atomic.AtomicReference, java.util.concurrent.CountDownLatch, com.google.protobuf.ServiceException, org.apache.hadoop.security.authorize.AuthorizationException, java.io.InterruptedIOException, java.net.SocketTimeoutException, java.net.ConnectException, org.apache.hadoop.HadoopIllegalArgumentException, java.io.IOException, java.util.concurrent.ExecutionException, org.apache.hadoop.security.AccessControlException, java.util.concurrent.CyclicBarrier, java.util.concurrent.atomic.AtomicBoolean, org.junit.Test
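
testRPCInterrupted pairs a CyclicBarrier, which lines all the client threads up so their RPCs start together, with a CountDownLatch initialized to the thread count, which lets the main thread wait until every client has finished. A minimal sketch of that start-together/finish-together pairing (the worker body is stubbed out; in the test it issues RPCs):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

public class BarrierAndLatchDemo {

    public static void main(String[] args) throws InterruptedException {
        final int numThreads = 8;
        final CyclicBarrier startBarrier = new CyclicBarrier(numThreads);
        final CountDownLatch allDone = new CountDownLatch(numThreads);
        for (int i = 0; i < numThreads; i++) {
            new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        // wait until every worker is ready, then start together
                        startBarrier.await();
                        // ... per-thread work goes here ...
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        // always signal completion, even after an exception
                        allDone.countDown();
                    }
                }
            }).start();
        }
        // wait for every worker to finish
        allDone.await();
    }
}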

Example 49 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in the Apache Hadoop project.

From the class TestRPC, method testExternalCall.

@Test(timeout = 30000)
public void testExternalCall() throws Exception {
    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting("user123", new String[0]);
    final IOException expectedIOE = new IOException("boom");
    // use 1 handler so the callq can be plugged
    final Server server = setupTestServer(conf, 1);
    try {
        final AtomicBoolean result = new AtomicBoolean();
        ExternalCall<String> remoteUserCall = newExtCall(ugi, new PrivilegedExceptionAction<String>() {

            @Override
            public String run() throws Exception {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        ExternalCall<String> exceptionCall = newExtCall(ugi, new PrivilegedExceptionAction<String>() {

            @Override
            public String run() throws Exception {
                throw expectedIOE;
            }
        });
        final CountDownLatch latch = new CountDownLatch(1);
        final CyclicBarrier barrier = new CyclicBarrier(2);
        ExternalCall<Void> barrierCall = newExtCall(ugi, new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                // notify we are in a handler and then wait to keep the callq
                // plugged up
                latch.countDown();
                barrier.await();
                return null;
            }
        });
        server.queueCall(barrierCall);
        server.queueCall(exceptionCall);
        server.queueCall(remoteUserCall);
        // wait for barrier call to enter the handler, check that the other 2
        // calls are actually queued
        latch.await();
        assertEquals(2, server.getCallQueueLen());
        // unplug the callq
        barrier.await();
        barrierCall.get();
        // verify correct ugi is used
        String answer = remoteUserCall.get();
        assertEquals(ugi.getUserName(), answer);
        try {
            exceptionCall.get();
            fail("didn't throw");
        } catch (ExecutionException ee) {
            assertTrue(ee.getCause() instanceof IOException);
            assertEquals(expectedIOE.getMessage(), ee.getCause().getMessage());
        }
    } finally {
        server.stop();
    }
}
Also used: java.io.InterruptedIOException, java.io.IOException, java.util.concurrent.CountDownLatch, com.google.protobuf.ServiceException, org.apache.hadoop.security.authorize.AuthorizationException, java.net.SocketTimeoutException, java.net.ConnectException, org.apache.hadoop.HadoopIllegalArgumentException, java.util.concurrent.ExecutionException, org.apache.hadoop.security.AccessControlException, java.util.concurrent.CyclicBarrier, java.util.concurrent.atomic.AtomicBoolean, org.apache.hadoop.security.UserGroupInformation, org.junit.Test
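
testExternalCall drives the latch and the barrier in opposite directions: latch.countDown() inside the handler tells the test that the single handler slot is now occupied, and barrier.await() then parks the handler until the test has made its queue-length assertions. A minimal sketch of that plug-and-release choreography (the "handler" here is just a plain thread):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

public class PlugAndReleaseDemo {

    public static void main(String[] args) throws Exception {
        final CountDownLatch inHandler = new CountDownLatch(1);
        final CyclicBarrier unplug = new CyclicBarrier(2);
        Thread handler = new Thread(new Runnable() {

            @Override
            public void run() {
                try {
                    // signal: the handler slot is now occupied
                    inHandler.countDown();
                    // stay plugged until the main thread releases us
                    unplug.await();
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
        handler.start();
        // wait until the handler is definitely busy
        inHandler.await();
        // ... assertions while the handler is plugged would go here ...
        // unplug: both parties have now reached the barrier
        unplug.await();
        handler.join();
    }
}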

Example 50 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in the Apache Hadoop project.

From the class TestIPC, method checkBlocking.

// goal is to jam a handler with a connection, fill the callq with
// connections, in turn jamming the readers - then flood the server and
// ensure that the listener blocks when the reader connection queues fill
@SuppressWarnings("unchecked")
private void checkBlocking(int readers, int readerQ, int callQ) throws Exception {
    // a single handler keeps the accounting below simple
    int handlers = 1;
    final Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY, readerQ);
    // send in enough clients to block up the handlers, callq, and readers
    final int initialClients = readers + callQ + handlers;
    // max connections we should ever end up accepting at once
    // 1 = listener
    final int maxAccept = initialClients + readers * readerQ + 1;
    // stress it with 2X the max
    int clients = maxAccept * 2;
    final AtomicInteger failures = new AtomicInteger(0);
    final CountDownLatch callFinishedLatch = new CountDownLatch(clients);
    // start server
    final TestServerQueue server = new TestServerQueue(clients, readers, callQ, handlers, conf);
    CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox.getInternalState(server, "callQueue"));
    Whitebox.setInternalState(server, "callQueue", spy);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    Client.setConnectTimeout(conf, 10000);
    // instantiate the threads, will start in batches
    Thread[] threads = new Thread[clients];
    for (int i = 0; i < clients; i++) {
        threads[i] = new Thread(new Runnable() {

            @Override
            public void run() {
                Client client = new Client(LongWritable.class, conf);
                try {
                    call(client, new LongWritable(Thread.currentThread().getId()), addr, 60000, conf);
                } catch (Throwable e) {
                    LOG.error(e);
                    failures.incrementAndGet();
                    return;
                } finally {
                    callFinishedLatch.countDown();
                    client.stop();
                }
            }
        });
    }
    // start the initial clients: the first blocks in a call on the handler,
    // and the others race to fill the callq and the reader queues
    for (int i = 0; i < initialClients; i++) {
        threads[i].start();
        if (i == 0) {
            // let first reader block in a call
            server.firstCallLatch.await();
        }
        // wait until reader put a call to callQueue, to make sure all readers
        // are blocking on the queue after initialClients threads are started.
        verify(spy, timeout(100).times(i + 1)).put(Mockito.<Call>anyObject());
    }
    try {
        // wait till everything is slotted, should happen immediately
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= initialClients;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open.");
    }
    LOG.info("(initial clients) need:" + initialClients + " connections have:" + server.getNumOpenConnections());
    LOG.info("ipc layer should be blocked");
    assertEquals(callQ, server.getCallQueueLen());
    assertEquals(initialClients, server.getNumOpenConnections());
    // connection queues should fill and then the listener should block
    for (int i = initialClients; i < clients; i++) {
        threads[i].start();
    }
    Thread.sleep(10);
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= maxAccept;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open until maxAccept.");
    }
    LOG.info("(max clients) need:" + maxAccept + " connections have:" + server.getNumOpenConnections());
    // check a few times to make sure we didn't go over
    for (int i = 0; i < 4; i++) {
        assertEquals(maxAccept, server.getNumOpenConnections());
        Thread.sleep(100);
    }
    // sanity check that no calls have finished
    assertEquals(clients, callFinishedLatch.getCount());
    LOG.info("releasing the calls");
    server.callBlockLatch.countDown();
    callFinishedLatch.await();
    for (Thread t : threads) {
        t.join();
    }
    assertEquals(0, failures.get());
    server.stop();
}
Also used: org.apache.hadoop.ipc.Server.Call, org.apache.hadoop.conf.Configuration, java.net.InetSocketAddress, java.util.concurrent.CountDownLatch, java.util.concurrent.atomic.AtomicInteger, org.apache.hadoop.io.LongWritable, java.util.concurrent.atomic.AtomicBoolean, java.util.concurrent.TimeoutException, org.apache.hadoop.net.ConnectTimeoutException, java.net.SocketTimeoutException
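
checkBlocking reads callFinishedLatch.getCount() to assert that no call has finished before it opens server.callBlockLatch. Note that getCount() is only a snapshot, suitable for sanity checks like this, not for synchronization. A minimal sketch of that check-then-release sequence (the client count and worker body are illustrative):

import java.util.concurrent.CountDownLatch;

public class CheckThenReleaseDemo {

    public static void main(String[] args) throws InterruptedException {
        final int clients = 4;
        final CountDownLatch block = new CountDownLatch(1);
        final CountDownLatch finished = new CountDownLatch(clients);
        for (int i = 0; i < clients; i++) {
            new Thread(new Runnable() {

                @Override
                public void run() {
                    try {
                        // every client parks here, as in the test
                        block.await();
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    } finally {
                        finished.countDown();
                    }
                }
            }).start();
        }
        // getCount() is only a snapshot; here it sanity-checks that
        // nothing has finished before the release
        if (finished.getCount() != clients) {
            throw new AssertionError("a client finished before the release");
        }
        // release every client, then wait for them all to finish
        block.countDown();
        finished.await();
    }
}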

Aggregations

Classes most often appearing together with CountDownLatch (java.util.concurrent.CountDownLatch) across its 5355 indexed usages:

Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168