Example 41 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

The class ZooKeeperStateHandleStoreITCase, method testRemoveWithCallback.

/**
 * Tests that state handles are correctly removed with a callback.
 */
@Test
public void testRemoveWithCallback() throws Exception {
    // Setup
    LongStateStorage stateHandleProvider = new LongStateStorage();
    ZooKeeperStateHandleStore<Long> store = new ZooKeeperStateHandleStore<>(ZooKeeper.getClient(), stateHandleProvider, Executors.directExecutor());
    // Config
    final String pathInZooKeeper = "/testRemoveWithCallback";
    final Long state = 27255442L;
    store.add(pathInZooKeeper, state);
    final CountDownLatch sync = new CountDownLatch(1);
    BackgroundCallback callback = mock(BackgroundCallback.class);
    doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(callback).processResult(eq(ZooKeeper.getClient()), any(CuratorEvent.class));
    // Test
    store.remove(pathInZooKeeper, callback);
    // Wait for the background removal to complete; checking the store's
    // contents before the callback fires would race with the async delete.
    sync.await();
    // Verify discarded and callback called
    assertEquals(0, ZooKeeper.getClient().getChildren().forPath("/").size());
    verify(callback, times(1)).processResult(eq(ZooKeeper.getClient()), any(CuratorEvent.class));
}
Also used: BackgroundCallback (org.apache.curator.framework.api.BackgroundCallback), CuratorEvent (org.apache.curator.framework.api.CuratorEvent), CountDownLatch (java.util.concurrent.CountDownLatch), InvocationOnMock (org.mockito.invocation.InvocationOnMock), Test (org.junit.Test)
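
The core idiom above, stripped of the ZooKeeper and Mockito machinery, is a one-shot latch that lets the test thread block until an asynchronous callback has fired. A minimal standalone sketch of that pattern, assuming nothing from Flink (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AsyncCallbackLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch sync = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Kick off the asynchronous operation; its completion callback
        // counts the latch down exactly once.
        executor.execute(() -> {
            // ... background work (e.g. removing a node) goes here ...
            sync.countDown();
        });
        // Block until the callback has run; the timeout keeps a broken
        // callback from hanging the caller forever.
        if (!sync.await(30, TimeUnit.SECONDS)) {
            throw new AssertionError("callback was never invoked");
        }
        executor.shutdown();
    }
}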

Example 42 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

The class RescalingITCase, method createJobGraphWithKeyedAndNonPartitionedOperatorState.

private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(int parallelism, int maxParallelism, int fixedParallelism, int numberKeys, int numberElements, boolean terminateAfterEmission, int checkpointingInterval) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.getConfig().setMaxParallelism(maxParallelism);
    env.enableCheckpointing(checkpointingInterval);
    env.setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(numberKeys, numberElements, terminateAfterEmission)).setParallelism(fixedParallelism).keyBy(new KeySelector<Integer, Integer>() {

        private static final long serialVersionUID = -7952298871120320940L;

        @Override
        public Integer getKey(Integer value) throws Exception {
            return value;
        }
    });
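    // The latch is published through a static field because Flink, not the
    // test, instantiates the flat-map operators (see the sketch after Example 43).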
    SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);
    DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));
    result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());
    return env.getStreamGraph().getJobGraph();
}
Also used: CountDownLatch (java.util.concurrent.CountDownLatch), TimeoutException (java.util.concurrent.TimeoutException), JobExecutionException (org.apache.flink.runtime.client.JobExecutionException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)

Example 43 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project flink by apache.

The class RescalingITCase, method createJobGraphWithKeyedState.

private static JobGraph createJobGraphWithKeyedState(int parallelism, int maxParallelism, int numberKeys, int numberElements, boolean terminateAfterEmission, int checkpointingInterval) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    if (0 < maxParallelism) {
        env.getConfig().setMaxParallelism(maxParallelism);
    }
    env.enableCheckpointing(checkpointingInterval);
    env.setRestartStrategy(RestartStrategies.noRestart());
    DataStream<Integer> input = env.addSource(new SubtaskIndexSource(numberKeys, numberElements, terminateAfterEmission)).keyBy(new KeySelector<Integer, Integer>() {

        private static final long serialVersionUID = -7952298871120320940L;

        @Override
        public Integer getKey(Integer value) throws Exception {
            return value;
        }
    });
    SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);
    DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));
    result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());
    return env.getStreamGraph().getJobGraph();
}
Also used: CountDownLatch (java.util.concurrent.CountDownLatch), TimeoutException (java.util.concurrent.TimeoutException), JobExecutionException (org.apache.flink.runtime.client.JobExecutionException), Tuple2 (org.apache.flink.api.java.tuple.Tuple2), StreamExecutionEnvironment (org.apache.flink.streaming.api.environment.StreamExecutionEnvironment)
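
Examples 42 and 43 share one coordination trick: because Flink instantiates the SubtaskIndexFlatMapper operators itself, the test cannot pass them a latch as a constructor argument, so the latch is published through a static field and awaited by the test driver once the job is running. A minimal sketch of the idiom, with plain threads standing in for operator instances (all names here are illustrative):

import java.util.concurrent.CountDownLatch;

public class StaticLatchSketch {

    // Published as a static field because the "workers" are created by a
    // framework rather than by the test and cannot receive the latch directly.
    static volatile CountDownLatch workCompletedLatch;

    public static void main(String[] args) throws InterruptedException {
        final int numberKeys = 4;
        workCompletedLatch = new CountDownLatch(numberKeys);
        for (int i = 0; i < numberKeys; i++) {
            new Thread(() -> {
                // ... process all elements for one key ...
                workCompletedLatch.countDown();
            }).start();
        }
        // Returns only after every worker has reported completion.
        workCompletedLatch.await();
    }
}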

Example 44 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestMutableMetrics, method testMutableRatesWithAggregationManyThreads.

@Test
public void testMutableRatesWithAggregationManyThreads() throws InterruptedException {
    final MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
    final int n = 10;
    long[] opCount = new long[n];
    double[] opTotalTime = new double[n];
    for (int i = 0; i < n; i++) {
        opCount[i] = 0;
        opTotalTime[i] = 0;
        // Initialize so that the getLongCounter() method doesn't complain
        rates.add("metric" + i, 0);
    }
    Thread[] threads = new Thread[n];
    final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length);
    final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1);
    final CountDownLatch secondAddsFinished = new CountDownLatch(threads.length);
    final CountDownLatch secondSnapshotsFinished = new CountDownLatch(1);
    long seed = new Random().nextLong();
    LOG.info("Random seed = " + seed);
    final Random sleepRandom = new Random(seed);
    for (int tIdx = 0; tIdx < threads.length; tIdx++) {
        final int threadIdx = tIdx;
        threads[threadIdx] = new Thread() {

            @Override
            public void run() {
                try {
                    for (int i = 0; i < 1000; i++) {
                        rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
                        // Sleep so additions can be interleaved with snapshots
                        Thread.sleep(sleepRandom.nextInt(5));
                    }
                    firstAddsFinished.countDown();
                    // Make sure all threads stay alive long enough for the first
                    // snapshot to complete; else their metrics may be lost to GC
                    firstSnapshotsFinished.await();
                    // Let half the threads continue with more metrics and let half die
                    if (threadIdx % 2 == 0) {
                        for (int i = 0; i < 1000; i++) {
                            rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
                        }
                        secondAddsFinished.countDown();
                        secondSnapshotsFinished.await();
                    } else {
                        secondAddsFinished.countDown();
                    }
                } catch (InterruptedException e) {
                    // Ignore
                }
            }
        };
    }
    for (Thread t : threads) {
        t.start();
    }
    // Snapshot concurrently while the threads are still adding metrics;
    // the running totals accumulate into opCount / opTotalTime.
    for (int i = 0; i < 100; i++) {
        snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
        Thread.sleep(sleepRandom.nextInt(20));
    }
    firstAddsFinished.await();
    // Final snapshot to grab any remaining metrics and then verify that
    // the totals are as expected
    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
    for (int i = 0; i < n; i++) {
        assertEquals("metric" + i + " count", 1001, opCount[i]);
        assertEquals("metric" + i + " total", 1500, opTotalTime[i], 1.0);
    }
    firstSnapshotsFinished.countDown();
    // After half of the threads die, ensure that the remaining ones still
    // add metrics correctly and that snapshot occurs correctly
    secondAddsFinished.await();
    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
    for (int i = 0; i < n; i++) {
        assertEquals("metric" + i + " count", 1501, opCount[i]);
        assertEquals("metric" + i + " total", 2250, opTotalTime[i], 1.0);
    }
    secondSnapshotsFinished.countDown();
}
Also used: Random (java.util.Random), CountDownLatch (java.util.concurrent.CountDownLatch), Test (org.junit.Test)
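
Example 44 pairs its latches into one-shot barriers: each "addsFinished" latch tells the main thread that every worker has completed a write phase, while each "snapshotsFinished" latch keeps the workers alive until a consistent snapshot has been taken, so no metrics are lost to dying threads. A condensed sketch of that handshake (thread count and names are illustrative):

import java.util.concurrent.CountDownLatch;

public class TwoPhaseBarrierSketch {

    public static void main(String[] args) throws InterruptedException {
        final int workers = 4;
        final CountDownLatch addsFinished = new CountDownLatch(workers);
        final CountDownLatch snapshotFinished = new CountDownLatch(1);
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                // Phase 1: perform the writes, then report completion.
                addsFinished.countDown();
                try {
                    // Stay alive until the coordinator has taken its snapshot.
                    snapshotFinished.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
        addsFinished.await();          // every worker finished phase 1
        // ... take a consistent snapshot while all workers are still alive ...
        snapshotFinished.countDown();  // release the workers
    }
}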

Example 45 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hadoop by apache.

The class TestBlockReaderFactory, method testShortCircuitCacheTemporaryFailure.

/**
   * Test the case where a short-circuit read first fails to complete and
   * then, on a later attempt, succeeds.
   * Any thread waiting on a cache load should receive the failure (if it
   * occurs); however, the failure result should not be cached. We want
   * to be able to retry later and succeed.
   */
@Test(timeout = 60000)
public void testShortCircuitCacheTemporaryFailure() throws Exception {
    BlockReaderTestUtil.enableBlockReaderFactoryTracing();
    final AtomicBoolean replicaCreationShouldFail = new AtomicBoolean(true);
    final AtomicBoolean testFailed = new AtomicBoolean(false);
    DFSInputStream.tcpReadsDisabledForTesting = true;
    BlockReaderFactory.createShortCircuitReplicaInfoCallback = new ShortCircuitCache.ShortCircuitReplicaCreator() {

        @Override
        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
            if (replicaCreationShouldFail.get()) {
                // Insert a short delay to increase the chance that one client
                // thread waits for the other client thread's failure via
                // a condition variable.
                Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
                return new ShortCircuitReplicaInfo();
            }
            return null;
        }
    };
    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
    Configuration conf = createShortCircuitConf("testShortCircuitCacheTemporaryFailure", sockDir);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String TEST_FILE = "/test_file";
    final int TEST_FILE_LEN = 4000;
    final int NUM_THREADS = 2;
    final int SEED = 0xFADED;
    final CountDownLatch gotFailureLatch = new CountDownLatch(NUM_THREADS);
    final CountDownLatch shouldRetryLatch = new CountDownLatch(1);
    DFSTestUtil.createFile(dfs, new Path(TEST_FILE), TEST_FILE_LEN, (short) 1, SEED);
    Runnable readerRunnable = new Runnable() {

        @Override
        public void run() {
            try {
                // First time should fail.
                List<LocatedBlock> locatedBlocks = cluster.getNameNode().getRpcServer().getBlockLocations(TEST_FILE, 0, TEST_FILE_LEN).getLocatedBlocks();
                // first block
                LocatedBlock lblock = locatedBlocks.get(0);
                BlockReader blockReader = null;
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                    Assert.fail("expected getBlockReader to fail the first time.");
                } catch (Throwable t) {
                    Assert.assertTrue("expected to see 'TCP reads were disabled " + "for testing' in exception " + t, t.getMessage().contains("TCP reads were disabled for testing"));
                } finally {
                    // keep findbugs happy
                    if (blockReader != null)
                        blockReader.close();
                }
                gotFailureLatch.countDown();
                shouldRetryLatch.await();
                // Second time should succeed.
                try {
                    blockReader = BlockReaderTestUtil.getBlockReader(cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
                } catch (Throwable t) {
                    LOG.error("error trying to retrieve a block reader " + "the second time.", t);
                    throw t;
                } finally {
                    if (blockReader != null)
                        blockReader.close();
                }
            } catch (Throwable t) {
                LOG.error("getBlockReader failure", t);
                testFailed.set(true);
            }
        }
    };
    Thread[] threads = new Thread[NUM_THREADS];
    for (int i = 0; i < NUM_THREADS; i++) {
        threads[i] = new Thread(readerRunnable);
        threads[i].start();
    }
    gotFailureLatch.await();
    replicaCreationShouldFail.set(false);
    shouldRetryLatch.countDown();
    for (int i = 0; i < NUM_THREADS; i++) {
        Uninterruptibles.joinUninterruptibly(threads[i]);
    }
    cluster.shutdown();
    sockDir.close();
    Assert.assertFalse(testFailed.get());
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), BlockReader (org.apache.hadoop.hdfs.BlockReader), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ShortCircuitCache (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CountDownLatch (java.util.concurrent.CountDownLatch), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), ShortCircuitReplicaInfo (org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo), Test (org.junit.Test)
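
Example 45 uses its two latches as a fail-then-retry handshake: gotFailureLatch proves that every reader thread observed the injected failure, and shouldRetryLatch releases the readers only after the coordinator has cleared the fault. A bare-bones sketch of that handshake, with the failure injection reduced to a boolean flag (all names here are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailThenRetrySketch {

    public static void main(String[] args) throws InterruptedException {
        final int readers = 2;
        final AtomicBoolean shouldFail = new AtomicBoolean(true);
        final CountDownLatch gotFailure = new CountDownLatch(readers);
        final CountDownLatch shouldRetry = new CountDownLatch(1);
        for (int i = 0; i < readers; i++) {
            new Thread(() -> {
                // First attempt: the injected fault is active, so record the failure.
                if (shouldFail.get()) {
                    gotFailure.countDown();
                }
                try {
                    // Wait until the coordinator has cleared the fault.
                    shouldRetry.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                // Second attempt: runs with shouldFail == false and should succeed.
            }).start();
        }
        gotFailure.await();        // every reader saw the injected failure
        shouldFail.set(false);     // clear the fault ...
        shouldRetry.countDown();   // ... and let the readers retry
    }
}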

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355
Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168