Example 96 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by apache.

The class TestAsyncTable, method testCheckAndDelete.

@Test
public void testCheckAndDelete() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch putLatch = new CountDownLatch(count + 1);
    table.put(new Put(row).addColumn(FAMILY, QUALIFIER, VALUE)).thenRun(() -> putLatch.countDown());
    IntStream.range(0, count).forEach(i -> table
        .put(new Put(row).addColumn(FAMILY, concat(QUALIFIER, i), VALUE))
        .thenRun(() -> putLatch.countDown()));
    putLatch.await();
    AtomicInteger successCount = new AtomicInteger(0);
    AtomicInteger successIndex = new AtomicInteger(-1);
    CountDownLatch deleteLatch = new CountDownLatch(count);
    IntStream.range(0, count).forEach(i -> table
        .checkAndDelete(row, FAMILY, QUALIFIER, VALUE,
            new Delete(row).addColumn(FAMILY, QUALIFIER).addColumn(FAMILY, concat(QUALIFIER, i)))
        .thenAccept(x -> {
        if (x) {
            successCount.incrementAndGet();
            successIndex.set(i);
        }
        deleteLatch.countDown();
    }));
    deleteLatch.await();
    assertEquals(1, successCount.get());
    Result result = table.get(new Get(row)).get();
    IntStream.range(0, count).forEach(i -> {
        if (i == successIndex.get()) {
            assertFalse(result.containsColumn(FAMILY, concat(QUALIFIER, i)));
        } else {
            assertArrayEquals(VALUE, result.getValue(FAMILY, concat(QUALIFIER, i)));
        }
    });
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) Supplier(java.util.function.Supplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestName(org.junit.rules.TestName) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) Parameterized(org.junit.runners.Parameterized) Bytes(org.apache.hadoop.hbase.util.Bytes) Pair(org.apache.hadoop.hbase.util.Pair) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) AfterClass(org.junit.AfterClass) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) Parameter(org.junit.runners.Parameterized.Parameter) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) BlockingQueue(java.util.concurrent.BlockingQueue) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) UncheckedIOException(java.io.UncheckedIOException) ExecutionException(java.util.concurrent.ExecutionException) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicLong(java.util.concurrent.atomic.AtomicLong) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) Assert.assertNull(org.junit.Assert.assertNull) Rule(org.junit.Rule) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Assert.assertFalse(org.junit.Assert.assertFalse) ForkJoinPool(java.util.concurrent.ForkJoinPool) Assert.assertEquals(org.junit.Assert.assertEquals)
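Why only one delete succeeds: each Delete removes both the check cell (FAMILY:QUALIFIER) and its own indexed column, so the first checkAndDelete to pass the value check also removes the cell that all later checks compare against, and they fail. The latch usage itself is a generic completion pattern: size a CountDownLatch to the number of asynchronous operations, count down in each completion callback, and await on the test thread. A minimal, HBase-free sketch of the same pattern using CompletableFuture (the class name and printed messages are illustrative only):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.stream.IntStream;

public class CompletionLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        int count = 10;
        // One permit per asynchronous task; await() blocks until every callback has fired.
        CountDownLatch done = new CountDownLatch(count);
        IntStream.range(0, count).forEach(i ->
            CompletableFuture.runAsync(() -> System.out.println("task " + i))
                .thenRun(done::countDown));
        done.await();
        System.out.println("all " + count + " tasks completed");
    }
}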

Example 97 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hbase by apache.

The class TestAsyncTable, method testAppend.

@Test
public void testAppend() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    char suffix = ':';
    AtomicLong suffixCount = new AtomicLong(0L);
    IntStream.range(0, count).forEachOrdered(i -> table
        .append(new Append(row).add(FAMILY, QUALIFIER, Bytes.toBytes("" + i + suffix)))
        .thenAccept(r -> {
        suffixCount.addAndGet(
            Bytes.toString(r.getValue(FAMILY, QUALIFIER)).chars().filter(x -> x == suffix).count());
        latch.countDown();
    }));
    latch.await();
    assertEquals((1 + count) * count / 2, suffixCount.get());
    String value = Bytes.toString(
        table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER));
    int[] actual = Arrays.asList(value.split("" + suffix)).stream()
        .mapToInt(Integer::parseInt).sorted().toArray();
    assertArrayEquals(IntStream.range(0, count).toArray(), actual);
}
Also used : IntStream(java.util.stream.IntStream) Arrays(java.util.Arrays) BeforeClass(org.junit.BeforeClass) RunWith(org.junit.runner.RunWith) Parameters(org.junit.runners.Parameterized.Parameters) ClientTests(org.apache.hadoop.hbase.testclassification.ClientTests) Supplier(java.util.function.Supplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TestName(org.junit.rules.TestName) Assert.assertArrayEquals(org.junit.Assert.assertArrayEquals) Parameterized(org.junit.runners.Parameterized) Bytes(org.apache.hadoop.hbase.util.Bytes) Pair(org.apache.hadoop.hbase.util.Pair) Before(org.junit.Before) TableName(org.apache.hadoop.hbase.TableName) AfterClass(org.junit.AfterClass) MediumTests(org.apache.hadoop.hbase.testclassification.MediumTests) Parameter(org.junit.runners.Parameterized.Parameter) Assert.assertTrue(org.junit.Assert.assertTrue) IOException(java.io.IOException) BlockingQueue(java.util.concurrent.BlockingQueue) Test(org.junit.Test) Category(org.junit.experimental.categories.Category) UncheckedIOException(java.io.UncheckedIOException) ExecutionException(java.util.concurrent.ExecutionException) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicLong(java.util.concurrent.atomic.AtomicLong) IOUtils(org.apache.commons.io.IOUtils) List(java.util.List) Assert.assertNull(org.junit.Assert.assertNull) Rule(org.junit.Rule) HBaseTestingUtility(org.apache.hadoop.hbase.HBaseTestingUtility) Assert.assertFalse(org.junit.Assert.assertFalse) ForkJoinPool(java.util.concurrent.ForkJoinPool) Assert.assertEquals(org.junit.Assert.assertEquals)
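The expected suffix total follows from HBase serializing appends on a single row: whichever append is applied k-th sees k suffix characters in the Result it gets back, so the callbacks accumulate 1 + 2 + ... + count = count * (count + 1) / 2, which is 55 for count = 10 and matches the assertEquals above. The latch.await() is also what makes reading suffixCount afterwards safe: per the CountDownLatch contract, actions before countDown() happen-before actions following a successful return from await(), so the accumulated value is visible to the test thread.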

Example 98 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hive by apache.

The class TestIOContextMap, method testSparkThreadLocal.

@Test
public void testSparkThreadLocal() throws Exception {
    // Test that input name does not change IOContext returned, and that each thread gets its own.
    final Configuration conf1 = new Configuration();
    conf1.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "spark");
    final Configuration conf2 = new Configuration(conf1);
    conf2.set(Utilities.INPUT_NAME, "Other input");
    final int THREAD_COUNT = 2;
    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
    @SuppressWarnings("unchecked") FutureTask<IOContext>[] tasks = new FutureTask[THREAD_COUNT];
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i] = new FutureTask<IOContext>(new Callable<IOContext>() {

            public IOContext call() throws Exception {
                syncThreadStart(cdlIn, cdlOut);
                IOContext c1 = IOContextMap.get(conf1), c2 = IOContextMap.get(conf2);
                assertSame(c1, c2);
                return c1;
            }
        });
        executor.execute(tasks[i]);
    }
    // Wait for all threads to be ready.
    cdlIn.await();
    // Release them at the same time.
    cdlOut.countDown();
    Set<IOContext> results = Sets.newIdentityHashSet();
    for (int i = 0; i < tasks.length; ++i) {
        // All the objects must be different.
        assertTrue(results.add(tasks[i].get()));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) FutureTask(java.util.concurrent.FutureTask) ExecutorService(java.util.concurrent.ExecutorService) CountDownLatch(java.util.concurrent.CountDownLatch) Callable(java.util.concurrent.Callable) Test(org.junit.Test)
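syncThreadStart is a helper defined elsewhere in TestIOContextMap and not shown in this snippet. A minimal sketch consistent with how it is called here, where each worker announces readiness on cdlIn and then parks on cdlOut until the main thread releases all workers at once, might look like:

    private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut)
            throws InterruptedException {
        // Tell the coordinating thread this worker is ready to run.
        cdlIn.countDown();
        // Block until the coordinator counts cdlOut down, releasing all workers together.
        cdlOut.await();
    }

This start-gate idiom maximizes the chance that the threads hit the code under test at the same moment, which is exactly what a thread-local contention test wants.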

Example 99 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project hive by apache.

The class TestIOContextMap, method testMRTezGlobalMap.

@Test
public void testMRTezGlobalMap() throws Exception {
    // Tests concurrent modification, and that results are the same per input across threads
    // but different between inputs.
    final int THREAD_COUNT = 2, ITER_COUNT = 1000;
    final AtomicInteger countdown = new AtomicInteger(ITER_COUNT);
    final CountDownLatch phase1End = new CountDownLatch(THREAD_COUNT);
    final IOContext[] results = new IOContext[ITER_COUNT];
    ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
    final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
    @SuppressWarnings("unchecked") FutureTask<Void>[] tasks = new FutureTask[THREAD_COUNT];
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i] = new FutureTask<Void>(new Callable<Void>() {

            public Void call() throws Exception {
                Configuration conf = new Configuration();
                syncThreadStart(cdlIn, cdlOut);
                // Phase 1 - create objects.
                while (true) {
                    int nextIx = countdown.decrementAndGet();
                    if (nextIx < 0)
                        break;
                    conf.set(Utilities.INPUT_NAME, "Input " + nextIx);
                    results[nextIx] = IOContextMap.get(conf);
                    if (nextIx == 0)
                        break;
                }
                phase1End.countDown();
                phase1End.await();
                // Phase 2 - verify we get the expected objects created by all threads.
                for (int i = 0; i < ITER_COUNT; ++i) {
                    conf.set(Utilities.INPUT_NAME, "Input " + i);
                    IOContext ctx = IOContextMap.get(conf);
                    assertSame(results[i], ctx);
                }
                return null;
            }
        });
        executor.execute(tasks[i]);
    }
    // Wait for all threads to be ready.
    cdlIn.await();
    // Release them at the same time.
    cdlOut.countDown();
    for (int i = 0; i < tasks.length; ++i) {
        tasks[i].get();
    }
    Set<IOContext> resultSet = Sets.newIdentityHashSet();
    for (int i = 0; i < results.length; ++i) {
        // All the objects must be different.
        assertTrue(resultSet.add(results[i]));
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) CountDownLatch(java.util.concurrent.CountDownLatch) Callable(java.util.concurrent.Callable) FutureTask(java.util.concurrent.FutureTask) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) Test(org.junit.Test)
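Note how phase1End doubles as a one-shot barrier: each worker counts it down after creating its share of objects and then awaits it, so no worker begins the verification phase until every worker has finished phase 1. A standalone sketch of this countDown-then-await idiom (thread count and messages are illustrative):

import java.util.concurrent.CountDownLatch;

public class LatchBarrierSketch {

    public static void main(String[] args) {
        final int threads = 2;
        final CountDownLatch phaseEnd = new CountDownLatch(threads);
        for (int t = 0; t < threads; ++t) {
            new Thread(() -> {
                System.out.println(Thread.currentThread().getName() + " finished phase 1");
                // Report completion, then wait for every other thread to report too.
                phaseEnd.countDown();
                try {
                    phaseEnd.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                System.out.println(Thread.currentThread().getName() + " entering phase 2");
            }).start();
        }
    }
}

Unlike a CyclicBarrier, a CountDownLatch cannot be reset, which is fine here because each latch guards exactly one transition.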

Example 100 with CountDownLatch

Use of java.util.concurrent.CountDownLatch in project kafka by apache.

The class KStreamAggregationIntegrationTest, method shouldCountSessionWindows.

@Test
public void shouldCountSessionWindows() throws Exception {
    final long sessionGap = 5 * 60 * 1000L;
    final long maintainMillis = sessionGap * 3;
    final long t1 = mockTime.milliseconds() - TimeUnit.MILLISECONDS.convert(1, TimeUnit.HOURS);
    final List<KeyValue<String, String>> t1Messages = Arrays.asList(
        new KeyValue<>("bob", "start"),
        new KeyValue<>("penny", "start"),
        new KeyValue<>("jo", "pause"),
        new KeyValue<>("emily", "pause"));
    IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(userSessionsStream, t1Messages,
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), t1);
    final long t2 = t1 + (sessionGap / 2);
    IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(userSessionsStream,
        Collections.singletonList(new KeyValue<>("emily", "resume")),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), t2);
    final long t3 = t1 + sessionGap + 1;
    IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(userSessionsStream,
        Arrays.asList(new KeyValue<>("bob", "pause"), new KeyValue<>("penny", "stop")),
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), t3);
    final long t4 = t3 + (sessionGap / 2);
    IntegrationTestUtils.produceKeyValuesSynchronouslyWithTimestamp(userSessionsStream,
        Arrays.asList(
            new KeyValue<>("bob", "resume"), // bob's session continues
            new KeyValue<>("jo", "resume")), // jo starts a new session
        TestUtils.producerConfig(CLUSTER.bootstrapServers(), StringSerializer.class, StringSerializer.class, new Properties()), t4);
    final Map<Windowed<String>, Long> results = new HashMap<>();
    final CountDownLatch latch = new CountDownLatch(11);
    builder.stream(Serdes.String(), Serdes.String(), userSessionsStream)
        .groupByKey(Serdes.String(), Serdes.String())
        .count(SessionWindows.with(sessionGap).until(maintainMillis), "UserSessionsStore")
        .toStream()
        .foreach(new ForeachAction<Windowed<String>, Long>() {

            @Override
            public void apply(final Windowed<String> key, final Long value) {
                results.put(key, value);
                latch.countDown();
            }
        });
    startStreams();
    latch.await(30, TimeUnit.SECONDS);
    assertThat(results.get(new Windowed<>("bob", new SessionWindow(t1, t1))), equalTo(1L));
    assertThat(results.get(new Windowed<>("penny", new SessionWindow(t1, t1))), equalTo(1L));
    assertThat(results.get(new Windowed<>("jo", new SessionWindow(t1, t1))), equalTo(1L));
    assertThat(results.get(new Windowed<>("jo", new SessionWindow(t4, t4))), equalTo(1L));
    assertThat(results.get(new Windowed<>("emily", new SessionWindow(t1, t2))), equalTo(2L));
    assertThat(results.get(new Windowed<>("bob", new SessionWindow(t3, t4))), equalTo(2L));
    assertThat(results.get(new Windowed<>("penny", new SessionWindow(t3, t3))), equalTo(1L));
}
Also used : KeyValue(org.apache.kafka.streams.KeyValue) HashMap(java.util.HashMap) Properties(java.util.Properties) CountDownLatch(java.util.concurrent.CountDownLatch) Windowed(org.apache.kafka.streams.kstream.Windowed) SessionWindow(org.apache.kafka.streams.kstream.internals.SessionWindow) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) Test(org.junit.Test)
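One caveat: latch.await(30, TimeUnit.SECONDS) returns false on timeout instead of throwing, and this test discards that return value, so a slow cluster surfaces as an opaque assertion failure on the results map rather than an explicit timeout. A common hardening, assuming org.junit.Assert.assertTrue is imported, would be:

    assertTrue("did not observe all 11 session-window updates within 30 seconds",
        latch.await(30, TimeUnit.SECONDS));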

Aggregations

CountDownLatch (java.util.concurrent.CountDownLatch): 5355
Test (org.junit.Test): 2594
IOException (java.io.IOException): 631
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 550
AtomicReference (java.util.concurrent.atomic.AtomicReference): 501
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 475
ArrayList (java.util.ArrayList): 471
QuickTest (com.hazelcast.test.annotation.QuickTest): 375
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 355
ExecutorService (java.util.concurrent.ExecutorService): 322
Test (org.testng.annotations.Test): 310
HazelcastInstance (com.hazelcast.core.HazelcastInstance): 251
List (java.util.List): 212
HashMap (java.util.HashMap): 207
HttpServletResponse (javax.servlet.http.HttpServletResponse): 207
ExecutionException (java.util.concurrent.ExecutionException): 203
HttpServletRequest (javax.servlet.http.HttpServletRequest): 189
Ignite (org.apache.ignite.Ignite): 188
ServletException (javax.servlet.ServletException): 183
TimeoutException (java.util.concurrent.TimeoutException): 168