
Example 6 with ConcurrentSkipListSet

use of java.util.concurrent.ConcurrentSkipListSet in project ignite by apache.

the class GridCacheContinuousQueryMultiNodesFilteringTest method testWithNodeFilter.

/**
     * @throws Exception If failed.
     */
public void testWithNodeFilter() throws Exception {
    List<QueryCursor> qryCursors = new ArrayList<>();
    final int nodesCnt = 3;
    startGridsMultiThreaded(nodesCnt);
    awaitPartitionMapExchange();
    CacheConfiguration ccfg = cacheConfiguration(new NodeFilterByRegexp(".*(0|1)$"));
    grid(0).createCache(ccfg);
    final AtomicInteger cntr = new AtomicInteger();
    final ConcurrentMap<ClusterNode, Set<Integer>> maps = new ConcurrentHashMap<>();
    final AtomicBoolean doubleNtfFail = new AtomicBoolean(false);
    CacheEntryUpdatedListener<Integer, Integer> lsnr = new CacheEntryUpdatedListener<Integer, Integer>() {

        @Override
        public void onUpdated(Iterable<CacheEntryEvent<? extends Integer, ? extends Integer>> evts) throws CacheEntryListenerException {
            for (CacheEntryEvent<? extends Integer, ? extends Integer> e : evts) {
                cntr.incrementAndGet();
                ClusterNode node = ((Ignite) e.getSource().unwrap(Ignite.class)).cluster().localNode();
                Set<Integer> set = maps.get(node);
                if (set == null) {
                    set = new ConcurrentSkipListSet<>();
                    Set<Integer> oldVal = maps.putIfAbsent(node, set);
                    set = oldVal != null ? oldVal : set;
                }
                if (!set.add(e.getValue()))
                    // flag the duplicate notification; asserted false below
                    doubleNtfFail.set(true);
            }
        }
    };
    for (int i = 0; i < nodesCnt; i++) {
        ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
        qry.setLocalListener(lsnr);
        Ignite ignite = grid(i);
        log.info("Try to start CQ on node: " + ignite.cluster().localNode().id());
        qryCursors.add(ignite.cache(ccfg.getName()).query(qry));
        log.info("CQ started on node: " + ignite.cluster().localNode().id());
    }
    client = true;
    startGrid(nodesCnt);
    awaitPartitionMapExchange();
    ContinuousQuery<Integer, Integer> qry = new ContinuousQuery<>();
    qry.setLocalListener(lsnr);
    qryCursors.add(grid(nodesCnt).cache(ccfg.getName()).query(qry));
    for (int i = 0; i <= nodesCnt; i++) {
        for (int key = 0; key < KEYS; key++) {
            int val = (i * KEYS) + key;
            grid(i).cache(ccfg.getName()).put(val, val);
        }
    }
    assertTrue(GridTestUtils.waitForCondition(new PA() {

        @Override
        public boolean apply() {
            return cntr.get() >= 2 * (nodesCnt + 1) * KEYS;
        }
    }, 5000L));
    assertFalse("Got duplicate", doubleNtfFail.get());
    for (int i = 0; i < (nodesCnt + 1) * KEYS; i++) {
        for (Map.Entry<ClusterNode, Set<Integer>> e : maps.entrySet()) assertTrue("Lost event on node: " + e.getKey().id() + ", event: " + i, e.getValue().remove(i));
    }
    for (Map.Entry<ClusterNode, Set<Integer>> e : maps.entrySet()) assertTrue("Unexpected event on node: " + e.getKey(), e.getValue().isEmpty());
    assertEquals("Not expected count of CQ", nodesCnt + 1, qryCursors.size());
    for (QueryCursor cur : qryCursors) cur.close();
}
Also used : Set(java.util.Set) ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet) ArrayList(java.util.ArrayList) ContinuousQuery(org.apache.ignite.cache.query.ContinuousQuery) CacheEntryUpdatedListener(javax.cache.event.CacheEntryUpdatedListener) Ignite(org.apache.ignite.Ignite) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) QueryCursor(org.apache.ignite.cache.query.QueryCursor) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) ClusterNode(org.apache.ignite.cluster.ClusterNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) PA(org.apache.ignite.internal.util.typedef.PA) ConcurrentMap(java.util.concurrent.ConcurrentMap) Map(java.util.Map)
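
The listener in this test lazily creates one ConcurrentSkipListSet per cluster node with the get/putIfAbsent idiom, then relies on Set.add returning false to detect duplicate notifications. A minimal standalone sketch of the same pattern, using computeIfAbsent as the more compact equivalent; the class and method names below are illustrative and not part of the Ignite test:

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListSet;

public class PerKeyEventSets {

    private final ConcurrentMap<String, Set<Integer>> sets = new ConcurrentHashMap<>();

    /** Records a value for the given node id; returns false if it was already seen. */
    public boolean record(String nodeId, int value) {
        // computeIfAbsent collapses the get/putIfAbsent dance from the listener above
        // into a single atomic get-or-create step.
        Set<Integer> set = sets.computeIfAbsent(nodeId, k -> new ConcurrentSkipListSet<>());
        return set.add(value);
    }

    public static void main(String[] args) {
        PerKeyEventSets events = new PerKeyEventSets();
        System.out.println(events.record("node-1", 42)); // true: first notification
        System.out.println(events.record("node-1", 42)); // false: duplicate notification
    }
}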

Example 7 with ConcurrentSkipListSet

use of java.util.concurrent.ConcurrentSkipListSet in project cryptomator by cryptomator.

the class SingleInstanceManagerTest method testALotOfMessages.

@Test(timeout = 60000)
public void testALotOfMessages() throws Exception {
    final int connectors = 256;
    final int messagesPerConnector = 256;
    ExecutorService exec = Executors.newSingleThreadExecutor();
    ExecutorService exec2 = Executors.newFixedThreadPool(16);
    try (final LocalInstance server = SingleInstanceManager.startLocalInstance(appKey, exec)) {
        Set<String> sentMessages = new ConcurrentSkipListSet<>();
        Set<String> receivedMessages = new HashSet<>();
        CountDownLatch sendLatch = new CountDownLatch(connectors);
        CountDownLatch receiveLatch = new CountDownLatch(connectors * messagesPerConnector);
        server.registerListener(message -> {
            receivedMessages.add(message);
            receiveLatch.countDown();
        });
        Set<RemoteInstance> instances = Collections.synchronizedSet(new HashSet<>());
        for (int i = 0; i < connectors; i++) {
            exec2.submit(() -> {
                try {
                    final Optional<RemoteInstance> r = SingleInstanceManager.getRemoteInstance(appKey);
                    assertTrue(r.isPresent());
                    instances.add(r.get());
                    for (int j = 0; j < messagesPerConnector; j++) {
                        exec2.submit(() -> {
                            try {
                                for (; ; ) {
                                    final String message = UUID.randomUUID().toString();
                                    if (!sentMessages.add(message)) {
                                        continue;
                                    }
                                    r.get().sendMessage(message, 1000);
                                    break;
                                }
                            } catch (Exception e) {
                                e.printStackTrace();
                            }
                        });
                    }
                    sendLatch.countDown();
                } catch (Throwable e) {
                    e.printStackTrace();
                }
            });
        }
        assertTrue(sendLatch.await(1, TimeUnit.MINUTES));
        exec2.shutdown();
        assertTrue(exec2.awaitTermination(1, TimeUnit.MINUTES));
        assertTrue(receiveLatch.await(1, TimeUnit.MINUTES));
        assertEquals(sentMessages, receivedMessages);
        for (RemoteInstance remoteInstance : instances) {
            try {
                remoteInstance.close();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    } finally {
        exec.shutdownNow();
        exec2.shutdownNow();
    }
}
Also used : LocalInstance(org.cryptomator.ui.util.SingleInstanceManager.LocalInstance) ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet) CountDownLatch(java.util.concurrent.CountDownLatch) ExecutorService(java.util.concurrent.ExecutorService) RemoteInstance(org.cryptomator.ui.util.SingleInstanceManager.RemoteInstance) HashSet(java.util.HashSet) Test(org.junit.Test)
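
Here sentMessages is a ConcurrentSkipListSet shared by all connector tasks submitted to exec2: add is atomic and returns false on a duplicate, so a colliding UUID is simply regenerated and the set doubles as the record of everything that was sent. A reduced sketch of that producer side, with the counts and class name chosen only for illustration:

import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrentDedupSketch {

    public static void main(String[] args) throws InterruptedException {
        // ConcurrentSkipListSet.add is thread-safe and returns false on duplicates,
        // so it serves as both the dedup check and the record of what was produced.
        Set<String> produced = new ConcurrentSkipListSet<>();
        ExecutorService pool = Executors.newFixedThreadPool(8);

        for (int i = 0; i < 1_000; i++) {
            pool.submit(() -> {
                String id;
                do {
                    id = UUID.randomUUID().toString();
                } while (!produced.add(id)); // retry in the (very unlikely) event of a collision
            });
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("unique ids produced: " + produced.size()); // expected: 1000
    }
}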

Example 8 with ConcurrentSkipListSet

use of java.util.concurrent.ConcurrentSkipListSet in project druid by druid-io.

the class RocketMQFirehoseFactory method connect.

@Override
public Firehose connect(ByteBufferInputRowParser byteBufferInputRowParser) throws IOException, ParseException {
    Set<String> newDimExclus = Sets.union(byteBufferInputRowParser.getParseSpec().getDimensionsSpec().getDimensionExclusions(), Sets.newHashSet("feed"));
    final ByteBufferInputRowParser theParser = byteBufferInputRowParser.withParseSpec(byteBufferInputRowParser.getParseSpec().withDimensionsSpec(byteBufferInputRowParser.getParseSpec().getDimensionsSpec().withDimensionExclusions(newDimExclus)));
    /**
     * Topic-Queue mapping.
     */
    final ConcurrentHashMap<String, Set<MessageQueue>> topicQueueMap;
    /**
     * Default Pull-style client for RocketMQ.
     */
    final DefaultMQPullConsumer defaultMQPullConsumer;
    final DruidPullMessageService pullMessageService;
    messageQueueTreeSetMap.clear();
    windows.clear();
    try {
        defaultMQPullConsumer = new DefaultMQPullConsumer(this.consumerGroup);
        defaultMQPullConsumer.setMessageModel(MessageModel.CLUSTERING);
        topicQueueMap = new ConcurrentHashMap<>();
        pullMessageService = new DruidPullMessageService(defaultMQPullConsumer);
        for (String topic : feed) {
            Validators.checkTopic(topic);
            topicQueueMap.put(topic, defaultMQPullConsumer.fetchSubscribeMessageQueues(topic));
        }
        DruidMessageQueueListener druidMessageQueueListener = new DruidMessageQueueListener(Sets.newHashSet(feed), topicQueueMap, defaultMQPullConsumer);
        defaultMQPullConsumer.setMessageQueueListener(druidMessageQueueListener);
        defaultMQPullConsumer.start();
        pullMessageService.start();
    } catch (MQClientException e) {
        LOGGER.error("Failed to start DefaultMQPullConsumer", e);
        throw new IOException("Failed to start RocketMQ client", e);
    }
    return new Firehose() {

        @Override
        public boolean hasMore() {
            boolean hasMore = false;
            DruidPullRequest earliestPullRequest = null;
            for (Map.Entry<String, Set<MessageQueue>> entry : topicQueueMap.entrySet()) {
                for (MessageQueue messageQueue : entry.getValue()) {
                    if (JavaCompatUtils.keySet(messageQueueTreeSetMap).contains(messageQueue) && !messageQueueTreeSetMap.get(messageQueue).isEmpty()) {
                        hasMore = true;
                    } else {
                        try {
                            long offset = defaultMQPullConsumer.fetchConsumeOffset(messageQueue, false);
                            int batchSize = (null == pullBatchSize || pullBatchSize.isEmpty()) ? DEFAULT_PULL_BATCH_SIZE : Integer.parseInt(pullBatchSize);
                            DruidPullRequest newPullRequest = new DruidPullRequest(messageQueue, null, offset, batchSize, !hasMessagesPending());
                            // notify pull message service to pull messages from brokers.
                            pullMessageService.putRequest(newPullRequest);
                            // set the earliest pull in case we need to block.
                            if (null == earliestPullRequest) {
                                earliestPullRequest = newPullRequest;
                            }
                        } catch (MQClientException e) {
                            LOGGER.error("Failed to fetch consume offset for queue: {}", entry.getKey());
                        }
                    }
                }
            }
            // Block only when there is no locally pending messages.
            if (!hasMore && null != earliestPullRequest) {
                try {
                    earliestPullRequest.getCountDownLatch().await();
                    hasMore = true;
                } catch (InterruptedException e) {
                    LOGGER.error("CountDownLatch await got interrupted", e);
                }
            }
            return hasMore;
        }

        @Override
        public InputRow nextRow() {
            for (Map.Entry<MessageQueue, ConcurrentSkipListSet<MessageExt>> entry : messageQueueTreeSetMap.entrySet()) {
                if (!entry.getValue().isEmpty()) {
                    MessageExt message = entry.getValue().pollFirst();
                    InputRow inputRow = theParser.parse(ByteBuffer.wrap(message.getBody()));
                    if (!JavaCompatUtils.keySet(windows).contains(entry.getKey())) {
                        windows.put(entry.getKey(), new ConcurrentSkipListSet<Long>());
                    }
                    windows.get(entry.getKey()).add(message.getQueueOffset());
                    return inputRow;
                }
            }
            // should never happen.
            throw new RuntimeException("Unexpected Fatal Error! There should have been one row available.");
        }

        @Override
        public Runnable commit() {
            return new Runnable() {

                @Override
                public void run() {
                    OffsetStore offsetStore = defaultMQPullConsumer.getOffsetStore();
                    Set<MessageQueue> updated = new HashSet<>();
                    // calculate offsets according to consuming windows.
                    for (ConcurrentHashMap.Entry<MessageQueue, ConcurrentSkipListSet<Long>> entry : windows.entrySet()) {
                        while (!entry.getValue().isEmpty()) {
                            long offset = offsetStore.readOffset(entry.getKey(), ReadOffsetType.MEMORY_FIRST_THEN_STORE);
                            if (offset + 1 > entry.getValue().first()) {
                                entry.getValue().pollFirst();
                            } else if (offset + 1 == entry.getValue().first()) {
                                entry.getValue().pollFirst();
                                offsetStore.updateOffset(entry.getKey(), offset + 1, true);
                                updated.add(entry.getKey());
                            } else {
                                break;
                            }
                        }
                    }
                    offsetStore.persistAll(updated);
                }
            };
        }

        @Override
        public void close() throws IOException {
            defaultMQPullConsumer.shutdown();
            pullMessageService.shutdown(false);
        }
    };
}
Also used : HashSet(java.util.HashSet) Set(java.util.Set) ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet) DefaultMQPullConsumer(com.alibaba.rocketmq.client.consumer.DefaultMQPullConsumer) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) MQClientException(com.alibaba.rocketmq.client.exception.MQClientException) Firehose(io.druid.data.input.Firehose) IOException(java.io.IOException) ByteBufferInputRowParser(io.druid.data.input.ByteBufferInputRowParser) MessageExt(com.alibaba.rocketmq.common.message.MessageExt) MessageQueue(com.alibaba.rocketmq.common.message.MessageQueue) InputRow(io.druid.data.input.InputRow) Map(java.util.Map) OffsetStore(com.alibaba.rocketmq.client.consumer.store.OffsetStore)
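
The commit() runnable above depends on each per-queue window being a ConcurrentSkipListSet<Long>: first()/pollFirst() always yield the smallest outstanding offset, so the stored broker offset is advanced only while the consumed offsets remain contiguous, and a gap (a message still in flight) stops the advance. A RocketMQ-free sketch of just that advance rule; method and class names are illustrative, not part of the Druid firehose:

import java.util.concurrent.ConcurrentSkipListSet;

public class OffsetWindowSketch {

    /**
     * Advances the committed offset as far as the consumed offsets are contiguous.
     * Offsets at or below the committed position are discarded; a gap stops the advance.
     */
    static long advance(long committed, ConcurrentSkipListSet<Long> consumed) {
        while (!consumed.isEmpty()) {
            long first = consumed.first();
            if (first <= committed) {
                consumed.pollFirst();          // already covered by the committed offset
            } else if (first == committed + 1) {
                consumed.pollFirst();
                committed++;                   // contiguous: move the commit point forward
            } else {
                break;                         // gap: an earlier message is still in flight
            }
        }
        return committed;
    }

    public static void main(String[] args) {
        ConcurrentSkipListSet<Long> consumed = new ConcurrentSkipListSet<>();
        consumed.add(5L);
        consumed.add(6L);
        consumed.add(8L);                           // offset 7 is still being processed
        System.out.println(advance(4L, consumed));  // prints 6; offset 8 stays queued
    }
}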

Example 9 with ConcurrentSkipListSet

use of java.util.concurrent.ConcurrentSkipListSet in project mapdb by jankotek.

the class ConcurrentSkipListSubSetTest method dset5.

/**
     * Returns a new set of first 5 negative ints.
     */
private NavigableSet dset5() {
    ConcurrentSkipListSet q = new ConcurrentSkipListSet();
    assertTrue(q.isEmpty());
    q.add(m1);
    q.add(m2);
    q.add(m3);
    q.add(m4);
    q.add(m5);
    NavigableSet s = q.descendingSet();
    assertEquals(5, s.size());
    return s;
}
Also used : NavigableSet(java.util.NavigableSet) ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet)
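
dset5() populates a ConcurrentSkipListSet with m1 through m5 and returns descendingSet(), so the subset tests operate on a reverse-ordered view rather than a copy; changes to the backing set show through the view. A small sketch of that behaviour, with an illustrative class name:

import java.util.NavigableSet;
import java.util.concurrent.ConcurrentSkipListSet;

public class DescendingViewSketch {

    public static void main(String[] args) {
        ConcurrentSkipListSet<Integer> asc = new ConcurrentSkipListSet<>();
        asc.add(-1);
        asc.add(-2);
        asc.add(-3);

        // descendingSet() is a live view, not a copy: it iterates in reverse order
        // and reflects later modifications to the backing set.
        NavigableSet<Integer> desc = asc.descendingSet();
        System.out.println(desc.first());   // -1: the largest element of the backing set
        System.out.println(desc.size());    // 3

        asc.add(0);
        System.out.println(desc.first());   // 0: the view sees the new element
        System.out.println(desc.last());    // -3
    }
}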

Example 10 with ConcurrentSkipListSet

use of java.util.concurrent.ConcurrentSkipListSet in project mapdb by jankotek.

the class ConcurrentSkipListSubSetTest method dset0.

private static NavigableSet dset0() {
    ConcurrentSkipListSet set = new ConcurrentSkipListSet();
    assertTrue(set.isEmpty());
    return set;
}
Also used : ConcurrentSkipListSet(java.util.concurrent.ConcurrentSkipListSet)

Aggregations

ConcurrentSkipListSet (java.util.concurrent.ConcurrentSkipListSet): 17
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 4
ArrayList (java.util.ArrayList): 3
Test (org.junit.Test): 3
ByteBuffer (java.nio.ByteBuffer): 2
HashSet (java.util.HashSet): 2
Map (java.util.Map): 2
NavigableSet (java.util.NavigableSet): 2
Set (java.util.Set): 2
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
ExecutorService (java.util.concurrent.ExecutorService): 2
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2
Ignite (org.apache.ignite.Ignite): 2
DefaultMQPullConsumer (com.alibaba.rocketmq.client.consumer.DefaultMQPullConsumer): 1
OffsetStore (com.alibaba.rocketmq.client.consumer.store.OffsetStore): 1
MQClientException (com.alibaba.rocketmq.client.exception.MQClientException): 1
MessageExt (com.alibaba.rocketmq.common.message.MessageExt): 1
MessageQueue (com.alibaba.rocketmq.common.message.MessageQueue): 1
LongObjectHashMap (com.carrotsearch.hppc.LongObjectHashMap): 1