
Example 61 with ConcurrentLinkedDeque

Use of java.util.concurrent.ConcurrentLinkedDeque in project mapdb by jankotek.

From the class ConcurrentLinkedDequeTest, method testToArray:

/**
 * toArray() contains all elements in FIFO order
 */
public void testToArray() {
    ConcurrentLinkedDeque q = populatedDeque(SIZE);
    Object[] o = q.toArray();
    for (int i = 0; i < o.length; i++) assertSame(o[i], q.poll());
}
Also used: ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque)
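
This test builds its fixture with populatedDeque(SIZE), a helper defined in the surrounding test class rather than shown here (the next example uses it as well). A minimal sketch of what such a helper looks like; the exact body in mapdb's copy of the JSR166 test suite may differ slightly:

ConcurrentLinkedDeque<Integer> populatedDeque(int n) {
    // Fill a fresh deque with 0..n-1; add() appends at the tail, so the
    // elements come back in FIFO order, which is what testToArray() checks.
    ConcurrentLinkedDeque<Integer> q = new ConcurrentLinkedDeque<>();
    for (int i = 0; i < n; i++) {
        q.add(i);
    }
    return q;
}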

Example 62 with ConcurrentLinkedDeque

Use of java.util.concurrent.ConcurrentLinkedDeque in project mapdb by jankotek.

From the class ConcurrentLinkedDequeTest, method testRemoveFirst:

/**
 * removeFirst() removes first element, or throws NSEE if empty
 */
public void testRemoveFirst() {
    ConcurrentLinkedDeque q = populatedDeque(SIZE);
    for (int i = 0; i < SIZE; ++i) {
        assertEquals(i, q.removeFirst());
    }
    try {
        q.removeFirst();
        shouldThrow();
    } catch (NoSuchElementException success) {
    }
    assertNull(q.peekFirst());
}
Also used: ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque)
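
The test relies on the contrast between removeFirst(), which throws NoSuchElementException on an empty deque, and peekFirst()/pollFirst(), which return null instead. A hypothetical companion test in the same style (reusing the suite's assertNull() and shouldThrow() helpers seen above) would spell the difference out:

public void testRemoveFirstVersusPollFirst() {
    ConcurrentLinkedDeque q = new ConcurrentLinkedDeque();
    // pollFirst() reports emptiness by returning null...
    assertNull(q.pollFirst());
    // ...while removeFirst() signals it with NoSuchElementException.
    try {
        q.removeFirst();
        shouldThrow();
    } catch (NoSuchElementException success) {
    }
}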

Example 63 with ConcurrentLinkedDeque

Use of java.util.concurrent.ConcurrentLinkedDeque in project bookkeeper by apache.

From the class LedgerMetadataCreationTest, method testExecution:

public void testExecution(boolean randomLedgerId) throws Exception {
    Set<Long> createRequestsLedgerIds = ConcurrentHashMap.newKeySet();
    ConcurrentLinkedDeque<Long> existingLedgerIds = new ConcurrentLinkedDeque<Long>();
    Vector<Long> failedCreates = new Vector<Long>();
    Vector<Long> failedDeletes = new Vector<Long>();
    BookKeeper bookKeeper = new BookKeeper(baseClientConf);
    ExecutorService executor = Executors.newFixedThreadPool(300);
    Random rand = new Random();
    int numberOfOperations = 20000;
    for (int i = 0; i < numberOfOperations; i++) {
        int iteration = i;
        if (rand.nextBoolean() || existingLedgerIds.isEmpty()) {
            executor.submit(() -> {
                long ledgerId = -1;
                try {
                    if (randomLedgerId) {
                        do {
                            ledgerId = Math.abs(rand.nextLong());
                            if (!baseClientConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) {
                                /*
                                 * LongHierarchicalLedgerManager supports ledger ids of up to
                                 * 19 decimal digits, but the other LedgerManagers only support
                                 * ids of up to 10 decimal digits, so cap the id in that case.
                                 */
                                ledgerId %= 9999999999L;
                            }
                        } while (!createRequestsLedgerIds.add(ledgerId));
                    } else {
                        ledgerId = iteration;
                    }
                    bookKeeper.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null);
                    existingLedgerIds.add(ledgerId);
                } catch (Exception e) {
                    LOG.error("Got Exception while creating Ledger with ledgerId " + ledgerId, e);
                    failedCreates.add(ledgerId);
                }
            });
        } else {
            executor.submit(() -> {
                Long ledgerId = null;
                if (rand.nextBoolean()) {
                    ledgerId = existingLedgerIds.pollFirst();
                } else {
                    ledgerId = existingLedgerIds.pollLast();
                }
                if (ledgerId == null) {
                    return;
                }
                try {
                    bookKeeper.deleteLedger(ledgerId);
                } catch (Exception e) {
                    LOG.error("Got Exception while deleting Ledger with ledgerId " + ledgerId, e);
                    failedDeletes.add(ledgerId);
                }
            });
        }
    }
    executor.shutdown();
    assertTrue("All the ledger create/delete operations should have'been completed", executor.awaitTermination(120, TimeUnit.SECONDS));
    assertTrue("There should be no failed creates. But there are " + failedCreates.size() + " failedCreates", failedCreates.isEmpty());
    assertTrue("There should be no failed deletes. But there are " + failedDeletes.size() + " failedDeletes", failedDeletes.isEmpty());
    bookKeeper.close();
}
Also used: BookKeeper (org.apache.bookkeeper.client.BookKeeper), ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque), Random (java.util.Random), ExecutorService (java.util.concurrent.ExecutorService), Vector (java.util.Vector)
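
The deque here acts as a shared pool of ledger ids: creator tasks publish ids with add(), and deleter tasks take one from a randomly chosen end with pollFirst()/pollLast(), both of which are non-blocking and return null when the pool is momentarily empty (hence the null check before deleteLedger()). A stripped-down sketch of that pattern, with hypothetical names (LedgerIdPool, created(), takeForDeletion()) standing in for the test's inline lambdas:

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ThreadLocalRandom;

public class LedgerIdPool {

    private final ConcurrentLinkedDeque<Long> existing = new ConcurrentLinkedDeque<>();

    // Called by creator tasks once a ledger has been created successfully.
    void created(long ledgerId) {
        existing.add(ledgerId);
    }

    // Called by deleter tasks; mirrors the test by picking either end at random.
    // Returns null when no ledger is currently available, and the caller is
    // expected to skip the delete in that case, exactly as the test does.
    Long takeForDeletion() {
        return ThreadLocalRandom.current().nextBoolean()
                ? existing.pollFirst()
                : existing.pollLast();
    }
}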

Example 64 with ConcurrentLinkedDeque

Use of java.util.concurrent.ConcurrentLinkedDeque in project jackrabbit-oak by apache.

From the class MetricStatisticsProviderTest, method concurrentAccess:

@Test
public void concurrentAccess() throws Exception {
    // Queue is used to collect instances with minimal overhead in concurrent scenario
    final Queue<MeterStats> statsQueue = new ConcurrentLinkedDeque<MeterStats>();
    List<Thread> threads = Lists.newArrayList();
    int numWorker = 5;
    final CountDownLatch latch = new CountDownLatch(1);
    for (int i = 0; i < numWorker; i++) {
        threads.add(new Thread(new Runnable() {

            @Override
            public void run() {
                Uninterruptibles.awaitUninterruptibly(latch);
                statsQueue.add(statsProvider.getMeter("foo", StatsOptions.DEFAULT));
            }
        }));
    }
    for (Thread t : threads) {
        t.start();
    }
    latch.countDown();
    for (Thread t : threads) {
        t.join();
    }
    // Assert that we get same reference for every call
    Set<MeterStats> statsSet = Sets.newIdentityHashSet();
    for (MeterStats m : statsQueue) {
        statsSet.add(m);
    }
    assertEquals(1, statsSet.size());
}
Also used: MeterStats (org.apache.jackrabbit.oak.stats.MeterStats), CountDownLatch (java.util.concurrent.CountDownLatch), ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque), Test (org.junit.Test)
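
Sets.newIdentityHashSet() comes from Guava; the point of the identity set is that two queue entries only collapse into one element when they are literally the same object, proving that statsProvider caches and reuses the MeterStats instance. A plain-JDK sketch of the same check (illustrative only, not the Oak test itself):

import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedDeque;

public class IdentitySetCheck {
    public static void main(String[] args) {
        Queue<Object> statsQueue = new ConcurrentLinkedDeque<>();
        Object sharedMeter = new Object();   // stands in for the cached MeterStats instance
        statsQueue.add(sharedMeter);
        statsQueue.add(sharedMeter);

        // Identity-based set: elements are equal only if they are the same reference.
        Set<Object> statsSet = Collections.newSetFromMap(new IdentityHashMap<>());
        statsSet.addAll(statsQueue);
        System.out.println(statsSet.size()); // prints 1: every entry is the same object
    }
}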

Example 65 with ConcurrentLinkedDeque

Use of java.util.concurrent.ConcurrentLinkedDeque in project cloudbreak by hortonworks.

From the class ClusterRepairFlowEventChainFactory, method createFlowTriggerEventQueue:

@Override
public Queue<Selectable> createFlowTriggerEventQueue(ClusterRepairTriggerEvent event) {
    StackView stackView = stackService.getByIdView(event.getStackId());
    Queue<Selectable> flowChainTriggers = new ConcurrentLinkedDeque<>();
    Map<String, List<String>> failedNodesMap = event.getFailedNodesMap();
    for (Entry<String, List<String>> failedNodes : failedNodesMap.entrySet()) {
        String hostGroupName = failedNodes.getKey();
        List<String> hostNames = failedNodes.getValue();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stackView.getClusterView().getId(), hostGroupName);
        InstanceGroup instanceGroup = hostGroup.getConstraint().getInstanceGroup();
        if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())) {
            List<InstanceMetaData> primary = instanceMetadataRepository.findAllByInstanceGroup(instanceGroup).stream()
                    .filter(imd -> hostNames.contains(imd.getDiscoveryFQDN())
                            && imd.getInstanceMetadataType() == InstanceMetadataType.GATEWAY_PRIMARY)
                    .collect(Collectors.toList());
            if (!primary.isEmpty()) {
                flowChainTriggers.add(new ChangePrimaryGatewayTriggerEvent(ChangePrimaryGatewayEvent.CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT.event(), event.getStackId(), event.accepted()));
            }
        }
        flowChainTriggers.add(new ClusterAndStackDownscaleTriggerEvent(FlowChainTriggers.FULL_DOWNSCALE_TRIGGER_EVENT, event.getStackId(), hostGroupName, new HashSet<>(hostNames), ScalingType.DOWNSCALE_TOGETHER, event.accepted()));
        if (!event.isRemoveOnly()) {
            flowChainTriggers.add(new StackAndClusterUpscaleTriggerEvent(FlowChainTriggers.FULL_UPSCALE_TRIGGER_EVENT, event.getStackId(), hostGroupName, hostNames.size(), ScalingType.UPSCALE_TOGETHER, Sets.newHashSet(hostNames)));
            // we need to update all ephemeral clusters that are connected to a datalake
            if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType()) && !stackService.findClustersConnectedToDatalake(event.getStackId()).isEmpty()) {
                flowChainTriggers.add(new EphemeralClustersUpgradeTriggerEvent(FlowChainTriggers.EPHEMERAL_CLUSTERS_UPDATE_TRIGGER_EVENT, event.getStackId(), event.accepted()));
            }
        }
    }
    return flowChainTriggers;
}
Also used: StackView (com.sequenceiq.cloudbreak.domain.view.StackView), StackAndClusterUpscaleTriggerEvent (com.sequenceiq.cloudbreak.core.flow2.event.StackAndClusterUpscaleTriggerEvent), InstanceMetaDataRepository (com.sequenceiq.cloudbreak.repository.InstanceMetaDataRepository), LoggerFactory (org.slf4j.LoggerFactory), HostGroupService (com.sequenceiq.cloudbreak.service.hostgroup.HostGroupService), ClusterRepairTriggerEvent (com.sequenceiq.cloudbreak.reactor.api.event.orchestration.ClusterRepairTriggerEvent), InstanceGroupType (com.sequenceiq.cloudbreak.api.model.InstanceGroupType), InstanceGroup (com.sequenceiq.cloudbreak.domain.InstanceGroup), HashSet (java.util.HashSet), Inject (javax.inject.Inject), Map (java.util.Map), ChangePrimaryGatewayTriggerEvent (com.sequenceiq.cloudbreak.reactor.api.event.orchestration.ChangePrimaryGatewayTriggerEvent), ChangePrimaryGatewayEvent (com.sequenceiq.cloudbreak.core.flow2.cluster.repair.ChangePrimaryGatewayEvent), HostGroup (com.sequenceiq.cloudbreak.domain.HostGroup), Logger (org.slf4j.Logger), InstanceMetaData (com.sequenceiq.cloudbreak.domain.InstanceMetaData), Selectable (com.sequenceiq.cloudbreak.cloud.event.Selectable), ClusterAndStackDownscaleTriggerEvent (com.sequenceiq.cloudbreak.core.flow2.event.ClusterAndStackDownscaleTriggerEvent), Collectors (java.util.stream.Collectors), ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque), Sets (com.google.common.collect.Sets), InstanceMetadataType (com.sequenceiq.cloudbreak.api.model.InstanceMetadataType), List (java.util.List), Component (org.springframework.stereotype.Component), ScalingType (com.sequenceiq.cloudbreak.common.type.ScalingType), Entry (java.util.Map.Entry), EphemeralClustersUpgradeTriggerEvent (com.sequenceiq.cloudbreak.reactor.api.event.orchestration.EphemeralClustersUpgradeTriggerEvent), Queue (java.util.Queue), StackService (com.sequenceiq.cloudbreak.service.stack.StackService)
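
The factory only builds the queue; the flow chain machinery elsewhere in Cloudbreak drains it in insertion order, which is why a FIFO structure is used. A minimal, illustrative sketch of that consumption pattern (the event names and class below are placeholders, not Cloudbreak's actual consumer code):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

public class FlowChainDrain {
    public static void main(String[] args) {
        // add() enqueues at the tail and poll() dequeues from the head, so the
        // triggers fire in exactly the order the factory added them.
        Queue<String> flowChainTriggers = new ConcurrentLinkedDeque<>();
        flowChainTriggers.add("CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT");
        flowChainTriggers.add("FULL_DOWNSCALE_TRIGGER_EVENT");
        flowChainTriggers.add("FULL_UPSCALE_TRIGGER_EVENT");

        String next;
        while ((next = flowChainTriggers.poll()) != null) {
            System.out.println("triggering " + next);
        }
    }
}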

Aggregations

Classes most often used alongside ConcurrentLinkedDeque in the indexed projects, with usage counts:

ConcurrentLinkedDeque (java.util.concurrent.ConcurrentLinkedDeque): 209
Test (org.junit.Test): 21
NoSuchElementException (java.util.NoSuchElementException): 16
Iterator (java.util.Iterator): 14
Random (java.util.Random): 14
CountDownLatch (java.util.concurrent.CountDownLatch): 14
Deque (java.util.Deque): 7
ExecutorService (java.util.concurrent.ExecutorService): 7
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 6
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 6
ArrayList (java.util.ArrayList): 5
Map (java.util.Map): 5
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 5
IOException (java.io.IOException): 4
List (java.util.List): 4
IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture): 4
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest): 4
BlockIdList (alluxio.grpc.BlockIdList): 3
BlockStoreLocationProto (alluxio.grpc.BlockStoreLocationProto): 3
LocationBlockIdListEntry (alluxio.grpc.LocationBlockIdListEntry): 3