Use of java.util.concurrent.ConcurrentLinkedDeque in project mapdb by jankotek.
The class ConcurrentLinkedDequeTest, method testToArray.
/**
 * toArray() contains all elements in FIFO order
 */
public void testToArray() {
    ConcurrentLinkedDeque q = populatedDeque(SIZE);
    Object[] o = q.toArray();
    for (int i = 0; i < o.length; i++) assertSame(o[i], q.poll());
}
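toArray() walks the deque head to tail, so draining it with poll() must visit the same elements in the same order, which is exactly what the assertion checks. A minimal standalone sketch of that guarantee (the class name ToArrayOrderDemo is ours):

import java.util.Arrays;
import java.util.concurrent.ConcurrentLinkedDeque;

public class ToArrayOrderDemo {
    public static void main(String[] args) {
        ConcurrentLinkedDeque<Integer> deque = new ConcurrentLinkedDeque<>();
        for (int i = 0; i < 5; i++) {
            deque.add(i); // add() appends at the tail, like addLast()
        }
        // toArray() snapshots head to tail, i.e. in FIFO order
        System.out.println(Arrays.toString(deque.toArray())); // [0, 1, 2, 3, 4]
    }
}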
Use of java.util.concurrent.ConcurrentLinkedDeque in project mapdb by jankotek.
The class ConcurrentLinkedDequeTest, method testRemoveFirst.
/**
 * removeFirst() removes the first element, or throws NoSuchElementException if empty
 */
public void testRemoveFirst() {
    ConcurrentLinkedDeque q = populatedDeque(SIZE);
    for (int i = 0; i < SIZE; ++i) {
        assertEquals(i, q.removeFirst());
    }
    try {
        q.removeFirst();
        shouldThrow();
    } catch (NoSuchElementException success) {
    }
    assertNull(q.peekFirst());
}
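The test exercises the exception-throwing half of the deque API; its null-returning sibling pollFirst() is the usual choice when emptiness is an expected condition. A small sketch of the contrast (the class name RemoveVersusPollDemo is ours):

import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentLinkedDeque;

public class RemoveVersusPollDemo {
    public static void main(String[] args) {
        ConcurrentLinkedDeque<String> deque = new ConcurrentLinkedDeque<>();
        // pollFirst() signals emptiness with null...
        System.out.println(deque.pollFirst()); // null
        // ...while removeFirst() signals it with an exception
        try {
            deque.removeFirst();
        } catch (NoSuchElementException expected) {
            System.out.println("removeFirst() threw on empty deque");
        }
    }
}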
Use of java.util.concurrent.ConcurrentLinkedDeque in project bookkeeper by apache.
The class LedgerMetadataCreationTest, method testExecution.
public void testExecution(boolean randomLedgerId) throws Exception {
    Set<Long> createRequestsLedgerIds = ConcurrentHashMap.newKeySet();
    ConcurrentLinkedDeque<Long> existingLedgerIds = new ConcurrentLinkedDeque<Long>();
    Vector<Long> failedCreates = new Vector<Long>();
    Vector<Long> failedDeletes = new Vector<Long>();
    BookKeeper bookKeeper = new BookKeeper(baseClientConf);
    ExecutorService executor = Executors.newFixedThreadPool(300);
    Random rand = new Random();
    int numberOfOperations = 20000;
    for (int i = 0; i < numberOfOperations; i++) {
        int iteration = i;
        if (rand.nextBoolean() || existingLedgerIds.isEmpty()) {
            executor.submit(() -> {
                long ledgerId = -1;
                try {
                    if (randomLedgerId) {
                        do {
                            ledgerId = Math.abs(rand.nextLong());
                            if (!baseClientConf.getLedgerManagerFactoryClass()
                                    .equals(LongHierarchicalLedgerManagerFactory.class)) {
                                /*
                                 * LongHierarchicalLedgerManager supports ledger ids of up to
                                 * 19 decimal digits, but the other LedgerManagers support
                                 * only up to 10.
                                 */
                                ledgerId %= 9999999999L;
                            }
                        } while (!createRequestsLedgerIds.add(ledgerId));
                    } else {
                        ledgerId = iteration;
                    }
                    bookKeeper.createLedgerAdv(ledgerId, 3, 2, 2, DigestType.CRC32, "passwd".getBytes(), null);
                    existingLedgerIds.add(ledgerId);
                } catch (Exception e) {
                    LOG.error("Got Exception while creating Ledger with ledgerId " + ledgerId, e);
                    failedCreates.add(ledgerId);
                }
            });
        } else {
            executor.submit(() -> {
                Long ledgerId;
                if (rand.nextBoolean()) {
                    ledgerId = existingLedgerIds.pollFirst();
                } else {
                    ledgerId = existingLedgerIds.pollLast();
                }
                if (ledgerId == null) {
                    return;
                }
                try {
                    bookKeeper.deleteLedger(ledgerId);
                } catch (Exception e) {
                    LOG.error("Got Exception while deleting Ledger with ledgerId " + ledgerId, e);
                    failedDeletes.add(ledgerId);
                }
            });
        }
    }
    executor.shutdown();
    assertTrue("All the ledger create/delete operations should have been completed",
            executor.awaitTermination(120, TimeUnit.SECONDS));
    assertTrue("There should be no failed creates. But there are " + failedCreates.size() + " failedCreates",
            failedCreates.isEmpty());
    assertTrue("There should be no failed deletes. But there are " + failedDeletes.size() + " failedDeletes",
            failedDeletes.isEmpty());
    bookKeeper.close();
}
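The deque serves as a shared pool of live ledger ids: pollFirst() and pollLast() each atomically detach one element and return null once the pool is empty, so concurrent delete tasks never double-delete and never throw on emptiness, hence the null check before deleteLedger. A standalone sketch of that property (the class name PollRaceDemo and its sizes are illustrative):

import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class PollRaceDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedDeque<Long> ids = new ConcurrentLinkedDeque<>();
        for (long i = 0; i < 1000; i++) {
            ids.add(i);
        }
        AtomicInteger taken = new AtomicInteger();
        ExecutorService pool = Executors.newFixedThreadPool(8);
        for (int t = 0; t < 8; t++) {
            pool.submit(() -> {
                Long id;
                // pollFirst() atomically removes one element and returns null
                // once the deque is drained, so racing consumers never see the
                // same element twice and never hit NoSuchElementException
                while ((id = ids.pollFirst()) != null) {
                    taken.incrementAndGet();
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println(taken.get()); // 1000: each id taken exactly once
    }
}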
Use of java.util.concurrent.ConcurrentLinkedDeque in project jackrabbit-oak by apache.
The class MetricStatisticsProviderTest, method concurrentAccess.
@Test
public void concurrentAccess() throws Exception {
    // Queue is used to collect instances with minimal overhead in a concurrent scenario
    final Queue<MeterStats> statsQueue = new ConcurrentLinkedDeque<MeterStats>();
    List<Thread> threads = Lists.newArrayList();
    int numWorker = 5;
    final CountDownLatch latch = new CountDownLatch(1);
    for (int i = 0; i < numWorker; i++) {
        threads.add(new Thread(new Runnable() {

            @Override
            public void run() {
                Uninterruptibles.awaitUninterruptibly(latch);
                statsQueue.add(statsProvider.getMeter("foo", StatsOptions.DEFAULT));
            }
        }));
    }
    for (Thread t : threads) {
        t.start();
    }
    latch.countDown();
    for (Thread t : threads) {
        t.join();
    }
    // Assert that we get the same reference for every call
    Set<MeterStats> statsSet = Sets.newIdentityHashSet();
    for (MeterStats m : statsQueue) {
        statsSet.add(m);
    }
    assertEquals(1, statsSet.size());
}
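The one-count latch acts as a start gate: every worker parks on it first, then a single countDown() releases them all at once to maximize contention on getMeter(), while the ConcurrentLinkedDeque collects the results without extra locking. The same pattern in isolation (the class name StartGateDemo and its values are illustrative):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.CountDownLatch;

public class StartGateDemo {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch startGate = new CountDownLatch(1);
        Queue<String> results = new ConcurrentLinkedDeque<>();
        Thread[] workers = new Thread[5];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                try {
                    startGate.await(); // park until the gate opens
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                // all workers race here at (nearly) the same instant;
                // ConcurrentLinkedDeque.add() is safe without synchronization
                results.add(Thread.currentThread().getName());
            });
            workers[i].start();
        }
        startGate.countDown(); // open the gate: release all workers at once
        for (Thread w : workers) {
            w.join();
        }
        System.out.println(results.size()); // 5
    }
}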
Use of java.util.concurrent.ConcurrentLinkedDeque in project cloudbreak by hortonworks.
The class ClusterRepairFlowEventChainFactory, method createFlowTriggerEventQueue.
@Override
public Queue<Selectable> createFlowTriggerEventQueue(ClusterRepairTriggerEvent event) {
    StackView stackView = stackService.getByIdView(event.getStackId());
    Queue<Selectable> flowChainTriggers = new ConcurrentLinkedDeque<>();
    Map<String, List<String>> failedNodesMap = event.getFailedNodesMap();
    for (Entry<String, List<String>> failedNodes : failedNodesMap.entrySet()) {
        String hostGroupName = failedNodes.getKey();
        List<String> hostNames = failedNodes.getValue();
        HostGroup hostGroup = hostGroupService.getByClusterIdAndName(stackView.getClusterView().getId(), hostGroupName);
        InstanceGroup instanceGroup = hostGroup.getConstraint().getInstanceGroup();
        if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())) {
            List<InstanceMetaData> primary = instanceMetadataRepository.findAllByInstanceGroup(instanceGroup).stream()
                    .filter(imd -> hostNames.contains(imd.getDiscoveryFQDN())
                            && imd.getInstanceMetadataType() == InstanceMetadataType.GATEWAY_PRIMARY)
                    .collect(Collectors.toList());
            if (!primary.isEmpty()) {
                flowChainTriggers.add(new ChangePrimaryGatewayTriggerEvent(
                        ChangePrimaryGatewayEvent.CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT.event(),
                        event.getStackId(), event.accepted()));
            }
        }
        flowChainTriggers.add(new ClusterAndStackDownscaleTriggerEvent(FlowChainTriggers.FULL_DOWNSCALE_TRIGGER_EVENT,
                event.getStackId(), hostGroupName, new HashSet<>(hostNames), ScalingType.DOWNSCALE_TOGETHER,
                event.accepted()));
        if (!event.isRemoveOnly()) {
            flowChainTriggers.add(new StackAndClusterUpscaleTriggerEvent(FlowChainTriggers.FULL_UPSCALE_TRIGGER_EVENT,
                    event.getStackId(), hostGroupName, hostNames.size(), ScalingType.UPSCALE_TOGETHER,
                    Sets.newHashSet(hostNames)));
            // we need to update all ephemeral clusters that are connected to a datalake
            if (InstanceGroupType.GATEWAY.equals(instanceGroup.getInstanceGroupType())
                    && !stackService.findClustersConnectedToDatalake(event.getStackId()).isEmpty()) {
                flowChainTriggers.add(new EphemeralClustersUpgradeTriggerEvent(
                        FlowChainTriggers.EPHEMERAL_CLUSTERS_UPDATE_TRIGGER_EVENT,
                        event.getStackId(), event.accepted()));
            }
        }
    }
    return flowChainTriggers;
}
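Here the ConcurrentLinkedDeque is used through its Queue face purely as an ordered, thread-safe accumulator: triggers are appended per host group and presumably drained in FIFO order by the flow engine, so repair steps run in the sequence they were enqueued. A minimal sketch of that contract with a stand-in Selectable (every name below is illustrative, not the cloudbreak API):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

public class TriggerQueueDemo {

    // Stand-in for the Selectable event interface used by the real factory
    interface Selectable {
        String selector();
    }

    static final class Trigger implements Selectable {
        private final String selector;

        Trigger(String selector) {
            this.selector = selector;
        }

        @Override
        public String selector() {
            return selector;
        }
    }

    public static void main(String[] args) {
        // add() appends at the tail, so the chain is consumed in exactly the
        // order the repair steps were enqueued
        Queue<Selectable> chain = new ConcurrentLinkedDeque<>();
        chain.add(new Trigger("CHANGE_PRIMARY_GATEWAY_TRIGGER_EVENT"));
        chain.add(new Trigger("FULL_DOWNSCALE_TRIGGER_EVENT"));
        chain.add(new Trigger("FULL_UPSCALE_TRIGGER_EVENT"));
        Selectable next;
        while ((next = chain.poll()) != null) {
            System.out.println(next.selector()); // FIFO: gateway, downscale, upscale
        }
    }
}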