Use of java.util.concurrent.ConcurrentLinkedDeque in project alluxio by Alluxio: class BlockMasterWorkerServiceHandlerTest, method registerWithNoLeaseIsRejected.
@Test
public void registerWithNoLeaseIsRejected() {
  long workerId = mBlockMaster.getWorkerId(NET_ADDRESS_1);
  // Prepare LocationBlockIdListEntry objects
  BlockStoreLocation loc = new BlockStoreLocation("MEM", 0);
  BlockStoreLocationProto locationProto = BlockStoreLocationProto.newBuilder()
      .setTierAlias(loc.tierAlias())
      .setMediumType(loc.mediumType())
      .build();
  BlockIdList blockIdList1 = BlockIdList.newBuilder()
      .addAllBlockId(ImmutableList.of(1L, 2L)).build();
  LocationBlockIdListEntry listEntry1 = LocationBlockIdListEntry.newBuilder()
      .setKey(locationProto).setValue(blockIdList1).build();
  RegisterWorkerPRequest request = RegisterWorkerPRequest.newBuilder()
      .setWorkerId(workerId)
      .addStorageTiers("MEM")
      .putTotalBytesOnTiers("MEM", 1000L)
      .putUsedBytesOnTiers("MEM", 0L)
      .setOptions(RegisterWorkerPOptions.getDefaultInstance())
      .addCurrentBlocks(listEntry1)
      .build();
  Queue<Throwable> errors = new ConcurrentLinkedDeque<>();
  StreamObserver<RegisterWorkerPResponse> noopResponseObserver =
      new StreamObserver<RegisterWorkerPResponse>() {
        @Override
        public void onNext(RegisterWorkerPResponse response) {
        }

        @Override
        public void onError(Throwable t) {
          errors.offer(t);
        }

        @Override
        public void onCompleted() {
        }
      };
  // The responseObserver should see an error
  mHandler.registerWorker(request, noopResponseObserver);
  assertEquals(1, errors.size());
  Throwable t = errors.poll();
  Assert.assertThat(t.getMessage(),
      containsString("does not have a lease or the lease has expired."));
}
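The deque here acts as a thread-safe mailbox: gRPC may invoke the StreamObserver callbacks on a different thread from the test, so an unsynchronized list would be unsafe. A minimal standalone sketch of that pattern (illustrative names, not Alluxio code):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedDeque;

// Sketch: collect errors raised on a callback thread so the test thread can
// inspect them afterwards without extra locking.
class ErrorCollector {
    private final Queue<Throwable> errors = new ConcurrentLinkedDeque<>();

    // Safe to call from any thread; offer() appends at the tail and never blocks.
    void onError(Throwable t) {
        errors.offer(t);
    }

    // poll() removes from the head, returning null when no error was recorded.
    Throwable nextError() {
        return errors.poll();
    }
}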
Use of java.util.concurrent.ConcurrentLinkedDeque in project alluxio by Alluxio: class BlockMasterWorkerServiceHandlerTest, method registerLeaseTurnedOff.
@Test
public void registerLeaseTurnedOff() throws Exception {
  initServiceHandler(false);
  long workerId = mBlockMaster.getWorkerId(NET_ADDRESS_1);
  // Prepare LocationBlockIdListEntry objects
  BlockStoreLocation loc = new BlockStoreLocation("MEM", 0);
  BlockStoreLocationProto locationProto = BlockStoreLocationProto.newBuilder()
      .setTierAlias(loc.tierAlias())
      .setMediumType(loc.mediumType())
      .build();
  BlockIdList blockIdList1 = BlockIdList.newBuilder()
      .addAllBlockId(ImmutableList.of(1L, 2L)).build();
  LocationBlockIdListEntry listEntry1 = LocationBlockIdListEntry.newBuilder()
      .setKey(locationProto).setValue(blockIdList1).build();
  // No lease is acquired
  RegisterWorkerPRequest request = RegisterWorkerPRequest.newBuilder()
      .setWorkerId(workerId)
      .addStorageTiers("MEM")
      .putTotalBytesOnTiers("MEM", 1000L)
      .putUsedBytesOnTiers("MEM", 0L)
      .setOptions(RegisterWorkerPOptions.getDefaultInstance())
      .addCurrentBlocks(listEntry1)
      .build();
  Queue<Throwable> errors = new ConcurrentLinkedDeque<>();
  StreamObserver<RegisterWorkerPResponse> noopResponseObserver =
      new StreamObserver<RegisterWorkerPResponse>() {
        @Override
        public void onNext(RegisterWorkerPResponse response) {
        }

        @Override
        public void onError(Throwable t) {
          errors.offer(t);
        }

        @Override
        public void onCompleted() {
        }
      };
  mHandler.registerWorker(request, noopResponseObserver);
  assertEquals(0, errors.size());
}
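This variant asserts the opposite outcome: with lease checking disabled, the error queue stays empty. A hypothetical helper (not part of Alluxio) that both tests could share, preserving the first recorded Throwable as the assertion's cause:

import java.util.Queue;

// Hypothetical shared helper: fail the test if any callback recorded an
// error, attaching the first Throwable so the stack trace is not lost.
final class CallbackAsserts {
    private CallbackAsserts() {
    }

    static void assertNoCallbackErrors(Queue<Throwable> errors) {
        Throwable t = errors.poll();
        if (t != null) {
            throw new AssertionError("unexpected callback error: " + t.getMessage(), t);
        }
    }
}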
Use of java.util.concurrent.ConcurrentLinkedDeque in project netty by netty: class LocalTransportThreadModelTest3, method testConcurrentAddRemove.
private static void testConcurrentAddRemove(boolean inbound) throws Exception {
    EventLoopGroup l = new DefaultEventLoopGroup(4, new DefaultThreadFactory("l"));
    EventExecutorGroup e1 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e1"));
    EventExecutorGroup e2 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e2"));
    EventExecutorGroup e3 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e3"));
    EventExecutorGroup e4 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e4"));
    EventExecutorGroup e5 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e5"));
    final EventExecutorGroup[] groups = { e1, e2, e3, e4, e5 };
    try {
        Deque<EventType> events = new ConcurrentLinkedDeque<EventType>();
        final EventForwarder h1 = new EventForwarder();
        final EventForwarder h2 = new EventForwarder();
        final EventForwarder h3 = new EventForwarder();
        final EventForwarder h4 = new EventForwarder();
        final EventForwarder h5 = new EventForwarder();
        final EventRecorder h6 = new EventRecorder(events, inbound);
        final Channel ch = new LocalChannel();
        if (!inbound) {
            ch.config().setAutoRead(false);
        }
        ch.pipeline().addLast(e1, h1).addLast(e1, h2).addLast(e1, h3)
                .addLast(e1, h4).addLast(e1, h5).addLast(e1, "recorder", h6);
        l.register(ch).sync().channel().connect(localAddr).sync();
        final LinkedList<EventType> expectedEvents = events(inbound, 8192);
        Throwable cause = new Throwable();
        Thread pipelineModifier = new Thread(new Runnable() {
            @Override
            public void run() {
                Random random = new Random();
                while (true) {
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException e) {
                        return;
                    }
                    if (!ch.isRegistered()) {
                        continue;
                    }
                    // EventForwardHandler forwardHandler = forwarders[random.nextInt(forwarders.length)];
                    ChannelHandler handler = ch.pipeline().removeFirst();
                    ch.pipeline().addBefore(groups[random.nextInt(groups.length)], "recorder",
                            UUID.randomUUID().toString(), handler);
                }
            }
        });
        pipelineModifier.setDaemon(true);
        pipelineModifier.start();
        for (EventType event : expectedEvents) {
            switch (event) {
                case EXCEPTION_CAUGHT:
                    ch.pipeline().fireExceptionCaught(cause);
                    break;
                case MESSAGE_RECEIVED:
                    ch.pipeline().fireChannelRead("");
                    break;
                case MESSAGE_RECEIVED_LAST:
                    ch.pipeline().fireChannelReadComplete();
                    break;
                case USER_EVENT:
                    ch.pipeline().fireUserEventTriggered("");
                    break;
                case WRITE:
                    ch.pipeline().write("");
                    break;
                case READ:
                    ch.pipeline().read();
                    break;
            }
        }
        ch.close().sync();
        while (events.peekLast() != EventType.UNREGISTERED) {
            Thread.sleep(10);
        }
        expectedEvents.addFirst(EventType.ACTIVE);
        expectedEvents.addFirst(EventType.REGISTERED);
        expectedEvents.addLast(EventType.INACTIVE);
        expectedEvents.addLast(EventType.UNREGISTERED);
        for (;;) {
            EventType event = events.poll();
            if (event == null) {
                assertTrue(expectedEvents.isEmpty(), "Missing events:" + expectedEvents);
                break;
            }
            assertEquals(event, expectedEvents.poll());
        }
    } finally {
        l.shutdownGracefully();
        e1.shutdownGracefully();
        e2.shutdownGracefully();
        e3.shutdownGracefully();
        e4.shutdownGracefully();
        e5.shutdownGracefully();
        l.terminationFuture().sync();
        e1.terminationFuture().sync();
        e2.terminationFuture().sync();
        e3.terminationFuture().sync();
        e4.terminationFuture().sync();
        e5.terminationFuture().sync();
    }
}
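This test leans on two deque-specific operations: events are appended by whichever event-loop thread happens to run the recorder, and the test thread spin-polls peekLast() until the terminal UNREGISTERED event appears, which is safe because peekLast() inspects the tail without removing anything. A reduced sketch of that wait-for-terminal-event pattern (illustrative names, not Netty code):

import java.util.Deque;
import java.util.concurrent.ConcurrentLinkedDeque;

// Sketch: producer threads record events; a watcher thread waits for a
// terminal event by inspecting the tail without consuming anything.
class EventLog<E> {
    private final Deque<E> events = new ConcurrentLinkedDeque<>();

    // Called from any number of producer threads.
    void record(E event) {
        events.offerLast(event);
    }

    // Spin-poll the tail; peekLast() is lock-free and non-destructive, so the
    // watcher never races with later consumers that poll() from the head.
    void awaitLast(E terminal) throws InterruptedException {
        while (!terminal.equals(events.peekLast())) {
            Thread.sleep(10);
        }
    }
}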
Use of java.util.concurrent.ConcurrentLinkedDeque in project druid by druid-io: class CachingClusteredClientTest, method testOutOfOrderBackgroundCachePopulation.
@Test
public void testOutOfOrderBackgroundCachePopulation() {
  // A marker Runnable, submitted once per query run to trigger the actual
  // execution when we are ready to shuffle the order.
  abstract class DrainTask implements Runnable {
  }
  final ForwardingListeningExecutorService randomizingExecutorService =
      new ForwardingListeningExecutorService() {
        final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue =
            new ConcurrentLinkedDeque<>();

        // Run on a direct executor so that background cache-population callbacks
        // are complete before moving on to the next query run.
        final ListeningExecutorService delegate =
            MoreExecutors.listeningDecorator(Execs.directExecutor());

        @Override
        protected ListeningExecutorService delegate() {
          return delegate;
        }

        private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait) {
          if (wait) {
            SettableFuture<T> future = SettableFuture.create();
            taskQueue.addFirst(Pair.of(future, task));
            return future;
          } else {
            List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
            Collections.shuffle(tasks, new Random(0));
            for (final Pair<SettableFuture, Object> pair : tasks) {
              ListenableFuture future = pair.rhs instanceof Callable
                  ? delegate.submit((Callable) pair.rhs)
                  : delegate.submit((Runnable) pair.rhs);
              Futures.addCallback(future, new FutureCallback() {
                @Override
                public void onSuccess(@Nullable Object result) {
                  pair.lhs.set(result);
                }

                @Override
                public void onFailure(Throwable t) {
                  pair.lhs.setException(t);
                }
              });
            }
          }
          return task instanceof Callable
              ? delegate.submit((Callable) task)
              : (ListenableFuture<T>) delegate.submit((Runnable) task);
        }

        @SuppressWarnings("ParameterPackage")
        @Override
        public <T> ListenableFuture<T> submit(Callable<T> task) {
          return maybeSubmitTask(task, true);
        }

        @Override
        public ListenableFuture<?> submit(Runnable task) {
          if (task instanceof DrainTask) {
            return maybeSubmitTask(task, false);
          } else {
            return maybeSubmitTask(task, true);
          }
        }
      };
  client = makeClient(new BackgroundCachePopulator(randomizingExecutorService, JSON_MAPPER,
      new CachePopulatorStats(), -1));
  // callback to be run every time a query run is complete, to ensure all background
  // caching tasks are executed, and the cache is populated before we move on to the next query
  queryCompletedCallback = new Runnable() {
    @Override
    public void run() {
      try {
        randomizingExecutorService.submit(new DrainTask() {
          @Override
          public void run() {
            // no-op
          }
        }).get();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  };
  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals(SEG_SPEC)
      .filters(DIM_FILTER)
      .granularity(GRANULARITY)
      .aggregators(AGGS)
      .postAggregators(POST_AGGS)
      .context(CONTEXT)
      .randomQueryId();
  QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(),
      new TimeseriesQueryQueryToolChest());
  testQueryCaching(
      runner,
      builder.build(),
      Intervals.of("2011-01-05/2011-01-10"),
      makeTimeResults(
          DateTimes.of("2011-01-05"), 85, 102,
          DateTimes.of("2011-01-06"), 412, 521,
          DateTimes.of("2011-01-07"), 122, 21894,
          DateTimes.of("2011-01-08"), 5, 20,
          DateTimes.of("2011-01-09"), 18, 521),
      Intervals.of("2011-01-10/2011-01-13"),
      makeTimeResults(
          DateTimes.of("2011-01-10"), 85, 102,
          DateTimes.of("2011-01-11"), 412, 521,
          DateTimes.of("2011-01-12"), 122, 21894));
}
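The deque doubles as a deferred-task queue in this test: submit() parks each task with addFirst(), and the DrainTask later snapshots the queue, shuffles it, and runs everything, deliberately completing the background cache writes out of order. A simplified sketch of that defer-then-drain idea, with CompletableFuture standing in for Guava's SettableFuture (names are illustrative, not Druid's):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedDeque;

// Sketch: park submitted tasks in a concurrent deque, then drain them in a
// deterministically shuffled order to expose ordering assumptions.
class DeferringExecutor {
    private final ConcurrentLinkedDeque<Runnable> deferred = new ConcurrentLinkedDeque<>();

    // Park the task instead of running it; the caller holds the future.
    CompletableFuture<Void> defer(Runnable task) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        deferred.addFirst(() -> {
            try {
                task.run();
                future.complete(null);
            } catch (Throwable t) {
                future.completeExceptionally(t);
            }
        });
        return future;
    }

    // Snapshot the queue, shuffle with a fixed seed, and run everything,
    // completing the parked futures in a scrambled but reproducible order.
    void drainShuffled(long seed) {
        List<Runnable> tasks = new ArrayList<>(deferred);
        deferred.clear();
        Collections.shuffle(tasks, new Random(seed));
        tasks.forEach(Runnable::run);
    }
}

A test would call defer() wherever background work is scheduled, then drainShuffled() once it is ready to observe out-of-order completion, mirroring what the DrainTask does above.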
Use of java.util.concurrent.ConcurrentLinkedDeque in project j2objc by google: class ConcurrentLinkedDequeTest, method testAddAll2.
/**
* addAll of a collection with null elements throws NPE
*/
public void testAddAll2() {
    ConcurrentLinkedDeque q = new ConcurrentLinkedDeque();
    try {
        q.addAll(Arrays.asList(new Integer[SIZE]));
        shouldThrow();
    } catch (NullPointerException success) {
    }
}
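The NPE is part of the Deque contract: ConcurrentLinkedDeque rejects null elements because null return values from methods like poll() and peek() are used to signal emptiness, so addAll fails on the first null it encounters. A quick standalone demonstration (not from the test suite):

import java.util.Arrays;
import java.util.concurrent.ConcurrentLinkedDeque;

// Demonstration: an Integer[3] holds three null slots, so the asList view
// contains nulls and addAll throws NullPointerException immediately.
public class NullAddAllDemo {
    public static void main(String[] args) {
        ConcurrentLinkedDeque<Integer> q = new ConcurrentLinkedDeque<>();
        try {
            q.addAll(Arrays.asList(new Integer[3]));
            System.out.println("unexpected: no exception");
        } catch (NullPointerException expected) {
            System.out.println("NPE, as the null-rejection contract specifies");
        }
    }
}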