Use of java.util.concurrent.CopyOnWriteArrayList in project undertow by undertow-io.
The class HttpClientTestCase, method testConnectionClose:
@Test
public void testConnectionClose() throws Exception {
    DefaultServer.setRootHandler(SIMPLE_MESSAGE_HANDLER);
    final UndertowClient client = createClient();
    final CountDownLatch latch = new CountDownLatch(1);
    final ClientConnection connection = client.connect(ADDRESS, worker, DefaultServer.getBufferPool(), OptionMap.EMPTY).get();
    try {
        ClientRequest request = new ClientRequest().setPath("/1324").setMethod(Methods.GET);
        request.getRequestHeaders().put(Headers.HOST, DefaultServer.getHostAddress());
        final List<ClientResponse> responses = new CopyOnWriteArrayList<>();
        request.getRequestHeaders().add(Headers.CONNECTION, Headers.CLOSE.toString());
        connection.sendRequest(request, createClientCallback(responses, latch));
        latch.await();
        final ClientResponse response = responses.iterator().next();
        Assert.assertEquals(message, response.getAttachment(RESPONSE_BODY));
        Assert.assertEquals(false, connection.isOpen());
    } finally {
        IoUtils.safeClose(connection);
    }
}
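The CopyOnWriteArrayList lets the client callback, which completes on an I/O thread, append the response while the test thread waits on the latch and then reads the list without extra synchronization. Below is a minimal standalone sketch of that pattern; it is not Undertow-specific, and the class name CallbackCollectionSketch is made up for illustration.
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;

public class CallbackCollectionSketch {

    public static void main(String[] args) throws InterruptedException {
        final List<String> responses = new CopyOnWriteArrayList<>();
        final CountDownLatch latch = new CountDownLatch(1);

        // Simulates the client callback completing on an I/O thread.
        new Thread(() -> {
            responses.add("response body");   // safe to call from any thread
            latch.countDown();                // signal the waiting test thread
        }).start();

        latch.await();
        // Iteration sees a consistent snapshot; no external locking is needed.
        System.out.println(responses.iterator().next());
    }
}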
Use of java.util.concurrent.CopyOnWriteArrayList in project voldemort by voldemort.
The class ThreadPoolRoutedStore, method getAll:
@Override
public Map<ByteArray, List<Versioned<byte[]>>> getAll(Iterable<ByteArray> keys, Map<ByteArray, byte[]> transforms) throws VoldemortException {
    StoreUtils.assertValidKeys(keys);
    Map<ByteArray, List<Versioned<byte[]>>> result = StoreUtils.newEmptyHashMap(keys);
    // Keys for each node needed to satisfy storeDef.getPreferredReads() if no failures.
    Map<Node, List<ByteArray>> nodeToKeysMap = Maps.newHashMap();
    // Keep track of nodes per key that might be needed if there are failures during getAll.
    Map<ByteArray, List<Node>> keyToExtraNodesMap = Maps.newHashMap();
    for (ByteArray key : keys) {
        List<Node> availableNodes = availableNodes(routingStrategy.routeRequest(key.get()));
        // quickly fail if there aren't enough nodes to meet the requirement
        checkRequiredReads(availableNodes);
        int preferredReads = storeDef.getPreferredReads();
        List<Node> preferredNodes = Lists.newArrayListWithCapacity(preferredReads);
        List<Node> extraNodes = Lists.newArrayListWithCapacity(3);
        for (Node node : availableNodes) {
            if (preferredNodes.size() < preferredReads)
                preferredNodes.add(node);
            else
                extraNodes.add(node);
        }
        for (Node node : preferredNodes) {
            List<ByteArray> nodeKeys = nodeToKeysMap.get(node);
            if (nodeKeys == null) {
                nodeKeys = Lists.newArrayList();
                nodeToKeysMap.put(node, nodeKeys);
            }
            nodeKeys.add(key);
        }
        if (!extraNodes.isEmpty()) {
            List<Node> nodes = keyToExtraNodesMap.get(key);
            if (nodes == null)
                keyToExtraNodesMap.put(key, extraNodes);
            else
                nodes.addAll(extraNodes);
        }
    }
    List<Callable<GetAllResult>> callables = Lists.newArrayList();
    for (Map.Entry<Node, List<ByteArray>> entry : nodeToKeysMap.entrySet()) {
        final Node node = entry.getKey();
        final Collection<ByteArray> nodeKeys = entry.getValue();
        if (failureDetector.isAvailable(node))
            callables.add(new GetAllCallable(node, nodeKeys, transforms));
    }
    // A list of thrown exceptions, indicating the number of failures
    List<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
    List<NodeValue<ByteArray, byte[]>> nodeValues = Lists.newArrayList();
    Map<ByteArray, MutableInt> keyToSuccessCount = Maps.newHashMap();
    for (ByteArray key : keys)
        keyToSuccessCount.put(key, new MutableInt(0));
    List<Future<GetAllResult>> futures;
    long timeoutMs = timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_ALL_OP_CODE);
    try {
        // TODO What to do about timeouts? They should be longer, as getAll is likely to
        // take longer. At the moment it's just timeoutMs * 3, but should this be based
        // on the number of keys?
        futures = executor.invokeAll(callables, timeoutMs * 3, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
    }
    for (Future<GetAllResult> f : futures) {
        if (f.isCancelled()) {
            logger.warn("Get operation timed out after " + timeoutMs + " ms.");
            continue;
        }
        try {
            GetAllResult getResult = f.get();
            if (getResult.exception != null) {
                if (getResult.exception instanceof VoldemortApplicationException) {
                    throw (VoldemortException) getResult.exception;
                }
                failures.add(getResult.exception);
                continue;
            }
            for (ByteArray key : getResult.callable.nodeKeys) {
                List<Versioned<byte[]>> retrieved = getResult.retrieved.get(key);
                MutableInt successCount = keyToSuccessCount.get(key);
                successCount.increment();
                // retrieved can be null if there are no values for the key provided
                if (retrieved != null) {
                    List<Versioned<byte[]>> existing = result.get(key);
                    if (existing == null)
                        result.put(key, Lists.newArrayList(retrieved));
                    else
                        existing.addAll(retrieved);
                }
            }
            nodeValues.addAll(getResult.nodeValues);
        } catch (InterruptedException e) {
            throw new InsufficientOperationalNodesException("getAll operation interrupted.", e);
        } catch (ExecutionException e) {
            // apart from an Error, this should never happen
            if (e.getCause() instanceof Error)
                throw (Error) e.getCause();
            else
                logger.error(e.getMessage(), e);
        }
    }
    for (ByteArray key : keys) {
        MutableInt successCountWrapper = keyToSuccessCount.get(key);
        int successCount = successCountWrapper.intValue();
        if (successCount < storeDef.getPreferredReads()) {
            List<Node> extraNodes = keyToExtraNodesMap.get(key);
            if (extraNodes != null) {
                for (Node node : extraNodes) {
                    long startNs = System.nanoTime();
                    try {
                        List<Versioned<byte[]>> values = innerStores.get(node.getId()).get(key, transforms == null ? null : transforms.get(key));
                        fillRepairReadsValues(nodeValues, key, node, values);
                        List<Versioned<byte[]>> versioneds = result.get(key);
                        if (versioneds == null)
                            result.put(key, Lists.newArrayList(values));
                        else
                            versioneds.addAll(values);
                        recordSuccess(node, startNs);
                        if (++successCount >= storeDef.getPreferredReads())
                            break;
                    } catch (UnreachableStoreException e) {
                        failures.add(e);
                        recordException(node, startNs, e);
                    } catch (VoldemortApplicationException e) {
                        throw e;
                    } catch (Exception e) {
                        logger.warn("Error in GET_ALL on node " + node.getId() + "(" + node.getHost() + ")", e);
                        failures.add(e);
                    }
                }
            }
        }
        successCountWrapper.setValue(successCount);
    }
    repairReads(nodeValues, repairReads && (transforms == null || transforms.size() == 0));
    for (Map.Entry<ByteArray, MutableInt> mapEntry : keyToSuccessCount.entrySet()) {
        int successCount = mapEntry.getValue().intValue();
        if (successCount < storeDef.getRequiredReads())
            throw new InsufficientOperationalNodesException(this.storeDef.getRequiredReads() + " reads required, but " + successCount + " succeeded.", failures);
    }
    return result;
}
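The getAll path fans one callable out per node with ExecutorService.invokeAll(), applies a single timeout to the whole batch, skips cancelled futures, and records per-node failures in a CopyOnWriteArrayList so they can later be attached to an InsufficientOperationalNodesException. The sketch below shows just that fan-out skeleton under simplified assumptions; ParallelFetchSketch and its fake node tasks are hypothetical names, not the Voldemort API.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.*;

public class ParallelFetchSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(3);
        List<Throwable> failures = new CopyOnWriteArrayList<>();
        List<Callable<String>> tasks = new ArrayList<>();
        for (int node = 0; node < 3; node++) {
            final int id = node;
            tasks.add(() -> {
                if (id == 1) {
                    throw new RuntimeException("node " + id + " unreachable");
                }
                return "value from node " + id;
            });
        }

        List<String> results = new ArrayList<>();
        // Single timeout for the whole batch; cancelled futures are skipped below.
        List<Future<String>> futures = executor.invokeAll(tasks, 500, TimeUnit.MILLISECONDS);
        for (Future<String> f : futures) {
            if (f.isCancelled()) {
                continue;                       // timed out; treat as a miss
            }
            try {
                results.add(f.get());
            } catch (ExecutionException e) {
                failures.add(e.getCause());     // remember why this node failed
            }
        }
        executor.shutdown();
        System.out.println(results + ", failures=" + failures.size());
    }
}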
Use of java.util.concurrent.CopyOnWriteArrayList in project voldemort by voldemort.
The class ThreadPoolRoutedStore, method delete:
@Override
public boolean delete(final ByteArray key, final Version version) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));
    // quickly fail if there aren't enough live nodes to meet the requirements
    final int numNodes = nodes.size();
    if (numNodes < this.storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException("Only " + numNodes + " nodes in preference list, but " + this.storeDef.getRequiredWrites() + " writes required.");
    // A count of the number of successful operations
    final AtomicInteger successes = new AtomicInteger(0);
    final AtomicBoolean deletedSomething = new AtomicBoolean(false);
    // A list of thrown exceptions, indicating the number of failures
    final List<Exception> failures = new CopyOnWriteArrayList<Exception>();
    // A semaphore indicating the number of completed operations.
    // Once initialized, all permits are acquired; after that, permits are released
    // as operations complete. semaphore.acquire(n) waits for n operations to complete.
    final Semaphore semaphore = new Semaphore(0, false);
    // Add the operations to the pool
    for (final Node node : nodes) {
        this.executor.execute(new Runnable() {
            @Override
            public void run() {
                long startNs = System.nanoTime();
                try {
                    boolean deleted = innerStores.get(node.getId()).delete(key, version);
                    successes.incrementAndGet();
                    deletedSomething.compareAndSet(false, deleted);
                    recordSuccess(node, startNs);
                } catch (UnreachableStoreException e) {
                    failures.add(e);
                    recordException(node, startNs, e);
                } catch (VoldemortApplicationException e) {
                    throw e;
                } catch (Exception e) {
                    failures.add(e);
                    logger.warn("Error in DELETE on node " + node.getId() + "(" + node.getHost() + ")", e);
                } finally {
                    // signal that the operation is complete
                    semaphore.release();
                }
            }
        });
    }
    int attempts = Math.min(storeDef.getPreferredWrites(), numNodes);
    if (this.storeDef.getPreferredWrites() <= 0) {
        return true;
    } else {
        for (int i = 0; i < numNodes; i++) {
            try {
                long timeoutMs = timeoutConfig.getOperationTimeout(VoldemortOpCode.DELETE_OP_CODE);
                boolean acquired = semaphore.tryAcquire(timeoutMs, TimeUnit.MILLISECONDS);
                if (!acquired)
                    logger.warn("Delete operation timed out waiting for operation " + i + " to complete after waiting " + timeoutMs + " ms.");
                // another operation has completed; have enough of them succeeded?
                if (successes.get() >= attempts)
                    return deletedSomething.get();
            } catch (InterruptedException e) {
                throw new InsufficientOperationalNodesException("Delete operation interrupted!", e);
            }
        }
    }
    // we could not reach the preferred number of writes, so check the required number
    if (successes.get() < storeDef.getRequiredWrites())
        throw new InsufficientOperationalNodesException(this.storeDef.getRequiredWrites() + " deletes required, but " + successes.get() + " succeeded.", failures);
    else
        return deletedSomething.get();
}
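In delete, the failure list is appended to from the pool threads while the calling thread waits on the semaphore and inspects the success counter, which is exactly where the thread safety of CopyOnWriteArrayList matters. A minimal sketch of that quorum-write shape follows; QuorumWriteSketch, replicas, and requiredWrites are illustrative names, not Voldemort code.
import java.util.List;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;

public class QuorumWriteSketch {

    public static void main(String[] args) throws InterruptedException {
        final int replicas = 3;
        final int requiredWrites = 2;
        ExecutorService executor = Executors.newFixedThreadPool(replicas);
        final AtomicInteger successes = new AtomicInteger(0);
        final List<Exception> failures = new CopyOnWriteArrayList<>();
        final Semaphore semaphore = new Semaphore(0);

        for (int node = 0; node < replicas; node++) {
            final int id = node;
            executor.execute(() -> {
                try {
                    if (id == 2) {
                        throw new RuntimeException("node " + id + " unreachable");
                    }
                    successes.incrementAndGet();
                } catch (Exception e) {
                    failures.add(e);            // safe concurrent append
                } finally {
                    semaphore.release();        // signal one completed operation
                }
            });
        }

        // Wait for completions until the write quorum is met (or all workers return).
        for (int i = 0; i < replicas; i++) {
            semaphore.tryAcquire(100, TimeUnit.MILLISECONDS);
            if (successes.get() >= requiredWrites) {
                break;
            }
        }
        executor.shutdown();
        System.out.println("successes=" + successes.get() + ", failures=" + failures.size());
    }
}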
Use of java.util.concurrent.CopyOnWriteArrayList in project voldemort by voldemort.
The class ThreadPoolRoutedStore, method get:
/*
 * 1. Attempt preferredReads, and then wait for these to complete.
 * 2. If we got all the reads we wanted, then we are done.
 * 3. If not, then continue serially attempting to read from each node until we
 *    get preferredReads or run out of nodes.
 * 4. If we have multiple results, do a read repair.
 * 5. If we have at least requiredReads, return. Otherwise throw an exception.
 */
private <R> List<R> get(final ByteArray key, final byte[] transforms, StoreOp<R> fetcher, Function<List<GetResult<R>>, Void> preReturnProcedure) throws VoldemortException {
    StoreUtils.assertValidKey(key);
    final List<Node> nodes = availableNodes(routingStrategy.routeRequest(key.get()));
    // quickly fail if there aren't enough nodes to meet the requirement
    checkRequiredReads(nodes);
    final List<GetResult<R>> retrieved = Lists.newArrayList();
    // A count of the number of successful operations
    int successes = 0;
    // A list of thrown exceptions, indicating the number of failures
    final List<Throwable> failures = new CopyOnWriteArrayList<Throwable>();
    // Do the preferred number of reads in parallel
    int attempts = Math.min(this.storeDef.getPreferredReads(), nodes.size());
    int nodeIndex = 0;
    List<Callable<GetResult<R>>> callables = Lists.newArrayListWithCapacity(attempts);
    for (; nodeIndex < attempts; nodeIndex++) {
        final Node node = nodes.get(nodeIndex);
        callables.add(new GetCallable<R>(node, key, transforms, fetcher));
    }
    List<Future<GetResult<R>>> futures;
    long timeoutMs = (fetcher == VERSION_OP) ? timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_VERSION_OP_CODE) : timeoutConfig.getOperationTimeout(VoldemortOpCode.GET_OP_CODE);
    try {
        futures = executor.invokeAll(callables, timeoutMs, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        throw new InsufficientOperationalNodesException("Get operation interrupted!", e);
    }
    for (Future<GetResult<R>> f : futures) {
        if (f.isCancelled()) {
            logger.warn("Get operation timed out after " + timeoutMs + " ms.");
            continue;
        }
        try {
            GetResult<R> getResult = f.get();
            if (getResult.exception != null) {
                if (getResult.exception instanceof VoldemortApplicationException) {
                    throw (VoldemortException) getResult.exception;
                }
                failures.add(getResult.exception);
                continue;
            }
            ++successes;
            retrieved.add(getResult);
        } catch (InterruptedException e) {
            throw new InsufficientOperationalNodesException("Get operation interrupted!", e);
        } catch (ExecutionException e) {
            // apart from an Error propagated from the callable, this should never happen
            if (e.getCause() instanceof Error)
                throw (Error) e.getCause();
            else
                logger.error(e.getMessage(), e);
        }
    }
    // If the parallel reads fell short, do serial reads to make up for these.
    while (successes < this.storeDef.getPreferredReads() && nodeIndex < nodes.size()) {
        Node node = nodes.get(nodeIndex);
        long startNs = System.nanoTime();
        try {
            retrieved.add(new GetResult<R>(node, key, fetcher.execute(innerStores.get(node.getId()), key, transforms), null));
            ++successes;
            recordSuccess(node, startNs);
        } catch (UnreachableStoreException e) {
            failures.add(e);
            recordException(node, startNs, e);
        } catch (VoldemortApplicationException e) {
            throw e;
        } catch (Exception e) {
            logger.warn("Error in GET on node " + node.getId() + "(" + node.getHost() + ")", e);
            failures.add(e);
        }
        nodeIndex++;
    }
    if (logger.isTraceEnabled())
        logger.trace("GET retrieved the following node values: " + formatNodeValues(retrieved));
    if (preReturnProcedure != null)
        preReturnProcedure.apply(retrieved);
    if (successes >= this.storeDef.getRequiredReads()) {
        List<R> result = Lists.newArrayListWithExpectedSize(retrieved.size());
        for (GetResult<R> getResult : retrieved)
            result.addAll(getResult.retrieved);
        return result;
    } else
        throw new InsufficientOperationalNodesException(this.storeDef.getRequiredReads() + " reads required, but " + successes + " succeeded.", failures);
}
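One plausible reason (an assumption, not stated in the source) for collecting failures in a CopyOnWriteArrayList rather than a plain ArrayList is that its iterators work over an immutable snapshot, so the list can be read, for example when reporting the failures, even while another thread is still appending to it. The short demonstration below, with the made-up name SnapshotIterationSketch, shows that property in isolation.
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

public class SnapshotIterationSketch {

    public static void main(String[] args) throws InterruptedException {
        final List<Throwable> failures = new CopyOnWriteArrayList<>();
        failures.add(new RuntimeException("initial failure"));

        // Keeps appending while the main thread iterates.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 100; i++) {
                failures.add(new RuntimeException("failure " + i));
            }
        });
        writer.start();

        // The iterator is backed by the snapshot taken when it was created,
        // so this loop never throws ConcurrentModificationException.
        int seen = 0;
        for (Throwable t : failures) {
            seen++;
        }
        writer.join();
        System.out.println("iterated " + seen + " of " + failures.size() + " recorded failures");
    }
}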
Use of java.util.concurrent.CopyOnWriteArrayList in project undertow by undertow-io.
The class ChannelUpgradeHandler, method addProtocol:
private synchronized void addProtocol(String productString, HttpUpgradeListener openListener, final ChannelListener<? super StreamConnection> channelListener, final HttpUpgradeHandshake handshake) {
    if (productString == null) {
        throw new IllegalArgumentException("productString is null");
    }
    if (openListener == null && channelListener == null) {
        throw new IllegalArgumentException("openListener is null");
    }
    if (openListener == null) {
        openListener = new HttpUpgradeListener() {
            @Override
            public void handleUpgrade(StreamConnection streamConnection, HttpServerExchange exchange) {
                ChannelListeners.invokeChannelListener(streamConnection, channelListener);
            }
        };
    }
    List<Holder> list = handlers.get(productString);
    if (list == null) {
        handlers.put(productString, list = new CopyOnWriteArrayList<>());
    }
    list.add(new Holder(openListener, handshake, channelListener));
}
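Here registration happens rarely and under a synchronized method, while the upgrade-handling path presumably iterates the per-protocol list far more often; CopyOnWriteArrayList makes that iteration lock-free at the cost of copying the array on each registration. The sketch below shows the same read-mostly registry shape with made-up names (ListenerRegistrySketch, Listener); it also uses a ConcurrentHashMap for the outer map, which is an assumption and not necessarily what the Undertow class does.
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArrayList;

public class ListenerRegistrySketch {

    interface Listener {
        void onEvent(String payload);
    }

    private final Map<String, List<Listener>> handlers = new ConcurrentHashMap<>();

    // Rarely called; the copy made on each add is an acceptable cost.
    public synchronized void register(String protocol, Listener listener) {
        handlers.computeIfAbsent(protocol, k -> new CopyOnWriteArrayList<>()).add(listener);
    }

    // Hot path: iterate the list without any locking.
    public void dispatch(String protocol, String payload) {
        List<Listener> list = handlers.get(protocol);
        if (list != null) {
            for (Listener listener : list) {
                listener.onEvent(payload);
            }
        }
    }

    public static void main(String[] args) {
        ListenerRegistrySketch registry = new ListenerRegistrySketch();
        registry.register("websocket", payload -> System.out.println("got " + payload));
        registry.dispatch("websocket", "upgrade request");
    }
}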