Use of voldemort.store.routed.Response in project voldemort by voldemort.
The class PerformParallelDeleteRequests, method executeInternal.
private void executeInternal(final Pipeline pipeline) {
List<Node> nodes = pipelineData.getNodes();
final Map<Integer, Response<ByteArray, Object>> responses = new ConcurrentHashMap<Integer, Response<ByteArray, Object>>();
int attempts = nodes.size();
int blocks = Math.min(preferred, attempts);
final CountDownLatch attemptsLatch = new CountDownLatch(attempts);
final CountDownLatch blocksLatch = new CountDownLatch(blocks);
if (logger.isTraceEnabled())
logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel");
long beginTime = System.nanoTime();
for (int i = 0; i < attempts; i++) {
final Node node = nodes.get(i);
pipelineData.incrementNodeIndex();
NonblockingStoreCallback callback = new NonblockingStoreCallback() {
public void requestComplete(Object result, long requestTime) {
if (logger.isTraceEnabled())
logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId());
Response<ByteArray, Object> response = new Response<ByteArray, Object>(node, key, result, requestTime);
if (logger.isTraceEnabled()) {
logger.trace(attemptsLatch.getCount() + " attempts remaining. Will block for " + blocksLatch.getCount() + " more ");
}
responses.put(node.getId(), response);
if (response.getValue() instanceof Exception && isOperationCompleted.get()) {
handleException(response, pipeline);
}
attemptsLatch.countDown();
blocksLatch.countDown();
}
};
if (logger.isTraceEnabled())
logger.info("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId());
NonblockingStore store = nonblockingStores.get(node.getId());
store.submitDeleteRequest(key, version, callback, timeoutMs);
}
try {
long elapsedNs = System.nanoTime() - beginTime;
long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
if (remainingNs > 0) {
blocksLatch.await(remainingNs, TimeUnit.NANOSECONDS);
}
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
if (processResponses(responses, pipeline))
return;
// wait for more responses in case we did not receive enough successful
// responses to reach the required count
boolean quorumSatisfied = true;
if (pipelineData.getSuccesses() < required) {
long elapsedNs = System.nanoTime() - beginTime;
long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
if (remainingNs > 0) {
try {
attemptsLatch.await(remainingNs, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
if (processResponses(responses, pipeline))
return;
}
if (pipelineData.getSuccesses() < required) {
pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
abortPipeline(pipeline);
quorumSatisfied = false;
}
}
if (quorumSatisfied) {
if (pipelineData.getZonesRequired() != null) {
int zonesSatisfied = pipelineData.getZoneResponses().size();
if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
completePipeline(pipeline);
} else {
long timeMs = (System.nanoTime() - beginTime) / Time.NS_PER_MS;
if ((timeoutMs - timeMs) > 0) {
try {
attemptsLatch.await(timeoutMs - timeMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
if (processResponses(responses, pipeline))
return;
}
if (pipelineData.getZoneResponses().size() >= (pipelineData.getZonesRequired() + 1)) {
completePipeline(pipeline);
} else {
pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
abortPipeline(pipeline);
}
}
} else {
completePipeline(pipeline);
}
}
}
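Not from the Voldemort source: a minimal, self-contained sketch of the fan-out pattern used above. One task is submitted per node, results are collected in a ConcurrentHashMap keyed by node id (exceptions travel as values, as in the callback), and the caller blocks on a CountDownLatch only for whatever portion of the timeout remains after submission, measured in nanoseconds. The class, method, and callStore names are hypothetical stand-ins.

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

public class ParallelLatchSketch {

    // Hypothetical stand-in for the real nonblocking store call.
    private static Object callStore(int nodeId) throws Exception {
        return "ok from node " + nodeId;
    }

    public static Map<Integer, Object> fanOut(ExecutorService executor, List<Integer> nodeIds, long timeoutMs) throws InterruptedException {
        final Map<Integer, Object> responses = new ConcurrentHashMap<Integer, Object>();
        final CountDownLatch latch = new CountDownLatch(nodeIds.size());
        long beginNs = System.nanoTime();
        for (final Integer nodeId : nodeIds) {
            executor.submit(new Runnable() {
                public void run() {
                    Object value;
                    try {
                        value = callStore(nodeId);
                    } catch (Exception e) {
                        value = e; // exceptions are stored as values, like in the callback above
                    }
                    responses.put(nodeId, value);
                    latch.countDown(); // count down whether the call succeeded or failed
                }
            });
        }
        // Wait only for the part of the timeout that submission has not already used up.
        long remainingNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs) - (System.nanoTime() - beginNs);
        if (remainingNs > 0)
            latch.await(remainingNs, TimeUnit.NANOSECONDS);
        return responses;
    }
}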
Use of voldemort.store.routed.Response in project voldemort by voldemort.
The class PerformParallelGetAllRequests, method execute.
@SuppressWarnings("unchecked")
public void execute(final Pipeline pipeline) {
int attempts = pipelineData.getNodeToKeysMap().size();
final Map<Integer, Response<Iterable<ByteArray>, Object>> responses = new ConcurrentHashMap<Integer, Response<Iterable<ByteArray>, Object>>();
final CountDownLatch latch = new CountDownLatch(attempts);
if (logger.isTraceEnabled())
logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel");
Map<ByteArray, byte[]> transforms = pipelineData.getTransforms();
final AtomicBoolean isResponseProcessed = new AtomicBoolean(false);
for (Map.Entry<Node, List<ByteArray>> entry : pipelineData.getNodeToKeysMap().entrySet()) {
final Node node = entry.getKey();
final Collection<ByteArray> keys = entry.getValue();
NonblockingStoreCallback callback = new NonblockingStoreCallback() {
public void requestComplete(Object result, long requestTime) {
if (logger.isTraceEnabled())
logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId());
Response<Iterable<ByteArray>, Object> response = new Response<Iterable<ByteArray>, Object>(node, keys, result, requestTime);
responses.put(node.getId(), response);
latch.countDown();
// This reduces the window where an exception is lost
if (isResponseProcessed.get() && response.getValue() instanceof Exception) {
if (response.getValue() instanceof InvalidMetadataException) {
pipelineData.reportException((InvalidMetadataException) response.getValue());
logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
} else {
handleResponseError(response, pipeline, failureDetector);
}
}
}
};
if (logger.isTraceEnabled())
logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId());
NonblockingStore store = nonblockingStores.get(node.getId());
store.submitGetAllRequest(keys, transforms, callback, timeoutMs);
}
try {
latch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
for (Response<Iterable<ByteArray>, Object> response : responses.values()) {
if (response.getValue() instanceof Exception) {
if (handleResponseError(response, pipeline, failureDetector))
return;
} else {
Map<ByteArray, List<Versioned<byte[]>>> values = (Map<ByteArray, List<Versioned<byte[]>>>) response.getValue();
for (ByteArray key : response.getKey()) {
MutableInt successCount = pipelineData.getSuccessCount(key);
successCount.increment();
List<Versioned<byte[]>> retrieved = values.get(key);
/*
* retrieved can be null if there are no values for the key
* provided
*/
if (retrieved != null) {
List<Versioned<byte[]>> existing = pipelineData.getResult().get(key);
if (existing == null)
pipelineData.getResult().put(key, Lists.newArrayList(retrieved));
else
existing.addAll(retrieved);
}
HashSet<Integer> zoneResponses = null;
if (pipelineData.getKeyToZoneResponse().containsKey(key)) {
zoneResponses = pipelineData.getKeyToZoneResponse().get(key);
} else {
zoneResponses = new HashSet<Integer>();
pipelineData.getKeyToZoneResponse().put(key, zoneResponses);
}
zoneResponses.add(response.getNode().getZoneId());
}
pipelineData.getResponses().add(new Response<Iterable<ByteArray>, Map<ByteArray, List<Versioned<byte[]>>>>(response.getNode(), response.getKey(), values, response.getRequestTime()));
failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
}
}
isResponseProcessed.set(true);
pipeline.addEvent(completeEvent);
}
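Not from the Voldemort source: a small sketch of the per-key merging performed in the response loop above. String stands in for ByteArray keys and for Versioned<byte[]> values, and the class and method names are hypothetical. Each node's values are appended to a single list per key, and keys a node returned no values for are skipped, mirroring the null check above.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GetAllMergeSketch {

    public static Map<String, List<String>> merge(List<Map<String, List<String>>> perNodeValues) {
        Map<String, List<String>> result = new HashMap<String, List<String>>();
        for (Map<String, List<String>> values : perNodeValues) {
            for (Map.Entry<String, List<String>> entry : values.entrySet()) {
                List<String> retrieved = entry.getValue();
                if (retrieved == null)
                    continue; // a node may legitimately have no values for a key
                List<String> existing = result.get(entry.getKey());
                if (existing == null)
                    result.put(entry.getKey(), new ArrayList<String>(retrieved));
                else
                    existing.addAll(retrieved);
            }
        }
        return result;
    }
}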
Use of voldemort.store.routed.Response in project voldemort by voldemort.
The class PerformParallelRequests, method execute.
public void execute(final Pipeline pipeline) {
List<Node> nodes = pipelineData.getNodes();
int attempts = Math.min(preferred, nodes.size());
final Map<Integer, Response<ByteArray, Object>> responses = new ConcurrentHashMap<Integer, Response<ByteArray, Object>>();
final CountDownLatch latch = new CountDownLatch(attempts);
if (logger.isTraceEnabled())
logger.trace("Attempting " + attempts + " " + pipeline.getOperation().getSimpleName() + " operations in parallel for key " + key);
final AtomicBoolean isResponseProcessed = new AtomicBoolean(false);
for (int i = 0; i < attempts; i++) {
final Node node = nodes.get(i);
pipelineData.incrementNodeIndex();
final long startMs = logger.isDebugEnabled() ? System.currentTimeMillis() : -1;
NonblockingStoreCallback callback = new NonblockingStoreCallback() {
public void requestComplete(Object result, long requestTime) {
if (logger.isTraceEnabled())
logger.trace(pipeline.getOperation().getSimpleName() + " response received (" + requestTime + " ms.) from node " + node.getId() + " for key " + key);
Response<ByteArray, Object> response = new Response<ByteArray, Object>(node, key, result, requestTime);
if (logger.isDebugEnabled())
logger.debug("Finished " + pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + "); started at " + startMs + " took " + requestTime + " ms on node " + node.getId() + "(" + node.getHost() + ")");
responses.put(node.getId(), response);
latch.countDown();
// This reduces the window where an exception is lost
if (isResponseProcessed.get() && response.getValue() instanceof Exception) {
if (response.getValue() instanceof InvalidMetadataException) {
pipelineData.reportException((InvalidMetadataException) response.getValue());
logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "' for key " + key);
} else {
handleResponseError(response, pipeline, failureDetector);
}
}
}
};
if (logger.isTraceEnabled())
logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId() + " for key " + key);
NonblockingStore store = nonblockingStores.get(node.getId());
if (pipeline.getOperation() == Operation.GET)
store.submitGetRequest(key, transforms, callback, timeoutMs);
else if (pipeline.getOperation() == Operation.GET_VERSIONS)
store.submitGetVersionsRequest(key, callback, timeoutMs);
else
throw new IllegalStateException(getClass().getName() + " does not support pipeline operation " + pipeline.getOperation());
}
try {
latch.await(timeoutMs, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
}
for (Response<ByteArray, Object> response : responses.values()) {
if (response.getValue() instanceof Exception) {
if (handleResponseError(response, pipeline, failureDetector))
return;
} else {
pipelineData.incrementSuccesses();
Response<ByteArray, V> rCast = Utils.uncheckedCast(response);
pipelineData.getResponses().add(rCast);
failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
pipelineData.getZoneResponses().add(response.getNode().getZoneId());
}
}
isResponseProcessed.set(true);
if (logger.isDebugEnabled())
logger.debug("GET for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + "); successes: " + pipelineData.getSuccesses() + " preferred: " + preferred + " required: " + required);
if (pipelineData.getSuccesses() < required) {
if (insufficientSuccessesEvent != null) {
pipeline.addEvent(insufficientSuccessesEvent);
} else {
pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
pipeline.abort();
}
} else {
if (pipelineData.getZonesRequired() != null) {
int zonesSatisfied = pipelineData.getZoneResponses().size();
if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
pipeline.addEvent(completeEvent);
} else {
if (logger.isDebugEnabled()) {
logger.debug("Operation " + pipeline.getOperation().getSimpleName() + "failed due to insufficient zone responses, required " + pipelineData.getZonesRequired() + " obtained " + zonesSatisfied + " " + pipelineData.getZoneResponses() + " for key " + key);
}
if (this.insufficientZonesEvent != null) {
pipeline.addEvent(this.insufficientZonesEvent);
} else {
pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
}
}
} else {
pipeline.addEvent(completeEvent);
}
}
}
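Not from the Voldemort source: the completion logic at the end of execute() reduces to three outcomes, sketched below with hypothetical names. Successes are checked against 'required' first; when zone routing is enabled (zonesRequired is non-null), responses from at least zonesRequired + 1 distinct zones are also needed.

public class QuorumDecisionSketch {

    enum Outcome { COMPLETE, INSUFFICIENT_NODES, INSUFFICIENT_ZONES }

    public static Outcome decide(int successes, int required, Integer zonesRequired, int zonesSatisfied) {
        if (successes < required)
            return Outcome.INSUFFICIENT_NODES; // corresponds to InsufficientOperationalNodesException / insufficientSuccessesEvent
        if (zonesRequired != null && zonesSatisfied < zonesRequired + 1)
            return Outcome.INSUFFICIENT_ZONES; // corresponds to InsufficientZoneResponsesException / insufficientZonesEvent
        return Outcome.COMPLETE; // corresponds to the completeEvent
    }
}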
Use of voldemort.store.routed.Response in project voldemort by voldemort.
The class PerformZoneSerialRequests, method execute.
public void execute(Pipeline pipeline) {
List<Node> nodes = pipelineData.getNodes();
while (pipelineData.getNodeIndex() < nodes.size() && (pipelineData.getZoneResponses().size() + 1) < pipelineData.getZonesRequired()) {
Node node = nodes.get(pipelineData.getNodeIndex());
long start = System.nanoTime();
try {
Store<ByteArray, byte[], byte[]> store = stores.get(node.getId());
V result = storeRequest.request(store);
Response<ByteArray, V> response = new Response<ByteArray, V>(node, key, result, ((System.nanoTime() - start) / Time.NS_PER_MS));
pipelineData.incrementSuccesses();
pipelineData.getResponses().add(response);
failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
pipelineData.getZoneResponses().add(node.getZoneId());
} catch (Exception e) {
long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
if (handleResponseError(e, node, requestTime, pipeline, failureDetector))
return;
}
pipelineData.incrementNodeIndex();
}
int zonesSatisfied = pipelineData.getZoneResponses().size();
if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
pipeline.addEvent(completeEvent);
} else {
pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
pipeline.abort();
}
}
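Not from the Voldemort source: a sketch, with hypothetical types, of the serial walk above. Nodes are tried one at a time in preference order, each success contributes its zone id to a set, and the walk ends once responses from zonesRequired + 1 distinct zones have been collected, the same test the method applies at the end (the original loop guard is phrased slightly differently but bounds the same count).

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class ZoneSerialSketch {

    // Hypothetical stand-in for the blocking store call: true on success, false on failure.
    interface NodeCall {
        boolean tryNode(int nodeId);
    }

    public static boolean collectZones(List<Integer> nodeIds, Map<Integer, Integer> zoneOfNode, NodeCall call, int zonesRequired) {
        Set<Integer> zoneResponses = new HashSet<Integer>();
        for (int nodeId : nodeIds) {
            if (zoneResponses.size() >= zonesRequired + 1)
                break;
            if (call.tryNode(nodeId))
                zoneResponses.add(zoneOfNode.get(nodeId));
        }
        return zoneResponses.size() >= zonesRequired + 1;
    }
}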
Use of voldemort.store.routed.Response in project voldemort by voldemort.
The class PerformSerialRequests, method execute.
public void execute(Pipeline pipeline) {
List<Node> nodes = pipelineData.getNodes();
// issue additional serial reads to make up for any shortfall from the parallel stage,
// until the preferred number of successes is reached or the node list is exhausted
while (pipelineData.getSuccesses() < preferred && pipelineData.getNodeIndex() < nodes.size()) {
Node node = nodes.get(pipelineData.getNodeIndex());
long start = System.nanoTime();
try {
Store<ByteArray, byte[], byte[]> store = stores.get(node.getId());
V result = storeRequest.request(store);
Response<ByteArray, V> response = new Response<ByteArray, V>(node, key, result, ((System.nanoTime() - start) / Time.NS_PER_MS));
if (logger.isDebugEnabled())
logger.debug(pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " successes: " + pipelineData.getSuccesses() + " preferred: " + preferred + " required: " + required + " new " + pipeline.getOperation().getSimpleName() + " success on node " + node.getId());
pipelineData.incrementSuccesses();
pipelineData.getResponses().add(response);
failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
pipelineData.getZoneResponses().add(node.getZoneId());
} catch (Exception e) {
long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
if (handleResponseError(e, node, requestTime, pipeline, failureDetector))
return;
}
// break out if we have satisfied everything
if (isSatisfied())
break;
pipelineData.incrementNodeIndex();
}
if (pipelineData.getSuccesses() < required) {
if (insufficientSuccessesEvent != null) {
pipeline.addEvent(insufficientSuccessesEvent);
} else {
pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
pipeline.abort();
}
} else {
if (pipelineData.getZonesRequired() != null) {
int zonesSatisfied = pipelineData.getZoneResponses().size();
if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
pipeline.addEvent(completeEvent);
} else {
// if you run with zoneCountReads > 0, we could frequently
// run into this exception since our preference list for
// zone routing is laid out thus : <a node from each of
// 'zoneCountReads' zones>, <nodes from local zone>, <nodes
// from remote zone1>, <nodes from remote zone2>,...
// #preferred number of reads may not be able to satisfy
// zoneCountReads, if the original read to a remote node
// fails in the parallel stage
pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
pipeline.abort();
}
} else {
pipeline.addEvent(completeEvent);
}
}
}
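Not from the Voldemort source: the comment in the zone-routing branch above describes how the zone-routing preference list is laid out. The sketch below is only a rough, hypothetical illustration of that layout (one node from each of 'zoneCountReads' remote zones, then the local zone's nodes, then the remaining remote-zone nodes); Voldemort's actual routing strategy derives the ordering from the cluster topology, and the inputs here are simplified.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ZonePreferenceSketch {

    // nodesByZone maps a zone id to that zone's node ids; a deterministic iteration order is assumed.
    public static List<Integer> preferenceList(Map<Integer, List<Integer>> nodesByZone, int localZoneId, int zoneCountReads) {
        List<Integer> preference = new ArrayList<Integer>();
        List<Integer> remoteZones = new ArrayList<Integer>(nodesByZone.keySet());
        remoteZones.remove(Integer.valueOf(localZoneId));

        // One node from each of the first 'zoneCountReads' remote zones.
        for (int i = 0; i < Math.min(zoneCountReads, remoteZones.size()); i++)
            preference.add(nodesByZone.get(remoteZones.get(i)).get(0));

        // Then every node in the local zone.
        if (nodesByZone.containsKey(localZoneId))
            preference.addAll(nodesByZone.get(localZoneId));

        // Then whatever has not been listed yet from the remote zones.
        for (int zoneId : remoteZones)
            for (int nodeId : nodesByZone.get(zoneId))
                if (!preference.contains(nodeId))
                    preference.add(nodeId);

        return preference;
    }
}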