Use of voldemort.store.InsufficientOperationalNodesException in project voldemort by voldemort.
The class ThreadPoolRoutedStore, method blockOnPut.
/**
* @return false if the operation timed out, true otherwise.
*/
private boolean blockOnPut(long startNs, Semaphore semaphore, int startingIndex, int blockCount, AtomicInteger successes, int successesRequired) {
    for (int i = startingIndex; i < blockCount; i++) {
        try {
            long elapsedNs = System.nanoTime() - startNs;
            long remainingNs = (timeoutConfig.getOperationTimeout(VoldemortOpCode.PUT_OP_CODE) * Time.NS_PER_MS) - elapsedNs;
            boolean acquiredPermit = semaphore.tryAcquire(Math.max(remainingNs, 0), TimeUnit.NANOSECONDS);
            if (!acquiredPermit) {
                logger.warn("Timed out waiting for put # " + (i + 1) + " of " + blockCount + " to succeed.");
                return false;
            }
            if (successes.get() >= successesRequired)
                break;
        } catch (InterruptedException e) {
            throw new InsufficientOperationalNodesException("Put operation interrupted", e);
        }
    }
    return true;
}
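The method converts the unused share of the PUT operation timeout into a nanosecond budget and hands it to Semaphore.tryAcquire, so the total wait across all pending puts never exceeds the configured timeout. Below is a minimal, self-contained sketch of the same budgeting pattern using only java.util.concurrent; the class and parameter names are hypothetical and not part of Voldemort's API.

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical illustration of the "remaining time budget" pattern used above.
public final class DeadlineWaiter {

    // Wait for up to 'acksToAwait' releases of the semaphore, stopping early once
    // 'successes' reaches 'requiredAcks', but never waiting beyond the deadline of
    // startNs + timeoutMs. Returns false if the deadline expires first.
    public static boolean awaitAcks(Semaphore semaphore,
                                    AtomicInteger successes,
                                    int acksToAwait,
                                    int requiredAcks,
                                    long startNs,
                                    long timeoutMs) throws InterruptedException {
        for (int i = 0; i < acksToAwait; i++) {
            long elapsedNs = System.nanoTime() - startNs;
            long remainingNs = TimeUnit.MILLISECONDS.toNanos(timeoutMs) - elapsedNs;
            // A non-positive budget means the deadline has already passed; tryAcquire
            // with a zero timeout then simply polls for an available permit.
            if (!semaphore.tryAcquire(Math.max(remainingNs, 0), TimeUnit.NANOSECONDS))
                return false;
            if (successes.get() >= requiredAcks)
                break;
        }
        return true;
    }
}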
Use of voldemort.store.InsufficientOperationalNodesException in project voldemort by voldemort.
The class PerformSerialRequests, method execute.
public void execute(Pipeline pipeline) {
    List<Node> nodes = pipelineData.getNodes();
    // the parallel stage may have fallen short of 'preferred' successes, so issue
    // serial reads to make up for these until we succeed or run out of nodes.
    while (pipelineData.getSuccesses() < preferred && pipelineData.getNodeIndex() < nodes.size()) {
        Node node = nodes.get(pipelineData.getNodeIndex());
        long start = System.nanoTime();
        try {
            Store<ByteArray, byte[], byte[]> store = stores.get(node.getId());
            V result = storeRequest.request(store);
            Response<ByteArray, V> response = new Response<ByteArray, V>(node, key, result, ((System.nanoTime() - start) / Time.NS_PER_MS));
            if (logger.isDebugEnabled())
                logger.debug(pipeline.getOperation().getSimpleName() + " for key " + ByteUtils.toHexString(key.get()) + " successes: " + pipelineData.getSuccesses() + " preferred: " + preferred + " required: " + required + " new " + pipeline.getOperation().getSimpleName() + " success on node " + node.getId());
            pipelineData.incrementSuccesses();
            pipelineData.getResponses().add(response);
            failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
            pipelineData.getZoneResponses().add(node.getZoneId());
        } catch (Exception e) {
            long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
            if (handleResponseError(e, node, requestTime, pipeline, failureDetector))
                return;
        }
        // break out if we have satisfied everything
        if (isSatisfied())
            break;
        pipelineData.incrementNodeIndex();
    }
    if (pipelineData.getSuccesses() < required) {
        if (insufficientSuccessesEvent != null) {
            pipeline.addEvent(insufficientSuccessesEvent);
        } else {
            pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures()));
            pipeline.abort();
        }
    } else {
        if (pipelineData.getZonesRequired() != null) {
            int zonesSatisfied = pipelineData.getZoneResponses().size();
            if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
                pipeline.addEvent(completeEvent);
            } else {
                // if you run with zoneCountReads > 0, we could frequently
                // run into this exception since our preference list for
                // zone routing is laid out thus: <a node from each of
                // 'zoneCountReads' zones>, <nodes from local zone>, <nodes
                // from remote zone1>, <nodes from remote zone2>,...
                // #preferred number of reads may not be able to satisfy
                // zoneCountReads, if the original read to a remote node
                // fails in the parallel stage
                pipelineData.setFatalError(new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + zonesSatisfied + " succeeded"));
                pipeline.abort();
            }
        } else {
            pipeline.addEvent(completeEvent);
        }
    }
}
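Stripped of the pipeline plumbing, this action implements a serial fallback: walk the remaining preference list one node at a time, stop once 'preferred' successes are reached, and raise a fatal error if fewer than 'required' nodes responded. The following is a simplified, hypothetical sketch of that control flow; the generic types and IllegalStateException stand in for Voldemort's own classes.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

// Hypothetical, simplified version of the serial request loop above.
public class SerialFallback<N, V> {

    public List<V> request(List<N> nodes,
                           Function<N, V> storeRequest,
                           int preferred,
                           int required) {
        List<V> responses = new ArrayList<>();
        List<Exception> failures = new ArrayList<>();
        for (N node : nodes) {
            if (responses.size() >= preferred)
                break;
            try {
                responses.add(storeRequest.apply(node));
            } catch (Exception e) {
                failures.add(e); // remember the failure and keep trying further nodes
            }
        }
        if (responses.size() < required)
            throw new IllegalStateException(required + " responses required, but only "
                                            + responses.size() + " succeeded; failures=" + failures);
        return responses;
    }
}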
Use of voldemort.store.InsufficientOperationalNodesException in project voldemort by voldemort.
The class PerformSerialGetAllRequests, method execute.
public void execute(Pipeline pipeline) {
    Map<ByteArray, List<Versioned<byte[]>>> result = pipelineData.getResult();
    for (ByteArray key : keys) {
        boolean zoneRequirement = false;
        MutableInt successCount = pipelineData.getSuccessCount(key);
        if (logger.isDebugEnabled())
            logger.debug("GETALL for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + ") successes: " + successCount.intValue() + " preferred: " + preferred + " required: " + required);
        if (successCount.intValue() >= preferred) {
            if (pipelineData.getZonesRequired() != null && pipelineData.getZonesRequired() > 0) {
                if (pipelineData.getKeyToZoneResponse().containsKey(key)) {
                    int zonesSatisfied = pipelineData.getKeyToZoneResponse().get(key).size();
                    if (zonesSatisfied >= (pipelineData.getZonesRequired() + 1)) {
                        continue;
                    } else {
                        zoneRequirement = true;
                    }
                } else {
                    zoneRequirement = true;
                }
            } else {
                continue;
            }
        }
        List<Node> extraNodes = pipelineData.getKeyToExtraNodesMap().get(key);
        Map<ByteArray, byte[]> transforms = pipelineData.getTransforms();
        if (extraNodes == null)
            continue;
        for (Node node : extraNodes) {
            long start = System.nanoTime();
            try {
                Store<ByteArray, byte[], byte[]> store = stores.get(node.getId());
                List<Versioned<byte[]>> values;
                if (transforms == null)
                    values = store.get(key, null);
                else
                    values = store.get(key, transforms.get(key));
                if (values.size() != 0) {
                    if (result.get(key) == null)
                        result.put(key, Lists.newArrayList(values));
                    else
                        result.get(key).addAll(values);
                }
                Map<ByteArray, List<Versioned<byte[]>>> map = new HashMap<ByteArray, List<Versioned<byte[]>>>();
                map.put(key, values);
                Response<Iterable<ByteArray>, Map<ByteArray, List<Versioned<byte[]>>>> response = new Response<Iterable<ByteArray>, Map<ByteArray, List<Versioned<byte[]>>>>(node, Arrays.asList(key), map, ((System.nanoTime() - start) / Time.NS_PER_MS));
                successCount.increment();
                pipelineData.getResponses().add(response);
                failureDetector.recordSuccess(response.getNode(), response.getRequestTime());
                if (logger.isDebugEnabled())
                    logger.debug("GET for key " + ByteUtils.toHexString(key.get()) + " (keyRef: " + System.identityHashCode(key) + ") successes: " + successCount.intValue() + " preferred: " + preferred + " required: " + required + " new GET success on node " + node.getId());
                HashSet<Integer> zoneResponses = null;
                if (pipelineData.getKeyToZoneResponse().containsKey(key)) {
                    zoneResponses = pipelineData.getKeyToZoneResponse().get(key);
                } else {
                    zoneResponses = new HashSet<Integer>();
                    pipelineData.getKeyToZoneResponse().put(key, zoneResponses);
                }
                zoneResponses.add(response.getNode().getZoneId());
                if (zoneRequirement) {
                    if (zoneResponses.size() >= pipelineData.getZonesRequired())
                        break;
                } else {
                    if (successCount.intValue() >= preferred)
                        break;
                }
            } catch (Exception e) {
                long requestTime = (System.nanoTime() - start) / Time.NS_PER_MS;
                if (handleResponseError(e, node, requestTime, pipeline, failureDetector))
                    return;
            }
        }
    }
    for (ByteArray key : keys) {
        MutableInt successCount = pipelineData.getSuccessCount(key);
        if (successCount.intValue() < required) {
            // if partial results are allowed, drop keys that did not meet the
            // 'required' guarantee; otherwise raise an error
            if (allowPartial) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Excluding Key " + ByteUtils.toHexString(key.get()) + " from partial get_all result");
                }
                result.remove(key);
            } else {
                pipelineData.setFatalError(new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but " + successCount.intValue() + " succeeded. Failing nodes : " + pipelineData.getFailedNodes(), pipelineData.getFailures()));
                pipeline.addEvent(Event.ERROR);
                return;
            }
        }
    }
    pipeline.addEvent(completeEvent);
}
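The second loop is a post-filter: any key that still has fewer than 'required' successes is either dropped from a partial result or turned into a fatal error. Here is a simplified sketch of that filtering step, with hypothetical names and a plain IllegalStateException in place of InsufficientOperationalNodesException.

import java.util.Collection;
import java.util.List;
import java.util.Map;

// Hypothetical sketch of the post-filtering step above: keys that did not reach
// 'required' successes are dropped (allowPartial) or cause the whole call to fail.
public final class PartialGetAllFilter {

    public static <K, V> void enforceRequired(Collection<K> keys,
                                              Map<K, List<V>> result,
                                              Map<K, Integer> successCounts,
                                              int required,
                                              boolean allowPartial) {
        for (K key : keys) {
            int successes = successCounts.getOrDefault(key, 0);
            if (successes >= required)
                continue;
            if (allowPartial) {
                result.remove(key); // exclude the key from the partial get_all result
            } else {
                throw new IllegalStateException(required + " reads required for key " + key
                                                + ", but only " + successes + " succeeded");
            }
        }
    }
}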
Use of voldemort.store.InsufficientOperationalNodesException in project voldemort by voldemort.
The class Pipeline, method execute.
/**
* Process events in the order in which they were received.
*
* <p/>
*
* The overall time to process the events must be within the bounds of the
* timeout or an {@link InsufficientOperationalNodesException} will be
* thrown.
*/
public void execute() {
    try {
        while (true) {
            Event event = null;
            try {
                event = eventQueue.poll(timeout, unit);
            } catch (InterruptedException e) {
                throw new InsufficientOperationalNodesException(operation.getSimpleName() + " operation interrupted!", e);
            }
            if (event == null)
                throw new VoldemortException(operation.getSimpleName() + " returned a null event");
            if (event.equals(Event.ERROR)) {
                if (logger.isTraceEnabled())
                    logger.trace(operation.getSimpleName() + " request, events complete due to error");
                break;
            } else if (event.equals(Event.COMPLETED)) {
                if (logger.isTraceEnabled())
                    logger.trace(operation.getSimpleName() + " request, events complete");
                break;
            }
            Action action = eventActions.get(event);
            if (action == null)
                throw new IllegalStateException("action was null for event " + event);
            if (logger.isTraceEnabled())
                logger.trace(operation.getSimpleName() + " request, action " + action.getClass().getSimpleName() + " to handle " + event + " event");
            action.execute(this);
        }
    } finally {
        finished = true;
    }
}
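The pipeline is a small event loop: it blocks on a queue with the operation timeout, treats a null poll result as a timeout, and dispatches every other event to its registered action until ERROR or COMPLETED arrives. A bare-bones sketch of that loop outside of Voldemort follows; RuntimeException and IllegalStateException stand in for the Voldemort exception types.

import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

// Hypothetical, stripped-down version of the event loop above: drain events from
// a queue, dispatch them to registered actions, and stop on ERROR/COMPLETED.
public class EventLoop<E> {

    public void run(BlockingQueue<E> eventQueue,
                    Map<E, Runnable> eventActions,
                    E errorEvent,
                    E completedEvent,
                    long timeout,
                    TimeUnit unit) {
        while (true) {
            E event;
            try {
                event = eventQueue.poll(timeout, unit);
            } catch (InterruptedException e) {
                throw new RuntimeException("operation interrupted!", e);
            }
            if (event == null)
                throw new IllegalStateException("timed out waiting for the next event");
            if (event.equals(errorEvent) || event.equals(completedEvent))
                break; // terminal events end the loop
            Runnable action = eventActions.get(event);
            if (action == null)
                throw new IllegalStateException("action was null for event " + event);
            action.run();
        }
    }
}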
Use of voldemort.store.InsufficientOperationalNodesException in project voldemort by voldemort.
The class RoutedStoreTest, method testOperationSpecificTimeouts.
@Test
public void testOperationSpecificTimeouts() throws Exception {
    StoreDefinition definition = new StoreDefinitionBuilder().setName("test").setType("foo").setKeySerializer(new SerializerDefinition("test")).setValueSerializer(new SerializerDefinition("test")).setRoutingPolicy(RoutingTier.CLIENT).setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY).setReplicationFactor(3).setPreferredReads(3).setRequiredReads(3).setPreferredWrites(3).setRequiredWrites(3).build();
    Map<Integer, Store<ByteArray, byte[], byte[]>> stores = new HashMap<Integer, Store<ByteArray, byte[], byte[]>>();
    List<Node> nodes = new ArrayList<Node>();
    for (int i = 0; i < 3; i++) {
        Store<ByteArray, byte[], byte[]> store = new SleepyStore<ByteArray, byte[], byte[]>(200, new InMemoryStorageEngine<ByteArray, byte[], byte[]>("test"));
        stores.put(i, store);
        List<Integer> partitions = Arrays.asList(i);
        nodes.add(new Node(i, "none", 0, 0, 0, partitions));
    }
    setFailureDetector(stores);
    routedStoreThreadPool = Executors.newFixedThreadPool(3);
    // with a generous 1500 ms general timeout and a 100 ms get timeout, only
    // the get should fail (each store sleeps for 200 ms)
    TimeoutConfig timeoutConfig = new TimeoutConfig(1500, false);
    timeoutConfig.setOperationTimeout(VoldemortOpCode.GET_OP_CODE, 100);
    RoutedStoreFactory routedStoreFactory = createFactory();
    RoutedStore routedStore = routedStoreFactory.create(new Cluster("test", nodes), definition, stores, failureDetector, createConfig(timeoutConfig));
    try {
        routedStore.put(new ByteArray("test".getBytes()), new Versioned<byte[]>(new byte[] { 1 }), null);
    } catch (InsufficientOperationalNodesException e) {
        fail("Should not have failed");
    }
    try {
        routedStore.get(new ByteArray("test".getBytes()), null);
        fail("Should have thrown");
    } catch (InsufficientOperationalNodesException e) {
        // expected: the 100 ms get timeout is shorter than the 200 ms store delay
    }
}
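The test exercises per-operation timeouts by wrapping each node's store in a SleepyStore that delays every call by 200 ms, so the 1500 ms put timeout is easily met while the 100 ms get timeout is not. The delay-wrapper idea itself is easy to reproduce; below is a hypothetical sketch independent of Voldemort's Store interface.

import java.util.concurrent.Callable;

// Hypothetical delegate that injects a fixed delay before every call,
// mirroring what SleepyStore does for the test above.
public class SleepyCallable<T> implements Callable<T> {

    private final long sleepMs;
    private final Callable<T> inner;

    public SleepyCallable(long sleepMs, Callable<T> inner) {
        this.sleepMs = sleepMs;
        this.inner = inner;
    }

    @Override
    public T call() throws Exception {
        Thread.sleep(sleepMs); // simulate a slow node
        return inner.call();
    }
}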