Usage example of voldemort.store.slop.Slop in the voldemort project (voldemort/voldemort):
PerformDeleteHintedHandoff#execute.
/**
 * Sends a hinted-handoff DELETE slop, in parallel, to each node that
 * failed during the delete operation, then signals pipeline completion.
 *
 * @param pipeline the routing pipeline to post the completion event on
 */
@Override
public void execute(Pipeline pipeline) {
    for (Map.Entry<Node, Slop> slopToBeSent : slopsToBeSent.entrySet()) {
        Slop slop = slopToBeSent.getValue();
        Node failedNode = slopToBeSent.getKey();
        if (logger.isTraceEnabled())
            // Fixed log formatting: the original message ran the store name,
            // key and version together without separators.
            logger.trace("Performing hinted handoff for node " + failedNode + ", store "
                         + pipelineData.getStoreName() + ", key " + key + ", version " + version);
        hintedHandoff.sendHintParallel(failedNode, version, slop);
    }
    // Always advance the pipeline, whether or not any slops were queued.
    pipeline.addEvent(completeEvent);
}
Usage example of voldemort.store.slop.Slop in the voldemort project (voldemort/voldemort):
PerformParallelDeleteRequests#handleException.
/**
 * Examines an asynchronous delete response and, when it carries an
 * exception, decides how to react: register a hinted-handoff slop,
 * silently ignore the error, report it, or delegate to the generic
 * response-error handler.
 *
 * @param response the per-node response to examine
 * @param pipeline the pipeline driving this delete operation
 * @return true if it is a terminal error, false otherwise
 */
private boolean handleException(Response<ByteArray, Object> response, Pipeline pipeline) {
    Node node = response.getNode();
    if (!(response.getValue() instanceof Exception)) {
        // Successful response: nothing to handle.
        return false;
    }
    Exception ex = (Exception) response.getValue();
    if (enableHintedHandoff) {
        if (ex instanceof UnreachableStoreException || ex instanceof QuotaExceededException) {
            // Record a DELETE slop so the failed node can be repaired later.
            Slop slop = new Slop(pipelineData.getStoreName(), Slop.Operation.DELETE, key, null, null, node.getId(), new Date());
            if (!isOperationCompleted.get()) {
                // Operation still in flight: remember the slop and send it later.
                hintedHandoffAction.rememberSlopForLaterEvent(node, slop);
            } else if (isDeleteSuccessful.get()) {
                // Operation already succeeded overall: send the hint right away.
                hintedHandoff.sendHintParallel(node, version, slop);
            }
        }
    }
    if (ex instanceof ObsoleteVersionException) {
        // An ObsoleteVersionException means a newer version was already
        // written on this node, so it is treated as a clean success.
        return false;
    } else if (ex instanceof QuotaExceededException) {
        // Quota violations are also swallowed silently here; the slop
        // registered above (if any) covers the repair.
        return false;
    }
    // Remaining exception types are reported or escalated below.
    if (ex instanceof InvalidMetadataException && isOperationCompleted.get()) {
        pipelineData.reportException(ex);
        if (logger.isInfoEnabled()) {
            logger.info("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
        }
    } else {
        return handleResponseError(response, pipeline, failureDetector);
    }
    return false;
}
Usage example of voldemort.store.slop.Slop in the voldemort project (voldemort/voldemort):
PerformParallelPutRequests#execute.
/**
 * Fires asynchronous put requests to every replica node not already
 * covered by the earlier serial-put stage, then blocks polling the
 * response queue until the required/zone/preferred counts are met, the
 * timeout lapses, or no responses remain pending. On success it posts
 * the completion event; otherwise it records a fatal error and aborts
 * the pipeline.
 *
 * NOTE(review): responses may be consumed either by this master thread
 * (via the synchronizer queue) or directly by the callback's worker
 * thread after cutoff — the tryDelegateResponseHandling handshake
 * decides which. Statement order here is load-bearing; do not reorder.
 *
 * @param pipeline the routing pipeline being executed
 */
@Override
public void execute(final Pipeline pipeline) {
final Node masterNode = pipelineData.getMaster();
final List<Node> nodes = pipelineData.getNodes();
final Versioned<byte[]> versionedCopy = pipelineData.getVersionedCopy();
// Nodes up to and including the master were already written serially.
final Integer numNodesTouchedInSerialPut = nodes.indexOf(masterNode) + 1;
numNodesPendingResponse = nodes.size() - numNodesTouchedInSerialPut;
if (logger.isDebugEnabled())
logger.debug("PUT {key:" + key + "} MasterNode={id:" + masterNode.getId() + "} totalNodesToAsyncPut=" + numNodesPendingResponse);
// initiate parallel puts
for (int i = numNodesTouchedInSerialPut; i < nodes.size(); i++) {
final Node node = nodes.get(i);
pipelineData.incrementNodeIndex();
// Callback runs on the store's worker thread when the async put finishes.
NonblockingStoreCallback callback = new NonblockingStoreCallback() {
@Override
public void requestComplete(Object result, long requestTime) {
boolean responseHandledByMaster = false;
if (logger.isDebugEnabled())
logger.debug("PUT {key:" + key + "} response received from node={id:" + node.getId() + "} in " + requestTime + " ms)");
Response<ByteArray, Object> response;
response = new Response<ByteArray, Object>(node, key, result, requestTime);
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} Parallel put thread trying to return result to main thread");
}
// First try to hand the response to the master thread's poll loop.
responseHandledByMaster = pipelineData.getSynchronizer().tryDelegateResponseHandling(response);
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} Master thread accepted the response: " + responseHandledByMaster);
}
if (!responseHandledByMaster) {
// Master has already cut off handling; this worker thread must
// deal with the response itself (slop registration / error stats).
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} Master thread did not accept the response: will handle in worker thread");
}
if (PipelineRoutedStore.isSlopableFailure(response.getValue()) || response.getValue() instanceof QuotaExceededException) {
if (logger.isDebugEnabled())
logger.debug("PUT {key:" + key + "} failed on node={id:" + node.getId() + ",host:" + node.getHost() + "}");
if (isHintedHandoffEnabled()) {
// If the master thread will register the slop, skip doing it here.
boolean triedDelegateSlop = pipelineData.getSynchronizer().tryDelegateSlop(node);
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} triedDelegateSlop: " + triedDelegateSlop);
}
if (!triedDelegateSlop) {
Slop slop = new Slop(pipelineData.getStoreName(), Slop.Operation.PUT, key, versionedCopy.getValue(), transforms, node.getId(), new Date());
pipelineData.addFailedNode(node);
if (logger.isDebugEnabled())
logger.debug("PUT {key:" + key + "} Start registering Slop(node:" + node.getId() + ",host:" + node.getHost() + ")");
hintedHandoff.sendHintParallel(node, versionedCopy.getVersion(), slop);
if (logger.isDebugEnabled())
logger.debug("PUT {key:" + key + "} Sent out request to register Slop(node: " + node.getId() + ",host:" + node.getHost() + ")");
}
}
} else {
// the exception is ignorable
if (logger.isDebugEnabled()) {
if (result instanceof Exception) {
logger.debug("PUT {key:" + key + "} will not send hint. Response is ignorable exception: " + result.getClass().toString());
} else {
logger.debug("PUT {key:" + key + "} will not send hint. Response is success");
}
}
}
// Error bookkeeping for non-obsolete-version failures.
if (result instanceof Exception && !(result instanceof ObsoleteVersionException)) {
if (response.getValue() instanceof InvalidMetadataException) {
pipelineData.reportException((InvalidMetadataException) response.getValue());
logger.warn("Received invalid metadata problem after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "'");
} else if (response.getValue() instanceof QuotaExceededException) {
/**
 * TODO Not sure if we need to count this
 * Exception for stats or silently ignore and
 * just log a warning. While
 * QuotaExceededException thrown from other
 * places mean the operation failed, this one
 * does not fail the operation but instead
 * stores slops. Introduce a new Exception in
 * client side to just monitor how mamy Async
 * writes fail on exceeding Quota?
 */
logger.warn("Received QuotaExceededException after a successful " + pipeline.getOperation().getSimpleName() + " call on node " + node.getId() + ", store '" + pipelineData.getStoreName() + "', master-node '" + masterNode.getId() + "'");
} else {
handleResponseError(response, pipeline, failureDetector);
}
}
}
};
if (logger.isTraceEnabled())
logger.trace("Submitting " + pipeline.getOperation().getSimpleName() + " request on node " + node.getId() + " for key " + key);
NonblockingStore store = nonblockingStores.get(node.getId());
store.submitPutRequest(key, versionedCopy, transforms, callback, timeoutMs);
}
try {
boolean preferredSatisfied = false;
// Master poll loop: consume responses until satisfied or timed out.
while (true) {
long elapsedNs = System.nanoTime() - pipelineData.getStartTimeNs();
long remainingNs = (timeoutMs * Time.NS_PER_MS) - elapsedNs;
remainingNs = Math.max(0, remainingNs);
// preferred check (serial master put already counts as one success)
if (numResponsesGot >= preferred - 1) {
preferredSatisfied = true;
}
quorumSatisfied = isQuorumSatisfied();
zonesSatisfied = isZonesSatisfied();
if (quorumSatisfied && zonesSatisfied && preferredSatisfied || remainingNs <= 0 || numNodesPendingResponse <= 0) {
// Stop accepting delegated responses; late callbacks now
// handle their own responses in their worker threads.
pipelineData.getSynchronizer().cutoffHandling();
break;
} else {
if (logger.isTraceEnabled()) {
logger.trace("PUT {key:" + key + "} trying to poll from queue");
}
Response<ByteArray, Object> response = pipelineData.getSynchronizer().responseQueuePoll(remainingNs, TimeUnit.NANOSECONDS);
processResponse(response, pipeline);
if (logger.isTraceEnabled()) {
logger.trace("PUT {key:" + key + "} tried to poll from queue. Null?: " + (response == null) + " numResponsesGot:" + numResponsesGot + " parallelResponseToWait: " + numNodesPendingResponse + "; preferred-1: " + (preferred - 1) + "; preferredOK: " + preferredSatisfied + " quorumOK: " + quorumSatisfied + "; zoneOK: " + zonesSatisfied);
}
}
}
// Drain any responses still queued after cutoff (leftover).
while (!pipelineData.getSynchronizer().responseQueueIsEmpty()) {
Response<ByteArray, Object> response = pipelineData.getSynchronizer().responseQueuePoll(0, TimeUnit.NANOSECONDS);
processResponse(response, pipeline);
}
// Re-evaluate after the drain; the leftovers may have changed the outcome.
quorumSatisfied = isQuorumSatisfied();
zonesSatisfied = isZonesSatisfied();
if (quorumSatisfied && zonesSatisfied) {
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} succeeded at parallel put stage");
}
pipelineData.getSynchronizer().disallowDelegateSlop();
pipeline.addEvent(completeEvent);
} else {
// Failure: record the appropriate fatal error and abort the pipeline.
VoldemortException fatalError;
if (!quorumSatisfied) {
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} failed due to insufficient nodes. required=" + required + " success=" + pipelineData.getSuccesses());
}
fatalError = new InsufficientOperationalNodesException(required + " " + pipeline.getOperation().getSimpleName() + "s required, but only " + pipelineData.getSuccesses() + " succeeded", pipelineData.getReplicationSet(), pipelineData.getNodes(), pipelineData.getFailedNodes(), pipelineData.getFailures());
pipelineData.setFatalError(fatalError);
} else if (!zonesSatisfied) {
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} failed due to insufficient zones. required=" + pipelineData.getZonesRequired() + 1 + " success=" + pipelineData.getZoneResponses().size());
}
fatalError = new InsufficientZoneResponsesException((pipelineData.getZonesRequired() + 1) + " " + pipeline.getOperation().getSimpleName() + "s required zone, but only " + (pipelineData.getZoneResponses().size()) + " succeeded. Failing nodes : " + pipelineData.getFailedNodes());
pipelineData.setFatalError(fatalError);
}
pipeline.abort();
}
} catch (InterruptedException e) {
if (logger.isEnabledFor(Level.WARN))
logger.warn(e, e);
} catch (NoSuchElementException e) {
if (logger.isEnabledFor(Level.ERROR))
logger.error("Response Queue is empty. There may be a bug in PerformParallelPutRequest", e);
} finally {
if (logger.isDebugEnabled()) {
logger.debug("PUT {key:" + key + "} marking parallel put stage finished");
}
}
}
Usage example of voldemort.store.slop.Slop in the voldemort project (voldemort/voldemort):
HintedHandoffSendHintTest#setUp.
/**
 * Builds the full test fixture before each test: starts one Voldemort
 * server per node, wraps each node's local store in a ForceFailStore so
 * failures can be injected, creates client-side socket stores and slop
 * stores, configures a failure detector, constructs the routed store,
 * and pre-generates random key/value pairs routed to cover every node.
 *
 * @throws Exception if server startup or store creation fails
 */
@Before
public void setUp() throws Exception {
if (logger.isDebugEnabled()) {
logger.debug("Test Started: replication[" + REPLICATION_FACTOR + "], preferredW[" + P_WRITES + "], requiredW[" + R_WRITES + "]");
}
cluster = getNineNodeCluster();
storeDef = getStoreDef();
// create voldemort servers
for (Integer nodeId = 0; nodeId < NUM_NODES_TOTAL; nodeId++) {
SocketStoreFactory socketStoreFactory;
socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 1024);
List<StoreDefinition> stores = new ArrayList<StoreDefinition>();
stores.add(storeDef);
VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true, nodeId, TestUtils.createTempDir().getAbsolutePath(), cluster, stores, new Properties());
config.setNioAdminConnectorSelectors(1);
config.setNioConnectorSelectors(2);
VoldemortServer vs = ServerTestUtils.startVoldemortServer(socketStoreFactory, config);
VoldemortService vsrv = vs.getService(ServiceType.STORAGE);
StorageService ss = (StorageService) vsrv;
voldemortServers.put(nodeId, vs);
slopStorageEngines.put(nodeId, ss.getStoreRepository().getSlopStore());
slopStores.put(nodeId, SerializingStore.wrap(ss.getStoreRepository().getSlopStore(), new ByteArraySerializer(), new SlopSerializer(), new IdentitySerializer()));
// wrap original store with force fail store so tests can inject
// UnreachableStoreException on demand
Store<ByteArray, byte[], byte[]> store = ss.getStoreRepository().removeLocalStore(STORE_NAME);
UnreachableStoreException exception = new UnreachableStoreException("Force failed");
ForceFailStore<ByteArray, byte[], byte[]> forceFailStore = new ForceFailStore<ByteArray, byte[], byte[]>(store, exception);
forceFailStores.put(nodeId, forceFailStore);
ss.getStoreRepository().addLocalStore(forceFailStore);
}
strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, cluster);
// create client socket stores and slop stores
SocketStoreClientFactoryForTest clientSocketStoreFactory = new SocketStoreClientFactoryForTest(STORE_NAME, SLOP_STORE_NAME);
Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
Serializer<Slop> slopValueSerializer = new SlopSerializer();
Map<Integer, Store<ByteArray, byte[], byte[]>> testStores = subStores;
// NOTE(review): this local shadows the field of the same name used by
// the test methods — presumably intentional; the local is passed to the
// RoutedStoreFactory below.
Map<Integer, Store<ByteArray, Slop, byte[]>> slopStores = new HashMap<Integer, Store<ByteArray, Slop, byte[]>>();
for (Node node : cluster.getNodes()) {
// test store
SocketStore socketTestStore = clientSocketStoreFactory.getSocketTestStoreByNode(node);
socketTestStores.put(node.getId(), socketTestStore);
testStores.put(node.getId(), socketTestStore);
// slop store
SocketStore socketSlopStore = clientSocketStoreFactory.getSocketSlopStoreByNode(node);
Store<ByteArray, Slop, byte[]> slopStore = SerializingStore.wrap(socketSlopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
socketSlopStores.put(node.getId(), socketSlopStore);
slopStores.put(node.getId(), slopStore);
}
// set failure detector (tear down any instance left by a prior test)
if (failureDetector != null)
failureDetector.destroy();
FailureDetectorConfig failureDetectorConfig = new FailureDetectorConfig();
failureDetectorConfig.setImplementationClassName(failureDetectorCls.getName());
failureDetectorConfig.setThreshold(50);
failureDetectorConfig.setCluster(cluster);
failureDetectorConfig.setConnectionVerifier(MutableStoreConnectionVerifier.create(subStores));
failureDetector = FailureDetectorUtils.create(failureDetectorConfig, false);
// make routedStore
RoutedStoreFactory factory = new RoutedStoreFactory();
routedStore = factory.create(cluster, storeDef, testStores, socketTestStores, slopStores, socketSlopStores, failureDetector, new RoutedStoreConfig().setTimeoutConfig(new TimeoutConfig(1500L, false)));
// generate the keys: repeat five times, each pass producing random keys
// until every node appears in at least one key's replica set
for (int i = 0; i < 5; i++) {
Set<Integer> nodesCovered = Sets.newHashSet();
while (nodesCovered.size() < NUM_NODES_TOTAL) {
ByteArray randomKey = new ByteArray(TestUtils.randomBytes(KEY_LENGTH));
byte[] randomValue = TestUtils.randomBytes(VALUE_LENGTH);
if (randomKey.length() > 0 && randomValue.length > 0) {
if (!keyList.contains(randomKey)) {
for (Node node : strategy.routeRequest(randomKey.get())) {
keysToNodes.put(randomKey, node.getId());
nodesCovered.add(node.getId());
}
logger.info("Inserting key [" + randomKey + "] to key list as id:" + keyList.size());
keyList.add(randomKey);
keyValues.put(randomKey, new ByteArray(randomValue));
}
}
}
}
}
Usage example of voldemort.store.slop.Slop in the voldemort project (voldemort/voldemort):
HintedHandoffSendHintTest#testHintedHandoff.
/**
 * End-to-end check that hinted handoff registers a PUT slop for every
 * key that failed on a downed node: fails a subset of nodes, performs a
 * batch of puts, waits for the asynchronous slop registration, then
 * reads back every slop store and verifies each failed (node, key) pair
 * has a slop whose value matches what was originally written.
 *
 * @throws Exception on interruption or store access failure
 */
@Test
public void testHintedHandoff() throws Exception {
Set<Integer> failedNodeSet = chooseFailedNodeSet(NUM_NODES_FAILED);
Multimap<Integer, ByteArray> nodeToFailedKeysMap = doBatchPut(failedNodeSet);
// wait for async operations
// must be greater than socket timeout to ensure slop is registered
logger.debug("Sleep for async operations to finish");
Thread.sleep(Math.max(2000, SOCKET_TIMEOUT_MS * 2));
// Collect all registered slops, grouped by the node they are destined for.
Map<Integer, Map<ByteArray, byte[]>> nodeToSlopData = new HashMap<Integer, Map<ByteArray, byte[]>>();
Set<ByteArray> slopKeys = makeSlopKeys(nodeToFailedKeysMap, Slop.Operation.PUT);
for (Store<ByteArray, Slop, byte[]> slopStore : slopStores.values()) {
Map<ByteArray, List<Versioned<Slop>>> getAllResult = slopStore.getAll(slopKeys, null);
for (Map.Entry<ByteArray, List<Versioned<Slop>>> entry : getAllResult.entrySet()) {
// Only the first version is inspected; multiple versions per slop
// key are not expected in this test.
Slop slop = entry.getValue().get(0).getValue();
Integer nodeId = slop.getNodeId();
// get data
if (!nodeToSlopData.containsKey(nodeId)) {
nodeToSlopData.put(nodeId, new HashMap<ByteArray, byte[]>());
}
Map<ByteArray, byte[]> perNodeSlopMap = nodeToSlopData.get(nodeId);
perNodeSlopMap.put(slop.getKey(), slop.getValue());
if (logger.isTraceEnabled())
logger.trace(slop);
}
}
// Verify every failed (node, key) pair has a slop with the right value;
// count mismatches instead of failing fast so the log shows all errors.
int errorCount = 0;
for (Map.Entry<Integer, ByteArray> failedKey : nodeToFailedKeysMap.entries()) {
Integer nodeId = failedKey.getKey();
ByteArray key = failedKey.getValue();
byte[] expected = keyValues.get(key).get();
Integer id = keyList.indexOf(key);
// check if map exist
Map<ByteArray, byte[]> perNodeSlopMap = nodeToSlopData.get(nodeId);
if (perNodeSlopMap == null) {
logger.error("Slop does not have key[" + key + "][id:" + id + "]");
errorCount++;
continue;
}
byte[] actual = perNodeSlopMap.get(key);
if (actual == null) {
logger.error("Slop does not have key[" + key + "][nodeId:" + nodeId + "]");
errorCount++;
} else if (ByteUtils.compare(actual, expected) != 0) {
logger.error("Slop key[" + key + "][nodeId:" + nodeId + "] does not have the correct value");
errorCount++;
} else {
logger.debug("Slop has key[" + key + "][nodeId:" + nodeId + "] with value[" + actual + "]");
}
}
assertTrue(errorCount + " Slop(s) incorrect. See log for more info", errorCount == 0);
}
End of aggregated usage examples for voldemort.store.slop.Slop.