Use of voldemort.store.routed.NodeValue in project voldemort by voldemort.
The class AbstractReadRepair, method execute().
public void execute(Pipeline pipeline) {
    insertNodeValues();
    long startTimeNs = -1;
    if (logger.isDebugEnabled())
        startTimeNs = System.nanoTime();
    if (nodeValues.size() > 1 && preferred > 1) {
        List<NodeValue<ByteArray, byte[]>> toReadRepair = Lists.newArrayList();
        /*
         * We clone after computing read repairs in the assumption that the
         * output will be smaller than the input. Note that we clone the
         * version, but not the key or value as the latter two are not
         * mutated.
         */
        for (NodeValue<ByteArray, byte[]> v : readRepairer.getRepairs(nodeValues)) {
            Versioned<byte[]> versioned = Versioned.value(v.getVersioned().getValue(),
                                                          ((VectorClock) v.getVersion()).clone());
            toReadRepair.add(new NodeValue<ByteArray, byte[]>(v.getNodeId(), v.getKey(), versioned));
        }
        for (NodeValue<ByteArray, byte[]> v : toReadRepair) {
            try {
                if (logger.isDebugEnabled())
                    logger.debug("Doing read repair on node " + v.getNodeId() + " for key '"
                                 + ByteUtils.toHexString(v.getKey().get()) + "' with version "
                                 + v.getVersion() + ".");
                NonblockingStore store = nonblockingStores.get(v.getNodeId());
                store.submitPutRequest(v.getKey(), v.getVersioned(), null, null, timeoutMs);
            } catch (VoldemortApplicationException e) {
                if (logger.isDebugEnabled())
                    logger.debug("Read repair cancelled due to application level exception on node "
                                 + v.getNodeId() + " for key '" + ByteUtils.toHexString(v.getKey().get())
                                 + "' with version " + v.getVersion() + ": " + e.getMessage());
            } catch (Exception e) {
                logger.debug("Read repair failed: ", e);
            }
        }
        if (logger.isDebugEnabled()) {
            String logStr = "Repaired (node, key, version): (";
            for (NodeValue<ByteArray, byte[]> v : toReadRepair) {
                logStr += "(" + v.getNodeId() + ", " + v.getKey() + "," + v.getVersion() + ") ";
            }
            logStr += "in " + (System.nanoTime() - startTimeNs) + " ns";
            logger.debug(logStr);
        }
    }
    pipeline.addEvent(completeEvent);
}
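The central idiom in execute() is that each repair is expressed as a NodeValue, a (node id, key, versioned value) triple, and that the VectorClock is cloned before the value is resubmitted so the original read result is never mutated. Below is a minimal sketch of that construction, not the project's own code: the key and value bytes, the class name, and the no-argument VectorClock constructor and package paths in the imports are assumptions; only the Versioned.value(...), clone(), and NodeValue constructor calls mirror the snippet above.
import voldemort.store.routed.NodeValue;
import voldemort.utils.ByteArray;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

public class ReadRepairValueSketch {

    // Builds a repair entry for the given node, cloning the clock so the
    // Versioned originally read from the cluster is never mutated.
    static NodeValue<ByteArray, byte[]> toRepairValue(int nodeId,
                                                      ByteArray key,
                                                      Versioned<byte[]> read) {
        VectorClock clonedClock = ((VectorClock) read.getVersion()).clone();
        Versioned<byte[]> copy = Versioned.value(read.getValue(), clonedClock);
        return new NodeValue<ByteArray, byte[]>(nodeId, key, copy);
    }

    public static void main(String[] args) {
        ByteArray key = new ByteArray("hello".getBytes());           // illustrative key
        Versioned<byte[]> read = Versioned.value("world".getBytes(),
                                                 new VectorClock()); // illustrative value
        NodeValue<ByteArray, byte[]> repair = toRepairValue(1, key, read);
        System.out.println(repair.getNodeId() + " -> " + repair.getVersion());
    }
}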
Use of voldemort.store.routed.NodeValue in project voldemort by voldemort.
The class SlopPusherDeadSlopTest, method testAutoPurge().
@Test
public void testAutoPurge() {
    try {
        // Generate slops for a non-existent node 2
        List<Versioned<Slop>> deadNodeSlops = ServerTestUtils.createRandomSlops(2, 40, false, "test");
        // Generate slops for a non-existent store "deleted_store"
        List<Versioned<Slop>> deadStoreSlops = ServerTestUtils.createRandomSlops(0, 40, false, "deleted_store");
        // Generate some valid slops and make sure they go into the
        // destination store
        List<Versioned<Slop>> validStoreSlops = ServerTestUtils.createRandomSlops(1, 40, false, "test");
        List<Versioned<Slop>> slops = new ArrayList<Versioned<Slop>>();
        slops.addAll(deadStoreSlops);
        slops.addAll(deadNodeSlops);
        slops.addAll(validStoreSlops);
        SlopSerializer slopSerializer = new SlopSerializer();
        // Populate the store with the slops
        for (Versioned<Slop> slop : slops) {
            VectorClock clock = TestUtils.getClock(1);
            NodeValue<ByteArray, byte[]> nodeValue =
                new NodeValue<ByteArray, byte[]>(0,
                                                 slop.getValue().makeKey(),
                                                 new Versioned<byte[]>(slopSerializer.toBytes(slop.getValue()), clock));
            adminClient.storeOps.putNodeKeyValue("slop", nodeValue);
        }
        // Wait for twice the slop interval (in case a slop push was
        // underway as we populated)
        Thread.sleep(SLOP_FREQUENCY_MS * 2);
        // Confirm the dead slops are all gone now
        for (List<Versioned<Slop>> deadSlops : Arrays.asList(deadStoreSlops, deadNodeSlops)) {
            for (Versioned<Slop> slop : deadSlops) {
                List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("slop", 0, slop.getValue().makeKey());
                assertEquals("Slop should be purged", 0, slopEntry.size());
            }
        }
        StoreDefinitionsMapper mapper = new StoreDefinitionsMapper();
        List<StoreDefinition> storeDefs = mapper.readStoreList(new StringReader(VoldemortTestConstants.getSingleStoreDefinitionsXml()));
        BaseStoreRoutingPlan rPlan = new BaseStoreRoutingPlan(adminClient.getAdminClientCluster(),
                                                              StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, "test"));
        // Confirm the valid ones made it
        for (Versioned<Slop> slop : validStoreSlops) {
            ByteArray key = slop.getValue().getKey();
            if (rPlan.getReplicationNodeList(key.get()).contains(1)) {
                List<Versioned<byte[]>> slopEntry = adminClient.storeOps.getNodeKey("test", 1, key);
                if (slop.getValue().getOperation() == Operation.DELETE) {
                    assertTrue("Delete slop should not have reached the destination", slopEntry.size() == 0);
                } else {
                    assertTrue("Put slop should have reached the destination", slopEntry.size() > 0);
                }
            }
        }
    } catch (Exception e) {
        logger.error("Test failed with", e);
        fail("unexpected exception");
    }
}
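The test leans on two admin operations shown above: putNodeKeyValue writes a NodeValue directly to one node of a store, and getNodeKey reads the raw versioned values back from that node. A hedged round-trip sketch follows; the class name, key/value bytes, store name, and the AdminClient import path are assumptions, and an already-bootstrapped AdminClient is taken as given, while the storeOps calls mirror the test above.
import java.util.List;

import voldemort.client.protocol.admin.AdminClient;
import voldemort.store.routed.NodeValue;
import voldemort.utils.ByteArray;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

public class NodeValueRoundTripSketch {

    // Writes one key to node 0 of the "test" store and reads it back.
    static void roundTrip(AdminClient adminClient) {
        ByteArray key = new ByteArray("some-key".getBytes());
        Versioned<byte[]> value = new Versioned<byte[]>("some-value".getBytes(), new VectorClock());
        NodeValue<ByteArray, byte[]> nodeValue = new NodeValue<ByteArray, byte[]>(0, key, value);

        // Store the value on node 0 only, then fetch whatever versions node 0 holds.
        adminClient.storeOps.putNodeKeyValue("test", nodeValue);
        List<Versioned<byte[]>> stored = adminClient.storeOps.getNodeKey("test", 0, key);
        System.out.println("Versions stored on node 0: " + stored.size());
    }
}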
Use of voldemort.store.routed.NodeValue in project voldemort by voldemort.
The class ExceededQuotaSlopTest, method setGetPutQuotasForEachServer().
public void setGetPutQuotasForEachServer() throws Exception {
    Properties adminProperties = new Properties();
    adminProperties.setProperty("max_connections", "2");
    adminClient = new AdminClient(cluster, new AdminClientConfig().setMaxConnectionsPerNode(2));
    Map<Pair<Integer, QuotaType>, Integer> throughPutMap = new HashMap<Pair<Integer, QuotaType>, Integer>();
    // Set node 0 quotas
    throughPutMap.put(new Pair<Integer, QuotaType>(0, QuotaType.PUT_THROUGHPUT), 5);
    throughPutMap.put(new Pair<Integer, QuotaType>(0, QuotaType.GET_THROUGHPUT), 20);
    // Set node 1 quotas
    throughPutMap.put(new Pair<Integer, QuotaType>(1, QuotaType.PUT_THROUGHPUT), 2);
    throughPutMap.put(new Pair<Integer, QuotaType>(1, QuotaType.GET_THROUGHPUT), 20);
    for (Entry<Pair<Integer, QuotaType>, Integer> throughPut : throughPutMap.entrySet()) {
        int nodeId = throughPut.getKey().getFirst();
        QuotaType type = throughPut.getKey().getSecond();
        int value = throughPut.getValue();
        VectorClock clock = VectorClockUtils.makeClockWithCurrentTime(cluster.getNodeIds());
        NodeValue<ByteArray, byte[]> operationValue =
            new NodeValue<ByteArray, byte[]>(nodeId,
                                             new ByteArray(getKeyBytes(type)),
                                             new Versioned<byte[]>(ByteUtils.getBytes(Integer.toString(value), encodingType), clock));
        try {
            adminClient.storeOps.putNodeKeyValue(quotaStoreName, operationValue);
        } catch (Exception e) {
            throw new Exception("Exception when setting quota for node " + nodeId
                                + ", operation " + type + ": " + e.getMessage(), e);
        }
    }
}
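The loop above packages each quota as a NodeValue addressed to a single node: the integer is rendered as a string, encoded to bytes, versioned with a freshly built clock, and keyed by the quota type. A small sketch of that packaging is below; the helper and class names are hypothetical, "UTF-8" stands in for the test's encodingType, the import paths are assumptions, and the clock is supplied by the caller so the sketch does not depend on VectorClockUtils.
import voldemort.store.routed.NodeValue;
import voldemort.utils.ByteArray;
import voldemort.utils.ByteUtils;
import voldemort.versioning.VectorClock;
import voldemort.versioning.Versioned;

public class QuotaValueSketch {

    // Packages a quota setting as a NodeValue for one node: the integer is
    // rendered as a string, encoded to bytes, and wrapped in a Versioned
    // carrying the supplied clock. quotaKeyBytes is whatever the caller
    // derives from the quota type (getKeyBytes in the test above).
    static NodeValue<ByteArray, byte[]> quotaValue(int nodeId,
                                                   byte[] quotaKeyBytes,
                                                   int quota,
                                                   VectorClock clock) {
        byte[] encoded = ByteUtils.getBytes(Integer.toString(quota), "UTF-8"); // encoding assumed
        return new NodeValue<ByteArray, byte[]>(nodeId,
                                                new ByteArray(quotaKeyBytes),
                                                new Versioned<byte[]>(encoded, clock));
    }
}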
Use of voldemort.store.routed.NodeValue in project voldemort by voldemort.
The class AbstractConsistencyFixer, method doConsistencyFix().
public Status doConsistencyFix() {
    // Initialization
    byte[] keyInBytes;
    List<Integer> nodeIdList = null;
    int masterPartitionId = -1;
    try {
        keyInBytes = ByteUtils.fromHexString(badKey.getKeyInHexFormat());
        masterPartitionId = this.storeInstance.getMasterPartitionId(keyInBytes);
        nodeIdList = this.storeInstance.getReplicationNodeList(masterPartitionId);
    } catch (Exception exception) {
        logger.info("Aborting fixKey due to bad init.");
        if (logger.isDebugEnabled()) {
            exception.printStackTrace();
        }
        return Status.BAD_INIT;
    }
    ByteArray keyAsByteArray = new ByteArray(keyInBytes);
    // Do the reads
    Map<Integer, QueryKeyResult> nodeIdToKeyValues = doReads(nodeIdList, keyInBytes, badKey.getKeyInHexFormat());
    // Process read replies (i.e., nodeIdToKeyValues)
    ProcessReadRepliesResult result = processReadReplies(nodeIdList, keyAsByteArray, badKey.getKeyInHexFormat(), nodeIdToKeyValues);
    if (result.status != Status.SUCCESS) {
        return result.status;
    }
    // Resolve conflicts indicated in nodeValues
    List<NodeValue<ByteArray, byte[]>> toReadRepair = resolveReadConflicts(result.nodeValues);
    if (logger.isTraceEnabled()) {
        if (toReadRepair.size() == 0) {
            logger.trace("Nothing to repair");
        }
        for (NodeValue<ByteArray, byte[]> nodeValue : toReadRepair) {
            logger.trace(nodeValue.getNodeId() + " --- " + nodeValue.getKey().toString());
        }
    }
    // Do the repairs
    Status status = doRepairPut(toReadRepair);
    // Return the status of the last operation (success or otherwise)
    return status;
}
Use of voldemort.store.routed.NodeValue in project voldemort by voldemort.
The class AbstractConsistencyFixer, method processReadReplies().
/**
 * Assembles the per-node values read for the key and decides whether the
 * fix can proceed.
 *
 * @param nodeIdList nodes from which the key was fetched
 * @param keyAsByteArray the key being fixed
 * @param keyInHexFormat hex form of the key, used for logging
 * @param nodeIdToKeyValues read replies, keyed by node id
 * @return a ProcessReadRepliesResult carrying either the collected
 *         NodeValues or a failure status (e.g., FETCH_EXCEPTION)
 */
private ProcessReadRepliesResult processReadReplies(final List<Integer> nodeIdList,
                                                    final ByteArray keyAsByteArray,
                                                    final String keyInHexFormat,
                                                    final Map<Integer, QueryKeyResult> nodeIdToKeyValues) {
    List<NodeValue<ByteArray, byte[]>> nodeValues = new ArrayList<NodeValue<ByteArray, byte[]>>();
    boolean exceptionsEncountered = false;
    for (int nodeId : nodeIdList) {
        QueryKeyResult keyValue;
        if (nodeIdToKeyValues.containsKey(nodeId)) {
            keyValue = nodeIdToKeyValues.get(nodeId);
            if (keyValue.hasException()) {
                logger.debug("Exception encountered while fetching key " + keyInHexFormat
                             + " from node with nodeId " + nodeId + " : "
                             + keyValue.getException().getMessage());
                exceptionsEncountered = true;
            } else {
                if (keyValue.getValues().isEmpty()) {
                    Versioned<byte[]> versioned = new Versioned<byte[]>(null);
                    nodeValues.add(new NodeValue<ByteArray, byte[]>(nodeId, keyValue.getKey(), versioned));
                } else {
                    for (Versioned<byte[]> value : keyValue.getValues()) {
                        nodeValues.add(new NodeValue<ByteArray, byte[]>(nodeId, keyValue.getKey(), value));
                    }
                }
            }
        } else {
            logger.debug("No key-value returned from node with id: " + nodeId);
            Versioned<byte[]> versioned = new Versioned<byte[]>(null);
            nodeValues.add(new NodeValue<ByteArray, byte[]>(nodeId, keyAsByteArray, versioned));
        }
    }
    if (exceptionsEncountered) {
        logger.info("Aborting fixKey because exceptions were encountered when fetching key-values.");
        return new ProcessReadRepliesResult(Status.FETCH_EXCEPTION);
    }
    if (logger.isDebugEnabled()) {
        for (NodeValue<ByteArray, byte[]> nkv : nodeValues) {
            logger.debug("\tRead NodeKeyValue : " + ByteUtils.toHexString(nkv.getKey().get())
                         + " on node with id " + nkv.getNodeId() + " for version " + nkv.getVersion());
        }
    }
    return new ProcessReadRepliesResult(nodeValues);
}