Usage of voldemort.versioning.VectorClock in the voldemort project.
Class ConsistencyCheckTest, method testDetermineConsistencyVectorClock:
@Test
public void testDetermineConsistencyVectorClock() {
    // Maps each observed version to the set of nodes that hold it; the
    // consistency level is derived from how these sets cover the replicas.
    Map<ConsistencyCheck.Value, Set<ConsistencyCheck.ClusterNode>> versionToNodes = new HashMap<ConsistencyCheck.Value, Set<ConsistencyCheck.ClusterNode>>();
    int replicationFactor = 4;

    // Three concurrent vector clocks that share a common event on node 1.
    VectorClock clockA = new VectorClock();
    clockA.incrementVersion(1, 100000001);
    clockA.incrementVersion(2, 100000003);
    VectorClock clockB = new VectorClock();
    clockB.incrementVersion(1, 100000001);
    clockB.incrementVersion(3, 100000002);
    VectorClock clockC = new VectorClock();
    clockC.incrementVersion(1, 100000001);
    clockC.incrementVersion(4, 100000001);

    ConsistencyCheck.Value valueA = new ConsistencyCheck.VersionValue(new Versioned<byte[]>(value1, clockA));
    ConsistencyCheck.Value valueB = new ConsistencyCheck.VersionValue(new Versioned<byte[]>(value2, clockB));
    ConsistencyCheck.Value valueC = new ConsistencyCheck.VersionValue(new Versioned<byte[]>(value3, clockC));

    // FULL: a single version present on all replicas.
    versionToNodes.put(valueA, setFourNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.FULL, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));

    // FULL: three versions, each present on all replicas.
    versionToNodes.clear();
    versionToNodes.put(valueA, setFourNodes);
    versionToNodes.put(valueB, setFourNodes);
    versionToNodes.put(valueC, setFourNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.FULL, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));

    // LATEST_CONSISTENT: two versions, only the latest fully replicated.
    versionToNodes.clear();
    versionToNodes.put(valueA, setFourNodes);
    versionToNodes.put(valueB, setThreeNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.LATEST_CONSISTENT, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));

    // INCONSISTENT: one version, but missing from a replica.
    versionToNodes.clear();
    versionToNodes.put(valueA, setThreeNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.INCONSISTENT, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));

    // INCONSISTENT: a non-latest version is the fully replicated one.
    versionToNodes.clear();
    versionToNodes.put(valueA, setThreeNodes);
    versionToNodes.put(valueB, setFourNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.INCONSISTENT, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));

    // INCONSISTENT: three versions with mixed replica coverage.
    versionToNodes.clear();
    versionToNodes.put(valueA, setThreeNodes);
    versionToNodes.put(valueB, setFourNodes);
    versionToNodes.put(valueC, setThreeNodes);
    assertEquals(ConsistencyCheck.ConsistencyLevel.INCONSISTENT, ConsistencyCheck.determineConsistency(versionToNodes, replicationFactor));
}
Usage of voldemort.versioning.VectorClock in the voldemort project.
Class RoutedStoreTest, method testBasicOperationFailure:
/**
 * Verifies that put, get and delete all fail with
 * {@link InsufficientOperationalNodesException} when too few nodes are
 * operational under the given read/write quorum configuration.
 *
 * @param reads required reads
 * @param writes required writes
 * @param failures number of failing nodes to simulate
 * @param threads client thread-pool size
 * @param customRoutedStore optional pre-built store; when null a failing
 *        store is constructed with a TO_ALL routing strategy
 * @throws Exception if store construction fails
 */
private void testBasicOperationFailure(int reads, int writes, int failures, int threads, RoutedStore customRoutedStore) throws Exception {
    VectorClock clock = getClock(1);
    Versioned<byte[]> versioned = new Versioned<byte[]>(aValue, clock);
    RoutedStore routedStore = null;
    if (customRoutedStore == null) {
        routedStore = getStore(cluster, reads, writes, threads, failures, 0, RoutingStrategyType.TO_ALL_STRATEGY, new UnreachableStoreException("no go"));
    } else {
        routedStore = customRoutedStore;
    }
    try {
        routedStore.put(aKey, versioned, aTransform);
        fail("Put succeeded with too few operational nodes.");
    } catch (InsufficientOperationalNodesException e) {
        // expected
    }
    try {
        routedStore.get(aKey, aTransform);
        fail("Get succeeded with too few operational nodes.");
    } catch (InsufficientOperationalNodesException e) {
        // expected
    }
    try {
        routedStore.delete(aKey, versioned.getVersion());
        // Fixed copy-paste error: this message previously said "Get succeeded".
        fail("Delete succeeded with too few operational nodes.");
    } catch (InsufficientOperationalNodesException e) {
        // expected
    }
}
Usage of voldemort.versioning.VectorClock in the voldemort project.
Class VoldemortAdminTool, method executeQueryKey:
/**
 * Queries a single key on one node (or on every node when {@code nodeId} is
 * negative) for each of the given stores, de-serializes the key and values and
 * prints a report to stdout. Nodes that returned an identical list of versioned
 * values are grouped together so divergent replicas are easy to spot.
 *
 * @param nodeId node to query, or a negative value meaning "all nodes"
 * @param adminClient admin client used for metadata and streaming key queries
 * @param storeNames names of the stores to query
 * @param keyString the key, either in serialized/readable form or as hex bytes
 * @param keyFormat "readable" to parse {@code keyString} with the store's key
 *        serializer; any other value treats it as a hex string
 * @throws IOException if writing the report fails
 */
private static void executeQueryKey(final Integer nodeId, AdminClient adminClient, List<String> storeNames, String keyString, String keyFormat) throws IOException {
    // decide queryingNode(s) for Key
    List<Integer> queryingNodes = new ArrayList<Integer>();
    if (nodeId < 0) {
        // means all nodes
        for (Node node : adminClient.getAdminClientCluster().getNodes()) {
            queryingNodes.add(node.getId());
        }
    } else {
        queryingNodes.add(nodeId);
    }
    // get basic info: index store definitions by store name for quick lookup
    List<StoreDefinition> storeDefinitionList = getStoreDefinitions(adminClient, nodeId);
    Map<String, StoreDefinition> storeDefinitions = new HashMap<String, StoreDefinition>();
    for (StoreDefinition storeDef : storeDefinitionList) {
        storeDefinitions.put(storeDef.getName(), storeDef);
    }
    BufferedWriter out = new BufferedWriter(new OutputStreamWriter(System.out));
    // iterate through stores
    for (final String storeName : storeNames) {
        // store definition
        StoreDefinition storeDefinition = storeDefinitions.get(storeName);
        if (storeDefinition == null) {
            throw new StoreNotFoundException("Store " + storeName + " not found");
        }
        out.write("STORE_NAME: " + storeDefinition.getName() + "\n");
        // k-v serializer
        final SerializerDefinition keySerializerDef = storeDefinition.getKeySerializer();
        final SerializerDefinition valueSerializerDef = storeDefinition.getValueSerializer();
        SerializerFactory serializerFactory = new DefaultSerializerFactory();
        @SuppressWarnings("unchecked") final Serializer<Object> keySerializer = (Serializer<Object>) serializerFactory.getSerializer(keySerializerDef);
        @SuppressWarnings("unchecked") final Serializer<Object> valueSerializer = (Serializer<Object>) serializerFactory.getSerializer(valueSerializerDef);
        // compression strategy (null means no compression configured)
        final CompressionStrategy keyCompressionStrategy;
        final CompressionStrategy valueCompressionStrategy;
        if (keySerializerDef != null && keySerializerDef.hasCompression()) {
            keyCompressionStrategy = new CompressionStrategyFactory().get(keySerializerDef.getCompression());
        } else {
            keyCompressionStrategy = null;
        }
        if (valueSerializerDef != null && valueSerializerDef.hasCompression()) {
            valueCompressionStrategy = new CompressionStrategyFactory().get(valueSerializerDef.getCompression());
        } else {
            valueCompressionStrategy = null;
        }
        if (keyCompressionStrategy == null) {
            out.write("KEY_COMPRESSION_STRATEGY: None\n");
        } else {
            out.write("KEY_COMPRESSION_STRATEGY: " + keyCompressionStrategy.getType() + "\n");
        }
        out.write("KEY_SERIALIZER_NAME: " + keySerializerDef.getName() + "\n");
        for (Map.Entry<Integer, String> entry : keySerializerDef.getAllSchemaInfoVersions().entrySet()) {
            out.write(String.format("KEY_SCHEMA VERSION=%d\n", entry.getKey()));
            out.write("====================================\n");
            out.write(entry.getValue());
            out.write("\n====================================\n");
        }
        out.write("\n");
        if (valueCompressionStrategy == null) {
            out.write("VALUE_COMPRESSION_STRATEGY: None\n");
        } else {
            out.write("VALUE_COMPRESSION_STRATEGY: " + valueCompressionStrategy.getType() + "\n");
        }
        out.write("VALUE_SERIALIZER_NAME: " + valueSerializerDef.getName() + "\n");
        for (Map.Entry<Integer, String> entry : valueSerializerDef.getAllSchemaInfoVersions().entrySet()) {
            out.write(String.format("VALUE_SCHEMA %d\n", entry.getKey()));
            out.write("====================================\n");
            out.write(entry.getValue());
            out.write("\n====================================\n");
        }
        out.write("\n");
        // although the streamingOps support multiple keys, we only query
        // one key here
        ByteArray key;
        try {
            if (keyFormat.equals("readable")) {
                Object keyObject;
                String keySerializerName = keySerializerDef.getName();
                if (isAvroSchema(keySerializerName)) {
                    Schema keySchema = Schema.parse(keySerializerDef.getCurrentSchemaInfo());
                    JsonDecoder decoder = new JsonDecoder(keySchema, keyString);
                    GenericDatumReader<Object> datumReader = new GenericDatumReader<Object>(keySchema);
                    keyObject = datumReader.read(null, decoder);
                } else if (keySerializerName.equals(DefaultSerializerFactory.JSON_SERIALIZER_TYPE_NAME)) {
                    JsonReader jsonReader = new JsonReader(new StringReader(keyString));
                    keyObject = jsonReader.read();
                } else {
                    keyObject = keyString;
                }
                key = new ByteArray(keySerializer.toBytes(keyObject));
            } else {
                key = new ByteArray(ByteUtils.fromHexString(keyString));
            }
        } catch (SerializationException se) {
            System.err.println("Error serializing key " + keyString);
            System.err.println("If this is a JSON key, you need to include escaped quotation marks in the command line if it is a string");
            se.printStackTrace();
            return;
        } catch (DecoderException de) {
            System.err.println("Error decoding key " + keyString);
            de.printStackTrace();
            return;
        } catch (IOException io) {
            System.err.println("Error parsing avro string " + keyString);
            io.printStackTrace();
            return;
        }
        boolean printedKey = false;
        // A Map<> could have been used instead of List<Entry<>> if
        // Versioned supported correct hash codes. Read the comment in
        // Versioned about the issue
        List<Entry<List<Versioned<byte[]>>, List<Integer>>> nodeValues = new ArrayList<Entry<List<Versioned<byte[]>>, List<Integer>>>();
        for (final Integer queryNodeId : queryingNodes) {
            Iterator<QueryKeyResult> iterator;
            iterator = adminClient.streamingOps.queryKeys(queryNodeId, storeName, Arrays.asList(key).iterator());
            QueryKeyResult queryKeyResult = iterator.next();
            if (!printedKey) {
                // de-serialize and write key (only once, it is the same on every node)
                byte[] keyBytes = queryKeyResult.getKey().get();
                writeVoldKeyOrValueInternal(keyBytes, keySerializer, keyCompressionStrategy, "KEY", out);
                printedKey = true;
            }
            // iterate through, de-serialize and write values
            if (queryKeyResult.hasValues() && queryKeyResult.getValues().size() > 0) {
                // Group nodes whose value lists are equal: linear scan because
                // Versioned lacks a usable hashCode (see comment above).
                int elementId = -1;
                for (int i = 0; i < nodeValues.size(); i++) {
                    if (Objects.equal(nodeValues.get(i).getKey(), queryKeyResult.getValues())) {
                        elementId = i;
                        break;
                    }
                }
                if (elementId == -1) {
                    ArrayList<Integer> nodes = new ArrayList<Integer>();
                    nodes.add(queryNodeId);
                    nodeValues.add(new AbstractMap.SimpleEntry<List<Versioned<byte[]>>, List<Integer>>(queryKeyResult.getValues(), nodes));
                } else {
                    nodeValues.get(elementId).getValue().add(queryNodeId);
                }
                out.write(String.format("\nQueried node %d on store %s\n", queryNodeId, storeName));
                int versionCount = 0;
                // NOTE(review): this header is printed only once ("VALUE 0")
                // even though versionCount is incremented per version below;
                // it may have been intended to live inside the loop. Left
                // as-is to preserve existing output format.
                if (queryKeyResult.getValues().size() > 1) {
                    out.write("VALUE " + versionCount + "\n");
                }
                for (Versioned<byte[]> versioned : queryKeyResult.getValues()) {
                    // write version
                    VectorClock version = (VectorClock) versioned.getVersion();
                    out.write("VECTOR_CLOCK_BYTE: " + ByteUtils.toHexString(version.toBytes()) + "\n");
                    out.write("VECTOR_CLOCK_TEXT: " + version.toString() + '[' + new Date(version.getTimestamp()).toString() + "]\n");
                    // write value
                    byte[] valueBytes = versioned.getValue();
                    writeVoldKeyOrValueInternal(valueBytes, valueSerializer, valueCompressionStrategy, "VALUE", out);
                    versionCount++;
                }
            } else // exception.
            if (queryKeyResult.hasException()) {
                boolean isInvalidMetadataException = queryKeyResult.getException() instanceof InvalidMetadataException;
                // InvalidMetadataException is expected when fanning out to all
                // nodes (not every node owns the key), so only report it when
                // you are querying only a single node.
                if (!isInvalidMetadataException || queryingNodes.size() == 1) {
                    out.write(String.format("\nNode %d on store %s returned exception\n", queryNodeId, storeName));
                    out.write(queryKeyResult.getException().toString());
                    out.write("\n====================================\n");
                }
            } else {
                if (queryingNodes.size() == 1) {
                    out.write(String.format("\nNode %d on store %s returned NULL\n", queryNodeId, storeName));
                    out.write("\n====================================\n");
                }
            }
            out.flush();
        }
        out.write("\n====================================\n");
        for (Map.Entry<List<Versioned<byte[]>>, List<Integer>> nodeValue : nodeValues) {
            out.write("Nodes with same Value " + Arrays.toString(nodeValue.getValue().toArray()));
            out.write("\n====================================\n");
        }
        if (nodeValues.size() > 1) {
            out.write("\n*** Multiple (" + nodeValues.size() + ") versions of key/value exist for the key ***\n");
        }
        out.flush();
    }
}
Usage of voldemort.versioning.VectorClock in the voldemort project.
Class R2Store, method parseGetResponse:
/**
 * Parses a multipart/mixed GET response body into a list of versioned values.
 * Each MIME body part carries one version: its vector clock in the
 * X_VOLD_VECTOR_CLOCK header (JSON) and the raw value bytes as the part body.
 *
 * @param entity the raw response entity
 * @return the decoded versioned values (empty list for an empty multipart)
 * @throws VoldemortException if the multipart, the vector-clock JSON, or the
 *         part body cannot be parsed
 */
private List<Versioned<byte[]>> parseGetResponse(ByteString entity) {
    List<Versioned<byte[]>> results = new ArrayList<Versioned<byte[]>>();
    try {
        // Build the multipart object
        byte[] bytes = new byte[entity.length()];
        entity.copyBytes(bytes, 0);
        ByteArrayDataSource ds = new ByteArrayDataSource(bytes, "multipart/mixed");
        MimeMultipart mp = new MimeMultipart(ds);
        for (int i = 0; i < mp.getCount(); i++) {
            MimeBodyPart part = (MimeBodyPart) mp.getBodyPart(i);
            String serializedVC = part.getHeader(RestMessageHeaders.X_VOLD_VECTOR_CLOCK)[0];
            int contentLength = Integer.parseInt(part.getHeader(RestMessageHeaders.CONTENT_LENGTH)[0]);
            if (logger.isDebugEnabled()) {
                logger.debug("Received VC : " + serializedVC);
            }
            VectorClockWrapper vcWrapper = mapper.readValue(serializedVC, VectorClockWrapper.class);
            InputStream input = part.getInputStream();
            byte[] bodyPartBytes = new byte[contentLength];
            // InputStream.read() may return fewer bytes than requested, so
            // loop until the declared content length is fully read. The old
            // single read() call could silently truncate large values.
            int offset = 0;
            while (offset < contentLength) {
                int bytesRead = input.read(bodyPartBytes, offset, contentLength - offset);
                if (bytesRead == -1) {
                    throw new IOException("Unexpected end of body part: read " + offset + " of " + contentLength + " bytes");
                }
                offset += bytesRead;
            }
            VectorClock clock = new VectorClock(vcWrapper.getVersions(), vcWrapper.getTimestamp());
            results.add(new Versioned<byte[]>(bodyPartBytes, clock));
        }
    } catch (MessagingException e) {
        throw new VoldemortException("Messaging exception while trying to parse GET response " + e.getMessage(), e);
    } catch (JsonParseException e) {
        throw new VoldemortException("JSON parsing exception while trying to parse GET response " + e.getMessage(), e);
    } catch (JsonMappingException e) {
        throw new VoldemortException("JSON mapping exception while trying to parse GET response " + e.getMessage(), e);
    } catch (IOException e) {
        throw new VoldemortException("IO exception while trying to parse GET response " + e.getMessage(), e);
    }
    return results;
}
Usage of voldemort.versioning.VectorClock in the voldemort project.
Class R2Store, method delete:
/**
 * Deletes the given key via a REST DELETE request, attaching the serialized
 * vector clock (when non-empty) so the server can apply versioned deletion.
 *
 * @param key the key to delete
 * @param version the version to delete up to; expected to be a VectorClock
 * @return false if the server responded 404 (key/version not found), true
 *         otherwise
 * @throws VoldemortException on interruption, bad URL, or unexpected
 *         execution failures
 */
@Override
public boolean delete(ByteArray key, Version version) throws VoldemortException {
    try {
        // Create the REST request with this byte array
        String base64Key = RestUtils.encodeVoldemortKey(key.get());
        RestRequestBuilder rb = new RestRequestBuilder(new URI(this.restBootstrapURL + "/" + getName() + "/" + base64Key));
        // Create a HTTP DELETE request (comment previously said POST)
        rb.setMethod(DELETE);
        rb.setHeader(CONTENT_LENGTH, "0");
        String timeoutStr = Long.toString(this.config.getTimeoutConfig().getOperationTimeout(VoldemortOpCode.DELETE_OP_CODE));
        rb.setHeader(RestMessageHeaders.X_VOLD_REQUEST_TIMEOUT_MS, timeoutStr);
        rb.setHeader(RestMessageHeaders.X_VOLD_REQUEST_ORIGIN_TIME_MS, String.valueOf(System.currentTimeMillis()));
        if (this.routingTypeCode != null) {
            rb.setHeader(RestMessageHeaders.X_VOLD_ROUTING_TYPE_CODE, this.routingTypeCode);
        }
        if (this.zoneId != INVALID_ZONE_ID) {
            rb.setHeader(RestMessageHeaders.X_VOLD_ZONE_ID, String.valueOf(this.zoneId));
        }
        // Serialize the vector clock header. The original code checked
        // emptiness twice (size() != 0 and then !isEmpty()); collapsed into
        // a single equivalent check.
        VectorClock vc = (VectorClock) version;
        if (vc != null && !vc.getEntries().isEmpty()) {
            String serializedVC = RestUtils.getSerializedVectorClock(vc);
            if (serializedVC != null && serializedVC.length() > 0) {
                rb.setHeader(RestMessageHeaders.X_VOLD_VECTOR_CLOCK, serializedVC);
            }
        }
        RestRequest request = rb.build();
        Future<RestResponse> f = client.restRequest(request);
        // This will block
        RestResponse response = f.get();
        final ByteString entity = response.getEntity();
        if (entity == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("Empty response !");
            }
        }
    } catch (ExecutionException e) {
        if (e.getCause() instanceof RestException) {
            RestException exception = (RestException) e.getCause();
            if (logger.isDebugEnabled()) {
                logger.debug("REST EXCEPTION STATUS : " + exception.getResponse().getStatus());
            }
            if (exception.getResponse().getStatus() == NOT_FOUND.getCode()) {
                return false;
            }
            // NOTE(review): other REST error statuses fall through and return
            // true below — confirm whether they should be rethrown instead.
        } else {
            throw new VoldemortException("Unknown HTTP request execution exception: " + e.getMessage(), e);
        }
    } catch (InterruptedException e) {
        if (logger.isDebugEnabled()) {
            logger.debug("Operation interrupted : " + e.getMessage());
        }
        throw new VoldemortException("Operation Interrupted: " + e.getMessage(), e);
    } catch (URISyntaxException e) {
        throw new VoldemortException("Illegal HTTP URL" + e.getMessage(), e);
    }
    return true;
}
Aggregations