use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class AbstractRebalanceTest method checkForTupleEquivalence.
/**
 * REFACTOR: these should belong in AdminClient so that existence checks can
 * be done easily across the board
 *
 * @param admin admin client used to query the server
 * @param serverId id of the server to query
 * @param store name of the store to check
 * @param keyList keys expected to exist on the server
 * @param baselineTuples expected key/value pairs (null skips the value check)
 * @param baselineVersions expected vector clocks per key (null skips the version check)
 */
protected void checkForTupleEquivalence(AdminClient admin, int serverId, String store, List<ByteArray> keyList, HashMap<String, String> baselineTuples, HashMap<String, VectorClock> baselineVersions) {
// do the positive tests
Iterator<QueryKeyResult> positiveTestResultsItr = admin.streamingOps.queryKeys(serverId, store, keyList.iterator());
while (positiveTestResultsItr.hasNext()) {
QueryKeyResult item = positiveTestResultsItr.next();
ByteArray key = item.getKey();
List<Versioned<byte[]>> vals = item.getValues();
Exception e = item.getException();
assertEquals("Error fetching key " + key, null, e);
assertEquals("Value not found for key " + key, true, vals != null & vals.size() != 0);
String keyStr = ByteUtils.getString(key.get(), "UTF-8");
if (baselineTuples != null)
assertEquals("Value does not match up ", baselineTuples.get(keyStr), ByteUtils.getString(vals.get(0).getValue(), "UTF-8"));
if (baselineVersions != null)
assertEquals("Version does not match up", baselineVersions.get(keyStr), vals.get(0).getVersion());
}
}
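For context, here is a minimal, hypothetical invocation of the helper above (the adminClient, keysOnNode0, expectedValues and expectedClocks variables are assumptions, not part of the test): after a rebalance, each surviving node can be checked against the tuples captured before the move.
// Hypothetical usage sketch; passing null for either baseline map skips that check.
checkForTupleEquivalence(adminClient,     // connected AdminClient
                         0,               // server id to query
                         "test-store",    // store name
                         keysOnNode0,     // List<ByteArray> of keys expected on node 0
                         expectedValues,  // HashMap<String, String>: key -> expected value
                         expectedClocks); // HashMap<String, VectorClock>: key -> expected version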
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class ZoneShrinkageEndToEndTest method testAllServersSendingOutSlopsCorrectly.
@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
final Serializer<Slop> slopValueSerializer = new SlopSerializer();
final SlopSerializer slopSerializer = new SlopSerializer();
StoreDefinition storeDef = storeDefs.get(0);
TestSocketStoreFactory ssf = new TestSocketStoreFactory();
Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = new HashMap<Integer, SocketStore>();
Map<Integer, SocketStore> slopStoresCreatedAfterShrink = new HashMap<Integer, SocketStore>();
// for every destination server, generate keys that will be slopped to it
// from each of the other servers, but never from itself (2*N*(N-1) keys)
// Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key,
// Integer hostNodeId>>>
Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
for (Node slopFinalDestinationNode : cluster.getNodes()) {
serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
}
// make socket stores to all servers before shrink
for (Integer nodeId : vservers.keySet()) {
SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
slopStoresCreatedBeforeShrink.put(nodeId, slopStore);
}
for (int i = 0; i < 2; i++) {
for (Integer slopHostId : vservers.keySet()) {
SocketStore slopStore = slopStoresCreatedBeforeShrink.get(slopHostId);
for (Integer destinationNodeId : vservers.keySet()) {
if (!destinationNodeId.equals(slopHostId)) {
ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
}
}
}
}
// update metadata
executeShrinkZone();
logger.info("-------------------------------");
logger.info(" CONNECTING SLOP STORES ");
logger.info("-------------------------------");
// make socket stores to all servers after shrink
for (Integer nodeId : vservers.keySet()) {
SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
slopStoresCreatedAfterShrink.put(nodeId, slopStore);
}
logger.info("-------------------------------");
logger.info(" CONNECTED SLOP STORES ");
logger.info("-------------------------------");
logger.info("-------------------------------");
logger.info(" SENDING SLOPS ");
logger.info("-------------------------------");
for (int i = 0; i < 2; i++) {
for (Integer slopHostId : vservers.keySet()) {
SocketStore slopStore = slopStoresCreatedAfterShrink.get(slopHostId);
for (Integer destinationNodeId : vservers.keySet()) {
if (!destinationNodeId.equals(slopHostId)) {
ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
}
}
}
}
logger.info("-------------------------------");
logger.info(" SENT SLOPS ");
logger.info("-------------------------------");
ServerTestUtils.waitForSlopDrain(vservers, 30000L);
// verify all slops are processed properly (arrived or dropped)
boolean hasError = false;
int goodCount = 0;
int errorCount = 0;
for (Integer nodeId : serverKeys.keySet()) {
VoldemortServer vs = vservers.get(nodeId);
Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
ByteArray key = keyHostIdPair.getFirst();
Integer hostId = keyHostIdPair.getSecond();
Integer nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
List<Versioned<byte[]>> result = store.get(key, null);
if (cluster.getNodeById(nodeId).getZoneId() == droppingZoneId) {
if (!result.isEmpty()) {
logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
hasError = true;
errorCount++;
} else {
goodCount++;
}
} else {
if (result.isEmpty()) {
logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but not\n", key.toString(), nodeId, nodeZoneId, hostId));
hasError = true;
errorCount++;
} else {
goodCount++;
}
}
}
}
logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
Assert.assertFalse("Error Occurred BAD:" + errorCount + "; GOOD: " + goodCount + ". Check log.", hasError);
}
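For readability, the write pattern the test repeats is shown here in isolation, a sketch built from the same calls used above (the storeName, key, destinationNodeId, slopSerializer and slopStore variables are assumed to be set up as in the test):
// Build a hinted-handoff record (slop) describing a PUT destined for another node,
// then write it to this node's "slop" store under an empty vector clock.
Slop slop = new Slop(storeName, Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
ByteArray slopKey = slop.makeKey();
Versioned<byte[]> slopValue = new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock());
slopStore.put(slopKey, slopValue, null);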
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class DynamicTimeoutStoreClientTest method test.
/**
 * Test the dynamic per-call timeout. We first do a regular put with the
 * default configured timeout. We then do a put with a dynamic timeout of
 * 200 ms, which is less than the delay configured on the server side and is
 * therefore expected to fail. Finally we do a get with a dynamic timeout of
 * 1500 ms, which should succeed and return the value from the first put.
 */
@Test
public void test() {
long incorrectTimeout = 200;
long correctTimeout = 1500;
String key = "a";
String value = "First";
String newValue = "Second";
try {
this.dynamicTimeoutClient.put(new ByteArray(key.getBytes()), value.getBytes());
} catch (Exception e) {
fail("Error in regular put.");
}
long startTime = System.currentTimeMillis();
try {
this.dynamicTimeoutClient.putWithCustomTimeout(new CompositePutVoldemortRequest<ByteArray, byte[]>(new ByteArray(key.getBytes()), newValue.getBytes(), incorrectTimeout));
fail("Should not reach this point. The small (incorrect) timeout did not work.");
} catch (InsufficientOperationalNodesException ion) {
System.out.println("This failed as Expected.");
}
try {
List<Versioned<byte[]>> versionedValues = this.dynamicTimeoutClient.getWithCustomTimeout(new CompositeGetVoldemortRequest<ByteArray, byte[]>(new ByteArray(key.getBytes()), correctTimeout, true));
// We only expect one value in the response since resolve conflicts
// is set to true
assertTrue(versionedValues.size() == 1);
Versioned<byte[]> versionedValue = versionedValues.get(0);
long endTime = System.currentTimeMillis();
System.out.println("Total time taken = " + (endTime - startTime));
String response = new String(versionedValue.getValue());
if (!response.equals(value)) {
fail("The returned value does not match. Expected: " + value + " but Received: " + response);
}
} catch (Exception e) {
e.printStackTrace();
fail("The dynamic per call timeout did not work !");
}
}
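A note on the request wrapper used above: the last constructor argument of CompositeGetVoldemortRequest is the resolve-conflicts flag. With true (as in the test) at most one Versioned value comes back; the sketch below flips it to false, which presumably returns all concurrent versions for the caller to resolve (this variant is an assumption and is not exercised by the test):
// Hedged sketch: same key and timeout as the test, but without conflict resolution.
List<Versioned<byte[]>> allVersions = this.dynamicTimeoutClient.getWithCustomTimeout(new CompositeGetVoldemortRequest<ByteArray, byte[]>(new ByteArray(key.getBytes()), correctTimeout, false));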
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class QueryKeyResultTest method testStandardCtor.
@Test
public void testStandardCtor() {
ByteArray key = new ByteArray("key".getBytes());
List<Versioned<byte[]>> values = new ArrayList<Versioned<byte[]>>(0);
Versioned<byte[]> value1 = TestUtils.getVersioned(TestUtils.randomBytes(10), 1, 1, 1);
values.add(value1);
Versioned<byte[]> value2 = TestUtils.getVersioned(TestUtils.randomBytes(10), 1, 1, 2);
values.add(value2);
QueryKeyResult queryKeyResult = new QueryKeyResult(key, values);
assertTrue(queryKeyResult.hasValues());
assertEquals(values, queryKeyResult.getValues());
assertFalse(queryKeyResult.hasException());
assertEquals(null, queryKeyResult.getException());
}
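The hasException()/getException() accessors checked above suggest a companion constructor that carries an exception instead of values. A hedged sketch of that case, assuming QueryKeyResult exposes a (key, exception) constructor:
// Sketch only: assumes QueryKeyResult(ByteArray, Exception) exists alongside the values constructor.
ByteArray key = new ByteArray("key".getBytes());
Exception exception = new Exception("fetch failed");
QueryKeyResult errorResult = new QueryKeyResult(key, exception);
assertFalse(errorResult.hasValues());
assertTrue(errorResult.hasException());
assertEquals(exception, errorResult.getException());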
use of voldemort.versioning.Versioned in project voldemort by voldemort.
the class AbstractZonedRebalanceTest method testProxyGetDuringRebalancing.
@Test(timeout = 600000)
public void testProxyGetDuringRebalancing() throws Exception {
logger.info("Starting testProxyGetDuringRebalancing");
try {
Cluster currentCluster = ServerTestUtils.getLocalZonedCluster(4, 2, new int[] { 0, 0, 1, 1 }, new int[][] { { 0, 2, 4 }, { 6 }, { 1, 3, 5 }, { 7 } });
Cluster tmpfinalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 3, Lists.newArrayList(2));
final Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(tmpfinalCluster, 1, Lists.newArrayList(3));
final List<Integer> serverList = Arrays.asList(0, 1, 2, 3);
Map<String, String> configProps = new HashMap<String, String>();
configProps.put("admin.max.threads", "5");
final Cluster updatedCurrentCluster = startServers(currentCluster, storeDefFileWithReplication, serverList, configProps);
ExecutorService executors = Executors.newFixedThreadPool(2);
final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
final List<Exception> exceptions = Collections.synchronizedList(new ArrayList<Exception>());
String bootstrapUrl = getBootstrapUrl(updatedCurrentCluster, 0);
int maxParallel = 2;
final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, maxParallel, finalCluster);
try {
populateData(currentCluster, rwStoreDefWithReplication);
final SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(getBootstrapUrl(currentCluster, 0)).setEnableLazy(false).setSocketTimeout(120, TimeUnit.SECONDS));
final StoreClient<String, String> storeClientRW = new DefaultStoreClient<String, String>(rwStoreDefWithReplication.getName(), null, factory, 3);
final CountDownLatch latch = new CountDownLatch(2);
// start get operation.
executors.execute(new Runnable() {
@Override
public void run() {
try {
List<String> keys = new ArrayList<String>(testEntries.keySet());
while (!rebalancingComplete.get()) {
// should always be able to get values.
int index = (int) (Math.random() * keys.size());
// should get a valid value
try {
Versioned<String> value = storeClientRW.get(keys.get(index));
assertNotSame("StoreClient get() should not return null.", null, value);
assertEquals("Value returned should be good", new Versioned<String>(testEntries.get(keys.get(index))), value);
} catch (Exception e) {
logger.error("Exception in proxy get thread", e);
e.printStackTrace();
exceptions.add(e);
}
}
} catch (Exception e) {
logger.error("Exception in proxy get thread", e);
exceptions.add(e);
} finally {
factory.close();
latch.countDown();
}
}
});
executors.execute(new Runnable() {
@Override
public void run() {
try {
Thread.sleep(500);
rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(0, 1, 2, 3));
Thread.sleep(500);
rebalancingComplete.set(true);
checkConsistentMetadata(finalCluster, serverList);
} catch (Exception e) {
exceptions.add(e);
} finally {
// stop servers
try {
stopServer(serverList);
} catch (Exception e) {
throw new RuntimeException(e);
}
latch.countDown();
}
}
});
latch.await();
executors.shutdown();
executors.awaitTermination(300, TimeUnit.SECONDS);
// check that no exceptions were recorded
if (exceptions.size() > 0) {
for (Exception e : exceptions) {
e.printStackTrace();
}
fail("Should not see any exceptions.");
}
} finally {
// stop servers
stopServer(serverList);
}
} catch (AssertionError ae) {
logger.error("Assertion broken in testProxyGetDuringRebalancing ", ae);
throw ae;
}
}
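Distilled, the concurrency scaffolding of this test follows the pattern sketched below (illustration only, written with lambdas for brevity; the real test uses anonymous Runnables as shown above):
// Reader task: keep issuing gets until the rebalance task signals completion.
// Rebalance task: run the plan, flip the flag, then verify metadata and stop servers.
final AtomicBoolean rebalancingComplete = new AtomicBoolean(false);
final CountDownLatch latch = new CountDownLatch(2);
ExecutorService executors = Executors.newFixedThreadPool(2);
executors.execute(() -> {
    try {
        while (!rebalancingComplete.get()) {
            // proxy gets must keep returning valid values here
        }
    } finally {
        latch.countDown();
    }
});
executors.execute(() -> {
    try {
        // kick off the rebalance here, then:
        rebalancingComplete.set(true);
    } finally {
        latch.countDown();
    }
});
latch.await();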