Use of voldemort.utils.ByteArray in project voldemort by voldemort.
From class AbstractNonZonedRebalanceTest, method testRebalanceCleanSecondary.
@Test(timeout = 600000)
public void testRebalanceCleanSecondary() throws Exception {
    logger.info("Starting testRebalanceCleanSecondary");
    try {
        Cluster currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 3 }, { 1 }, { 2 } });
        Cluster finalCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Lists.newArrayList(3));
        // start servers 0, 1, 2
        Map<String, String> configProps = new HashMap<String, String>();
        configProps.put("enable.repair", "true");
        List<Integer> serverList = Arrays.asList(0, 1, 2);
        currentCluster = startServers(currentCluster, rwStoreDefFileWithReplication, serverList, configProps);
        String bootstrapUrl = getBootstrapUrl(currentCluster, 0);
        final ClusterTestUtils.RebalanceKit rebalanceKit = ClusterTestUtils.getRebalanceKit(bootstrapUrl, finalCluster);
        try {
            AdminClient adminClient = rebalanceKit.controller.getAdminClient();
            populateData(currentCluster, rwStoreDefWithReplication, adminClient, false);
            // figure out the positive keys to check after the rebalance
            List<ByteArray> positiveTestKeyList = sampleKeysFromPartition(adminClient, 0, rwStoreDefWithReplication.getName(), Arrays.asList(3), 20);
            rebalanceAndCheck(rebalanceKit.plan, rebalanceKit.controller, Arrays.asList(0, 1, 2));
            checkConsistentMetadata(finalCluster, serverList);
            // do the cleanup operation
            for (int i = 0; i < 3; i++) {
                adminClient.storeMntOps.repairJob(i);
            }
            // wait for the repairs to complete
            for (int i = 0; i < 3; i++) {
                ServerTestUtils.waitForAsyncOperationOnServer(serverMap.get(i), "Repair", 5000);
            }
            // do the positive tests
            checkForKeyExistence(adminClient, 0, rwStoreDefWithReplication.getName(), positiveTestKeyList);
            logger.info("[Secondary] Successful clean after Rebalancing");
        } finally {
            // stop servers
            stopServer(serverList);
        }
    } catch (AssertionError ae) {
        logger.error("Assertion broken in testRWRebalanceCleanSecondary ", ae);
        throw ae;
    }
}
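The repair-and-wait step above recurs across these rebalance tests; a minimal sketch of how it could be factored into a helper, assuming the same serverMap field and the ServerTestUtils/AdminClient calls used in the test above (the helper name and timeout parameter are illustrative):

private void repairServersAndWait(AdminClient adminClient, List<Integer> serverIds, long timeoutMs) {
    // kick off the asynchronous repair job on every server
    for (int id : serverIds) {
        adminClient.storeMntOps.repairJob(id);
    }
    // then block until the "Repair" async operation completes on each of them
    for (int id : serverIds) {
        ServerTestUtils.waitForAsyncOperationOnServer(serverMap.get(id), "Repair", timeoutMs);
    }
}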
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
From class AbstractRebalanceTest, method checkForKeyExistence.
/**
 * REFACTOR: these should belong in AdminClient so existence checks can be done
 * easily across the board
 *
 * @param admin admin client used to query the server
 * @param serverId id of the node to query
 * @param store name of the store to query
 * @param keyList keys that are expected to exist on the node
 */
protected void checkForKeyExistence(AdminClient admin, int serverId, String store, List<ByteArray> keyList) {
    // do the positive tests
    Iterator<QueryKeyResult> positiveTestResultsItr = admin.streamingOps.queryKeys(serverId, store, keyList.iterator());
    while (positiveTestResultsItr.hasNext()) {
        QueryKeyResult item = positiveTestResultsItr.next();
        ByteArray key = item.getKey();
        List<Versioned<byte[]>> vals = item.getValues();
        Exception e = item.getException();
        assertEquals("Error fetching key " + key, null, e);
        // use && so the size check is skipped when vals is null
        assertEquals("Value not found for key " + key, true, vals != null && vals.size() != 0);
    }
}
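A typical call site mirrors testRebalanceCleanSecondary above: sample keys from a partition before the rebalance, then assert they are still readable afterwards (a sketch; the node id, partition and sample size are illustrative):

// sample 20 keys from partition 3 via node 0, then verify them post-rebalance
List<ByteArray> positiveKeys = sampleKeysFromPartition(adminClient, 0, rwStoreDefWithReplication.getName(), Arrays.asList(3), 20);
checkForKeyExistence(adminClient, 0, rwStoreDefWithReplication.getName(), positiveKeys);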
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
From class AbstractRebalanceTest, method checkForTupleEquivalence.
/**
 * REFACTOR: these should belong in AdminClient so equivalence checks can be done
 * easily across the board
 *
 * @param admin admin client used to query the server
 * @param serverId id of the node to query
 * @param store name of the store to query
 * @param keyList keys that are expected to exist on the node
 * @param baselineTuples expected values, keyed by the UTF-8 string form of each key
 * @param baselineVersions expected versions, keyed by the UTF-8 string form of each key
 */
protected void checkForTupleEquivalence(AdminClient admin, int serverId, String store, List<ByteArray> keyList, HashMap<String, String> baselineTuples, HashMap<String, VectorClock> baselineVersions) {
    // do the positive tests
    Iterator<QueryKeyResult> positiveTestResultsItr = admin.streamingOps.queryKeys(serverId, store, keyList.iterator());
    while (positiveTestResultsItr.hasNext()) {
        QueryKeyResult item = positiveTestResultsItr.next();
        ByteArray key = item.getKey();
        List<Versioned<byte[]>> vals = item.getValues();
        Exception e = item.getException();
        assertEquals("Error fetching key " + key, null, e);
        // use && so the size check is skipped when vals is null
        assertEquals("Value not found for key " + key, true, vals != null && vals.size() != 0);
        String keyStr = ByteUtils.getString(key.get(), "UTF-8");
        if (baselineTuples != null)
            assertEquals("Value does not match up ", baselineTuples.get(keyStr), ByteUtils.getString(vals.get(0).getValue(), "UTF-8"));
        if (baselineVersions != null)
            assertEquals("Version does not match up", baselineVersions.get(keyStr), vals.get(0).getVersion());
    }
}
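The baseline maps are keyed by the UTF-8 string form of each key, matching the ByteUtils.getString conversion inside the method. A minimal sketch of how a caller might capture them while writing test data (the testEntries map and the fresh VectorClock are illustrative, not the exact helper these tests use):

HashMap<String, String> baselineTuples = new HashMap<String, String>();
HashMap<String, VectorClock> baselineVersions = new HashMap<String, VectorClock>();
for (Map.Entry<String, String> entry : testEntries.entrySet()) {
    // record the value and the version each key was written with
    baselineTuples.put(entry.getKey(), entry.getValue());
    baselineVersions.put(entry.getKey(), new VectorClock());
}
// later, after the rebalance:
checkForTupleEquivalence(adminClient, 0, storeName, keyList, baselineTuples, baselineVersions);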
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
From class AbstractRebalanceTest, method checkEntriesPostRebalance.
/**
 * Makes sure that all expected partition-stores are on each server after
 * the rebalance.
 *
 * @param currentCluster cluster before the rebalance
 * @param finalCluster cluster after the rebalance
 * @param storeDefs store definitions to verify
 * @param nodeCheckList ids of the nodes whose entries should be verified
 * @param baselineTuples expected values, keyed by the UTF-8 string form of each key
 * @param baselineVersions expected versions, keyed by the UTF-8 string form of each key
 */
protected void checkEntriesPostRebalance(Cluster currentCluster,
                                         Cluster finalCluster,
                                         List<StoreDefinition> storeDefs,
                                         List<Integer> nodeCheckList,
                                         HashMap<String, String> baselineTuples,
                                         HashMap<String, VectorClock> baselineVersions) {
    for (StoreDefinition storeDef : storeDefs) {
        Map<Integer, Set<Pair<Integer, Integer>>> currentNodeToPartitionTuples = ROTestUtils.getNodeIdToAllPartitions(currentCluster, storeDef, true);
        Map<Integer, Set<Pair<Integer, Integer>>> finalNodeToPartitionTuples = ROTestUtils.getNodeIdToAllPartitions(finalCluster, storeDef, true);
        for (int nodeId : nodeCheckList) {
            Set<Pair<Integer, Integer>> currentPartitionTuples = currentNodeToPartitionTuples.get(nodeId);
            Set<Pair<Integer, Integer>> finalPartitionTuples = finalNodeToPartitionTuples.get(nodeId);
            // partition tuples that the rebalance added to this node
            HashMap<Integer, List<Integer>> flattenedPresentTuples = ROTestUtils.flattenPartitionTuples(Utils.getAddedInTarget(currentPartitionTuples, finalPartitionTuples));
            Store<ByteArray, byte[], byte[]> store = getSocketStore(storeDef.getName(),
                                                                    finalCluster.getNodeById(nodeId).getHost(),
                                                                    finalCluster.getNodeById(nodeId).getSocketPort());
            checkGetEntries(finalCluster.getNodeById(nodeId), finalCluster, storeDef, store, flattenedPresentTuples, baselineTuples, baselineVersions);
        }
    }
}
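A call site would pass the pre- and post-rebalance cluster descriptions plus the nodes whose entries should be verified; a sketch reusing the names from the tests above (the arguments are illustrative):

// verify the partition-stores gained by nodes 0, 1 and 2 against the baseline data
checkEntriesPostRebalance(currentCluster,
                          finalCluster,
                          Lists.newArrayList(rwStoreDefWithReplication),
                          Arrays.asList(0, 1, 2),
                          baselineTuples,
                          baselineVersions);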
Use of voldemort.utils.ByteArray in project voldemort by voldemort.
From class ZoneShrinkageEndToEndTest, method testAllServersSendingOutSlopsCorrectly.
@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
    final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
    final Serializer<Slop> slopValueSerializer = new SlopSerializer();
    final SlopSerializer slopSerializer = new SlopSerializer();
    StoreDefinition storeDef = storeDefs.get(0);
    TestSocketStoreFactory ssf = new TestSocketStoreFactory();
    Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = new HashMap<Integer, SocketStore>();
    Map<Integer, SocketStore> slopStoresCreatedAfterShrink = new HashMap<Integer, SocketStore>();
    // For every destination server, generate keys that get slopped on every other
    // server (2 * N * (N - 1) keys per round, once before and once after the shrink).
    // Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key, Integer hostNodeId>>>
    Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
    for (Node slopFinalDestinationNode : cluster.getNodes()) {
        serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
    }
    // make socket stores to all servers before shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedBeforeShrink.put(nodeId, slopStore);
    }
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedBeforeShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    // update metadata
    executeShrinkZone();
    logger.info("-------------------------------");
    logger.info(" CONNECTING SLOP STORES ");
    logger.info("-------------------------------");
    // make socket stores to all servers after shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedAfterShrink.put(nodeId, slopStore);
    }
    logger.info("-------------------------------");
    logger.info(" CONNECTED SLOP STORES ");
    logger.info("-------------------------------");
    logger.info("-------------------------------");
    logger.info(" SENDING SLOPS ");
    logger.info("-------------------------------");
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedAfterShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    logger.info("-------------------------------");
    logger.info(" SENT SLOPS ");
    logger.info("-------------------------------");
    ServerTestUtils.waitForSlopDrain(vservers, 30000L);
    // verify that every slop was handled properly (arrived or dropped)
    boolean hasError = false;
    int goodCount = 0;
    int errorCount = 0;
    for (Integer nodeId : serverKeys.keySet()) {
        VoldemortServer vs = vservers.get(nodeId);
        Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
        List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
        for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
            ByteArray key = keyHostIdPair.getFirst();
            Integer hostId = keyHostIdPair.getSecond();
            Integer nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
            List<Versioned<byte[]>> result = store.get(key, null);
            if (cluster.getNodeById(nodeId).getZoneId() == droppingZoneId) {
                // destination zone was dropped, so the slop should have been discarded
                if (!result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            } else {
                // destination zone survived, so the slop should have arrived
                if (result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but does not\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            }
        }
    }
    logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
    Assert.assertFalse("Error Occurred BAD:" + errorCount + "; GOOD: " + goodCount + ". Check log.", hasError);
}
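The verification loop reduces to one rule: a slop destined for a node in the dropped zone must have been discarded, and every other slop must have arrived. A minimal sketch of that rule as a standalone predicate (illustrative only; not part of ZoneShrinkageEndToEndTest):

// true when a slopped key ended up in the expected state on its destination node:
// absent if the destination's zone was dropped, present otherwise
private static boolean slopHandledCorrectly(int destinationZoneId, int droppedZoneId, List<Versioned<byte[]>> storedValues) {
    boolean zoneWasDropped = destinationZoneId == droppedZoneId;
    return zoneWasDropped ? storedValues.isEmpty() : !storedValues.isEmpty();
}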