use of voldemort.store.socket.TestSocketStoreFactory in project voldemort by voldemort.
the class ZoneShrinkageEndToEndTest method setup.
@Before
public void setup() throws IOException {
    // setup and start servers
    for (Node node : cluster.getNodes()) {
        String tempFolderPath = TestUtils.createTempDir().getAbsolutePath();
        // setup servers
        SocketStoreFactory ssf = new TestSocketStoreFactory();
        VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true, node.getId(), tempFolderPath, cluster, storeDefs, new Properties());
        Assert.assertTrue(config.isSlopEnabled());
        Assert.assertTrue(config.isSlopPusherJobEnabled());
        Assert.assertTrue(config.getAutoPurgeDeadSlops());
        config.setSlopFrequencyMs(10000L);
        VoldemortServer vs = ServerTestUtils.startVoldemortServer(ssf, config, cluster);
        vservers.put(node.getId(), vs);
        socketStoreFactories.put(node.getId(), ssf);
        voldemortConfigs.put(node.getId(), config);
    }
    for (Node node : cluster.getNodes()) {
        if (node.getZoneId() != droppingZoneId) {
            survivingNodes.add(node);
        }
    }
    bootstrapURL = survivingNodes.get(0).getSocketUrl().toString();
}
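This setup registers every started server and its socket store factory in the vservers and socketStoreFactories maps so they can be released after each test. A minimal teardown counterpart might look like the sketch below; it is an assumption that uses only the stop() and close() calls already visible on this page and may differ from the test's actual teardown.

@After
public void teardown() {
    // stop every server started in setup() and release its socket store factory
    for (VoldemortServer vs : vservers.values()) {
        vs.stop();
    }
    for (SocketStoreFactory ssf : socketStoreFactories.values()) {
        ssf.close();
    }
}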
use of voldemort.store.socket.TestSocketStoreFactory in project voldemort by voldemort.
the class ZoneShrinkageEndToEndTest method testAllServersSendingOutSlopsCorrectly.
@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
    final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
    final Serializer<Slop> slopValueSerializer = new SlopSerializer();
    final SlopSerializer slopSerializer = new SlopSerializer();
    StoreDefinition storeDef = storeDefs.get(0);
    TestSocketStoreFactory ssf = new TestSocketStoreFactory();
    Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = new HashMap<Integer, SocketStore>();
    Map<Integer, SocketStore> slopStoresCreatedAfterShrink = new HashMap<Integer, SocketStore>();
    // generate keys destined for each server, slopped on every other server
    // except itself (2*N*(N-1) keys)
    // Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key, Integer hostNodeId>>>
    Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
    for (Node slopFinalDestinationNode : cluster.getNodes()) {
        serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
    }
    // make socket stores to all servers before shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedBeforeShrink.put(nodeId, slopStore);
    }
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedBeforeShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    // update metadata
    executeShrinkZone();
    logger.info("-------------------------------");
    logger.info(" CONNECTING SLOP STORES ");
    logger.info("-------------------------------");
    // make socket stores to all servers after shrink
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStoresCreatedAfterShrink.put(nodeId, slopStore);
    }
    logger.info("-------------------------------");
    logger.info(" CONNECTED SLOP STORES ");
    logger.info("-------------------------------");
    logger.info("-------------------------------");
    logger.info(" SENDING SLOPS ");
    logger.info("-------------------------------");
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStoresCreatedAfterShrink.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
    logger.info("-------------------------------");
    logger.info(" SENT SLOPS ");
    logger.info("-------------------------------");
    ServerTestUtils.waitForSlopDrain(vservers, 30000L);
    // verify all slops were processed properly (either arrived or dropped)
    boolean hasError = false;
    int goodCount = 0;
    int errorCount = 0;
    for (Integer nodeId : serverKeys.keySet()) {
        VoldemortServer vs = vservers.get(nodeId);
        Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
        List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
        for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
            ByteArray key = keyHostIdPair.getFirst();
            Integer hostId = keyHostIdPair.getSecond();
            Integer nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
            List<Versioned<byte[]>> result = store.get(key, null);
            if (cluster.getNodeById(nodeId).getZoneId() == droppingZoneId) {
                if (!result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            } else {
                if (result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but does not\n", key.toString(), nodeId, nodeZoneId, hostId));
                    hasError = true;
                    errorCount++;
                } else {
                    goodCount++;
                }
            }
        }
    }
    logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
    Assert.assertFalse("Error Occurred BAD:" + errorCount + "; GOOD: " + goodCount + ". Check log.", hasError);
}
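The two slop-writing loops above repeat the same put pattern against a node's "slop" store. The sketch below factors that pattern into a helper; the name pushSlop is illustrative and not part of the original test class, and only calls already shown above are used.

// Illustrative helper (pushSlop is not in the original test): wrap a key in a
// PUT slop destined for destinationNodeId and write it to the hosting node's
// "slop" store.
private void pushSlop(SocketStore slopStore,
                      StoreDefinition storeDef,
                      SlopSerializer slopSerializer,
                      ByteArray key,
                      int destinationNodeId) {
    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(),
                         destinationNodeId, new Date());
    slopStore.put(slop.makeKey(),
                  new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()),
                  null);
}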
use of voldemort.store.socket.TestSocketStoreFactory in project voldemort by voldemort.
the class ReplaceNodeTest method testNodeDownReplacement.
@Test
public void testNodeDownReplacement() throws Exception {
    final int REPLACEMENT_NODE = 0;
    // This simulates the case where a machine failed but its hard disk was intact.
    // In that case the hard disk is moved to another machine, cluster.xml is edited
    // to point to the new host, and the machine is started.
    // The case is simulated by remembering the cluster of node A, replacing that node
    // with node B, then creating a new server with node A's cluster (simulating the
    // cluster.xml edit) and node B's data directory (simulating the hard disk move).
    // Finally, node B is shut down and replaced with the newly created node.
    Cluster cluster = originalServers[nodeToBeReplaced].getMetadataStore().getCluster();
    List<StoreDefinition> storeDefs = originalServers[nodeToBeReplaced].getMetadataStore().getStoreDefList();
    Node node = originalServers[nodeToBeReplaced].getIdentityNode();
    // Verify the node-down scenario first
    final boolean DO_RESTORE = false;
    final boolean STOP_OLD_NODE = true;
    verifyNodeReplacement(nodeToBeReplaced, otherServers, REPLACEMENT_NODE, STOP_OLD_NODE, DO_RESTORE);
    // Now the replacement node is part of the original cluster.
    // Stop the replacement node and assume its hard disk was moved to a new node.
    // (This is simulated by starting another node that points to the data
    // directory of the replacement node.)
    String baseDirPath = otherServers[REPLACEMENT_NODE].getVoldemortConfig().getVoldemortHome();
    // ServerTestUtils.stopVoldemortServer also deletes the data directory,
    // so use stop() here to keep the data and simulate the hard disk move.
    otherServers[REPLACEMENT_NODE].stop();
    VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true, node.getId(), baseDirPath, cluster, storeDefs, new Properties());
    Assert.assertTrue(config.isSlopEnabled());
    Assert.assertTrue(config.isSlopPusherJobEnabled());
    Assert.assertTrue(config.getAutoPurgeDeadSlops());
    config.setSlopFrequencyMs(8000L);
    config.setEnableNodeIdDetection(this.autoDetectNodeId);
    if (this.autoDetectNodeId) {
        config.setNodeIdImplementation(new NodeIdHostMatcher(nodeToBeReplaced));
    }
    // Although baseDirPath is passed into the config, the test utilities always
    // append "node-<nodeId>" to it, so forcefully overwrite the Voldemort home
    // here to point back to the old directory.
    config.setVoldemortHome(baseDirPath);
    SocketStoreFactory ssf = new TestSocketStoreFactory();
    VoldemortServer hardDiskMovedServer = ServerTestUtils.startVoldemortServer(ssf, config, cluster);
    ssf.close();
    otherServers[REPLACEMENT_NODE] = hardDiskMovedServer;
    final boolean SKIP_RESTORE = true;
    final boolean DONOT_STOP_OLD_NODE = true;
    verifyNodeReplacement(nodeToBeReplaced, otherServers, REPLACEMENT_NODE, DONOT_STOP_OLD_NODE, SKIP_RESTORE);
}
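The hard-disk-move step above boils down to building a config for the failed node's id, pointing the Voldemort home back at the surviving data directory, and starting a fresh server through a TestSocketStoreFactory. The sketch below condenses that step; restartWithExistingData is an illustrative name, not a method of the original test, and only calls shown above are used.

// Illustrative sketch of the restart-with-existing-data step shown above.
private VoldemortServer restartWithExistingData(Node node,
                                                Cluster cluster,
                                                List<StoreDefinition> storeDefs,
                                                String existingVoldemortHome) throws IOException {
    VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true, node.getId(), existingVoldemortHome, cluster, storeDefs, new Properties());
    // the config helper appends a per-node suffix to the base directory, so
    // point the home back at the pre-existing data directory explicitly
    config.setVoldemortHome(existingVoldemortHome);
    SocketStoreFactory ssf = new TestSocketStoreFactory();
    VoldemortServer server = ServerTestUtils.startVoldemortServer(ssf, config, cluster);
    ssf.close();
    return server;
}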
use of voldemort.store.socket.TestSocketStoreFactory in project voldemort by voldemort.
the class ZoneShrinkageCLITest method setup.
public void setup() throws IOException {
    // setup cluster
    bsURL = cluster.getNodes().iterator().next().getSocketUrl().toString();
    for (Node node : cluster.getNodes()) {
        SocketStoreFactory ssf = new TestSocketStoreFactory();
        VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true, node.getId(), TestUtils.createTempDir().getAbsolutePath(), cluster, oldStores, new Properties());
        VoldemortServer vs = ServerTestUtils.startVoldemortServer(ssf, config, cluster);
        vservers.put(node.getId(), vs);
        socketStoreFactories.put(node.getId(), ssf);
    }
}
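The bsURL computed in this setup is the bootstrap URL later handed to the zone-shrinkage CLI. The same URL can also bootstrap an ordinary Voldemort client; the sketch below is an assumption for illustration only, and the store name "test" is a placeholder rather than one of the stores defined by this test.

// Assumed usage sketch: bootstrap a regular store client from bsURL.
StoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(bsURL));
StoreClient<String, String> client = factory.getStoreClient("test");
client.put("hello", "world");
factory.close();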