Usage of voldemort.server.VoldemortServer in the voldemort project: class AdminServiceFilterTest, method setUp.
@Override
@Before
public void setUp() throws IOException {
    cluster = ServerTestUtils.getLocalCluster(2);
    // Parse the store definitions up front so an XML problem surfaces before
    // any server work is done.
    List<StoreDefinition> storeDefs = new StoreDefinitionsMapper().readStoreList(new File(storesXmlfile));
    storeDef = StoreDefinitionUtils.getStoreDefinitionWithName(storeDefs, testStoreName);
    // Build a single-node server config backed by a throwaway data directory.
    String dataDirectory = TestUtils.createTempDir().getAbsolutePath();
    VoldemortConfig serverConfig = ServerTestUtils.createServerConfig(useNio, 0, dataDirectory, null, storesXmlfile, new Properties());
    serverConfig.setEnableNetworkClassLoader(true);
    server = new VoldemortServer(serverConfig, cluster);
    server.start();
    adminClient = ServerTestUtils.getAdminClient(cluster);
}
Usage of voldemort.server.VoldemortServer in the voldemort project: class AbstractRebalanceTest, method startServers.
// This method is susceptible to BindException issues due to TOCTOU
// problem with getLocalCluster (which is used to construct cluster that is
// passed in).
// TODO: (refactor) AbstractRebalanceTest to take advantage of
// ServerTestUtils.startVoldemortCluster.
/**
 * Starts a Voldemort server for each node id in {@code nodeToStart} and
 * registers it in {@code serverMap}.
 *
 * @param cluster cluster topology the servers join
 * @param storeXmlFile path to the stores.xml used for every server
 * @param nodeToStart node ids to bring up
 * @param configProps optional extra server properties (may be null)
 * @return the same {@code cluster} that was passed in
 * @throws Exception if any server fails to start
 */
protected Cluster startServers(Cluster cluster, String storeXmlFile, List<Integer> nodeToStart, Map<String, String> configProps) throws Exception {
    for (int node : nodeToStart) {
        Properties properties = new Properties();
        if (null != configProps) {
            // Copy all caller-supplied overrides in one shot instead of a manual loop.
            properties.putAll(configProps);
        }
        // turn proxy puts on
        properties.put("proxy.puts.during.rebalance", "true");
        properties.put("bdb.cache.size", String.valueOf(5 * 1024 * 1024));
        properties.put("bdb.one.env.per.store", "true");
        VoldemortConfig config = ServerTestUtils.createServerConfig(true, node, TestUtils.createTempDir().getAbsolutePath(), null, storeXmlFile, properties);
        VoldemortServer server = ServerTestUtils.startVoldemortServer(socketStoreFactory, config, cluster);
        serverMap.put(node, server);
    }
    return cluster;
}
Usage of voldemort.server.VoldemortServer in the voldemort project: class ZoneShrinkageClientTest, method teardown.
@After
public void teardown() throws IOException {
    // Tears down the test fixture: stops every server, releases the admin
    // client, and resets shared cluster-test state.
    try {
        for (VoldemortServer server : servers) {
            ServerTestUtils.stopVoldemortServer(server);
        }
    } finally {
        // Run even if stopping a server threw, so the admin client is never
        // leaked and shared state is always reset for the next test.
        adminClient.close();
        ClusterTestUtils.reset();
    }
}
Usage of voldemort.server.VoldemortServer in the voldemort project: class ZoneShrinkageEndToEndTest, method setup.
@Before
public void setup() throws IOException {
    // Bring up one Voldemort server per cluster node, remembering the socket
    // store factory and config for each so teardown can release them.
    for (Node node : cluster.getNodes()) {
        int nodeId = node.getId();
        String tempFolderPath = TestUtils.createTempDir().getAbsolutePath();
        SocketStoreFactory storeFactory = new TestSocketStoreFactory();
        VoldemortConfig serverConfig = ServerTestUtils.createServerConfigWithDefs(true, nodeId, tempFolderPath, cluster, storeDefs, new Properties());
        // These tests depend on slop handling being active by default.
        Assert.assertTrue(serverConfig.isSlopEnabled());
        Assert.assertTrue(serverConfig.isSlopPusherJobEnabled());
        Assert.assertTrue(serverConfig.getAutoPurgeDeadSlops());
        serverConfig.setSlopFrequencyMs(10000L);
        VoldemortServer startedServer = ServerTestUtils.startVoldemortServer(storeFactory, serverConfig, cluster);
        vservers.put(nodeId, startedServer);
        socketStoreFactories.put(nodeId, storeFactory);
        voldemortConfigs.put(nodeId, serverConfig);
    }
    // Collect the nodes outside the zone being dropped; bootstrap against one
    // of them so the client survives the shrink.
    for (Node node : cluster.getNodes()) {
        if (node.getZoneId() != droppingZoneId) {
            survivingNodes.add(node);
        }
    }
    bootstrapURL = survivingNodes.get(0).getSocketUrl().toString();
}
Usage of voldemort.server.VoldemortServer in the voldemort project: class ZoneShrinkageEndToEndTest, method testAllServersSendingOutSlopsCorrectly.
@Test(timeout = 60000)
public void testAllServersSendingOutSlopsCorrectly() throws InterruptedException {
    final Serializer<ByteArray> slopKeySerializer = new ByteArraySerializer();
    final Serializer<Slop> slopValueSerializer = new SlopSerializer();
    final SlopSerializer slopSerializer = new SlopSerializer();
    StoreDefinition storeDef = storeDefs.get(0);
    TestSocketStoreFactory ssf = new TestSocketStoreFactory();
    // generate for keys each all servers that will be hosted on each server
    // except itself (2*N*(N-1) keys)
    // Map<Integer slopFinalDestinationNodeId, List<Pair<ByteArray key,
    // Integer hostNodeId>>>
    Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys = new HashMap<Integer, List<Pair<ByteArray, Integer>>>();
    for (Node slopFinalDestinationNode : cluster.getNodes()) {
        serverKeys.put(slopFinalDestinationNode.getId(), new ArrayList<Pair<ByteArray, Integer>>());
    }
    // make socket stores to all servers before shrink
    Map<Integer, SocketStore> slopStoresCreatedBeforeShrink = connectSlopStores(ssf, slopKeySerializer, slopValueSerializer);
    sendSlopsToAllOtherNodes(storeDef, slopStoresCreatedBeforeShrink, serverKeys, slopSerializer);
    // update metadata
    executeShrinkZone();
    logger.info("-------------------------------");
    logger.info(" CONNECTING SLOP STORES ");
    logger.info("-------------------------------");
    // make socket stores to all servers after shrink
    Map<Integer, SocketStore> slopStoresCreatedAfterShrink = connectSlopStores(ssf, slopKeySerializer, slopValueSerializer);
    logger.info("-------------------------------");
    logger.info(" CONNECTED SLOP STORES ");
    logger.info("-------------------------------");
    logger.info("-------------------------------");
    logger.info(" SENDING SLOPS ");
    logger.info("-------------------------------");
    sendSlopsToAllOtherNodes(storeDef, slopStoresCreatedAfterShrink, serverKeys, slopSerializer);
    logger.info("-------------------------------");
    logger.info(" SENT SLOPS ");
    logger.info("-------------------------------");
    ServerTestUtils.waitForSlopDrain(vservers, 30000L);
    // verify all proper slops is processed properly (arrived or dropped)
    int goodCount = 0;
    int errorCount = 0;
    for (Integer nodeId : serverKeys.keySet()) {
        VoldemortServer vs = vservers.get(nodeId);
        Store<ByteArray, byte[], byte[]> store = vs.getStoreRepository().getStorageEngine(storeDef.getName());
        List<Pair<ByteArray, Integer>> keySet = serverKeys.get(nodeId);
        // The node's zone is loop-invariant; compute it once per node.
        int nodeZoneId = cluster.getNodeById(nodeId).getZoneId();
        for (Pair<ByteArray, Integer> keyHostIdPair : keySet) {
            ByteArray key = keyHostIdPair.getFirst();
            Integer hostId = keyHostIdPair.getSecond();
            List<Versioned<byte[]>> result = store.get(key, null);
            if (nodeZoneId == droppingZoneId) {
                // Slops destined for the dropped zone must have been purged.
                if (!result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should be gone but exists\n", key.toString(), nodeId, nodeZoneId, hostId));
                    errorCount++;
                } else {
                    goodCount++;
                }
            } else {
                // Slops destined for surviving zones must have been delivered.
                if (result.isEmpty()) {
                    logger.error(String.format("Key %s for Node %d (zone %d) slopped on Node %d should exist but not\n", key.toString(), nodeId, nodeZoneId, hostId));
                    errorCount++;
                } else {
                    goodCount++;
                }
            }
        }
    }
    logger.info(String.format("Good keys count: %d; Error keys count: %d", goodCount, errorCount));
    Assert.assertFalse("Error Occurred BAD:" + errorCount + "; GOOD: " + goodCount + ". Check log.", errorCount > 0);
}

/**
 * Opens a "slop" socket store to every running server and returns them
 * keyed by node id. The SerializingStore.wrap call mirrors the original
 * test; its return value is intentionally unused — presumably only the
 * wrapping side effects matter here (TODO confirm).
 */
private Map<Integer, SocketStore> connectSlopStores(TestSocketStoreFactory ssf, Serializer<ByteArray> slopKeySerializer, Serializer<Slop> slopValueSerializer) {
    Map<Integer, SocketStore> slopStores = new HashMap<Integer, SocketStore>();
    for (Integer nodeId : vservers.keySet()) {
        SocketStore slopStore = ssf.createSocketStore(vservers.get(nodeId).getIdentityNode(), "slop");
        SerializingStore.wrap(slopStore, slopKeySerializer, slopValueSerializer, new IdentitySerializer());
        slopStores.put(nodeId, slopStore);
    }
    return slopStores;
}

/**
 * Writes, twice, one PUT slop on every host destined for every other node,
 * recording each generated key under its destination node in serverKeys.
 */
private void sendSlopsToAllOtherNodes(StoreDefinition storeDef, Map<Integer, SocketStore> slopStores, Map<Integer, List<Pair<ByteArray, Integer>>> serverKeys, SlopSerializer slopSerializer) {
    for (int i = 0; i < 2; i++) {
        for (Integer slopHostId : vservers.keySet()) {
            SocketStore slopStore = slopStores.get(slopHostId);
            for (Integer destinationNodeId : vservers.keySet()) {
                if (!destinationNodeId.equals(slopHostId)) {
                    ByteArray key = generateRandomKey(cluster, destinationNodeId, storeDef.getReplicationFactor());
                    serverKeys.get(destinationNodeId).add(new Pair<ByteArray, Integer>(key, slopHostId));
                    Slop slop = new Slop(storeDef.getName(), Slop.Operation.PUT, key.get(), key.get(), destinationNodeId, new Date());
                    slopStore.put(slop.makeKey(), new Versioned<byte[]>(slopSerializer.toBytes(slop), new VectorClock()), null);
                }
            }
        }
    }
}
Aggregations