Use of voldemort.server.VoldemortServer in project voldemort by voldemort.
The class GossiperTest, method attemptStartAdditionalServer.
private Cluster attemptStartAdditionalServer() throws IOException {
    // Set up a new cluster that is one bigger than the original cluster
    int originalSize = cluster.getNumberOfNodes();
    int numOriginalPorts = originalSize * 3;
    int[] ports = new int[numOriginalPorts + 3];
    for (int i = 0, j = 0; i < originalSize; i++, j += 3) {
        Node node = cluster.getNodeById(i);
        System.arraycopy(new int[] { node.getHttpPort(), node.getSocketPort(), node.getAdminPort() },
                         0, ports, j, 3);
    }
    System.arraycopy(ServerTestUtils.findFreePorts(3), 0, ports, numOriginalPorts, 3);
    // Create a new partitioning scheme with room for a new server
    final Cluster newCluster = ServerTestUtils.getLocalCluster(originalSize + 1,
                                                               ports,
                                                               new int[][] { { 0, 4, 8 }, { 1, 5, 9 }, { 2, 6, 10 }, { 3, 7, 11 } });
    // Create a new server
    VoldemortServer newServer = ServerTestUtils.startVoldemortServer(socketStoreFactory,
                                                                     ServerTestUtils.createServerConfig(useNio,
                                                                                                        3,
                                                                                                        TestUtils.createTempDir().getAbsolutePath(),
                                                                                                        null,
                                                                                                        storesXmlfile,
                                                                                                        props),
                                                                     newCluster);
    // This step is only reached if startVoldemortServer does *not* throw a
    // BindException due to TOCTOU problem with getLocalCluster
    servers.add(newServer);
    return newCluster;
}
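As a usage sketch only (the assertions below are illustrative and not part of GossiperTest), the cluster returned by this helper can be sanity-checked before the caller depends on it; all calls are ones already used in the snippets on this page:

Cluster expanded = attemptStartAdditionalServer();
// The expanded cluster should contain exactly one more node than the original.
assertEquals(cluster.getNumberOfNodes() + 1, expanded.getNumberOfNodes());
// The added node takes the next node id and the three freshly allocated ports.
Node added = expanded.getNodeById(cluster.getNumberOfNodes());
assertTrue(added.getSocketPort() > 0 && added.getAdminPort() > 0);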
Use of voldemort.server.VoldemortServer in project voldemort by voldemort.
The class GossiperTest, method testGossiper.
// Protect against this test running forever until the root cause of running
// forever is found.
@Test(timeout = 1800)
public void testGossiper() throws Exception {
    Cluster newCluster = null;
    boolean startedAdditionalServer = false;
    while (!startedAdditionalServer) {
        try {
            newCluster = attemptStartAdditionalServer();
            startedAdditionalServer = true;
        } catch (IOException ioe) {
            logger.warn("Caught an IOException when attempting to start additional server. Will print stacktrace and then attempt to start additional server again.");
            ioe.printStackTrace();
        }
    }
    // Get the new cluster.xml
    AdminClient localAdminClient = getAdminClient(newCluster);
    Versioned<String> versionedClusterXML = localAdminClient.metadataMgmtOps.getRemoteMetadata(3, MetadataStore.CLUSTER_KEY);
    // Increment the version, let what would be the "donor node" know about
    // it to seed the Gossip.
    Version version = versionedClusterXML.getVersion();
    ((VectorClock) version).incrementVersion(3, ((VectorClock) version).getTimestamp() + 1);
    ((VectorClock) version).incrementVersion(0, ((VectorClock) version).getTimestamp() + 1);
    localAdminClient.metadataMgmtOps.updateRemoteMetadata(0, MetadataStore.CLUSTER_KEY, versionedClusterXML);
    localAdminClient.metadataMgmtOps.updateRemoteMetadata(3, MetadataStore.CLUSTER_KEY, versionedClusterXML);
    try {
        Thread.sleep(500);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    // Wait up to five seconds for Gossip to spread
    final Cluster newFinalCluster = newCluster;
    try {
        TestUtils.assertWithBackoff(5000, new Attempt() {
            public void checkCondition() {
                int serversSeen = 0;
                // Now verify that we have gossiped correctly
                for (VoldemortServer server : servers) {
                    Cluster clusterAtServer = server.getMetadataStore().getCluster();
                    int nodeId = server.getMetadataStore().getNodeId();
                    assertEquals("server " + nodeId + " has heard " + " the gossip about number of nodes",
                                 clusterAtServer.getNumberOfNodes(),
                                 newFinalCluster.getNumberOfNodes());
                    assertEquals("server " + nodeId + " has heard " + " the gossip about partitions",
                                 clusterAtServer.getNodeById(nodeId).getPartitionIds(),
                                 newFinalCluster.getNodeById(nodeId).getPartitionIds());
                    serversSeen++;
                }
                assertEquals("saw all servers", serversSeen, servers.size());
            }
        });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}
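The vector-clock manipulation is the step that actually seeds the gossip: the updated cluster.xml has to carry a clock that dominates whatever the target nodes already hold, otherwise the metadata write could be rejected as obsolete. A minimal standalone sketch of that idea, with illustrative node ids and timestamps (Occurred and Version.compare are assumed to behave as in stock voldemort.versioning; this is not code from GossiperTest):

VectorClock stored = new VectorClock();   // stand-in for the clock a node currently holds
VectorClock seeded = new VectorClock();   // the clock attached to the new cluster.xml
seeded.incrementVersion(3, System.currentTimeMillis());
seeded.incrementVersion(0, System.currentTimeMillis());
// The seeded clock strictly dominates the stored one, so the metadata update
// is accepted on nodes 0 and 3 and can then be spread by the Gossiper.
assert seeded.compare(stored) == Occurred.AFTER;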
Use of voldemort.server.VoldemortServer in project voldemort by voldemort.
The class SlopPurgeTest, method setUp.
@Before
public void setUp() throws Exception {
    cluster = ServerTestUtils.getLocalZonedCluster(6,
                                                   3,
                                                   new int[] { 0, 0, 1, 1, 2, 2 },
                                                   new int[][] { { 0 }, { 2 }, { 4 }, { 1 }, { 3 }, { 5 } });
    servers = new VoldemortServer[cluster.getNodes().size()];
    slopSerializer = new SlopSerializer();
    Properties serverProperties = new Properties();
    // Schedule the slop pusher far far far out in the future, so it won't
    // run during the test
    serverProperties.setProperty("slop.frequency.ms", "" + (Integer.MAX_VALUE));
    // Also no auto purging so we are sure that the only thing deleting the
    // slops is the purge job
    serverProperties.setProperty("auto.purge.dead.slops", "false");
    cluster = ServerTestUtils.startVoldemortCluster(servers,
                                                    null,
                                                    null,
                                                    "test/common/voldemort/config/three-stores-with-zones.xml",
                                                    serverProperties,
                                                    cluster);
    for (VoldemortServer server : servers) {
        if (server.getIdentityNode().getId() == PURGE_SERVER_ID) {
            purgedServer = server;
            break;
        }
    }
    Properties adminProperties = new Properties();
    adminProperties.setProperty("max_connections", "2");
    adminClient = new AdminClient(servers[0].getMetadataStore().getCluster(),
                                  new AdminClientConfig(adminProperties));
}
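For reference, the cluster built above places two nodes in each of three zones with one partition per node. A small sketch that dumps that layout using the fixture's cluster field (it assumes the zone and partition arrays passed to getLocalZonedCluster are applied positionally to node ids 0 through 5):

for (Node node : cluster.getNodes()) {
    // e.g. node 0 -> zone 0, partitions [0]; node 2 -> zone 1, partitions [4]; ...
    System.out.println("node " + node.getId()
                       + " zone " + node.getZoneId()
                       + " partitions " + node.getPartitionIds());
}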
Use of voldemort.server.VoldemortServer in project voldemort by voldemort.
The class RedirectingStoreTest, method setUp.
@Before
public void setUp() throws IOException, InterruptedException {
    currentCluster = ServerTestUtils.getLocalCluster(3, new int[][] { { 0, 1 }, { 2, 3 }, {} });
    targetCluster = UpdateClusterUtils.createUpdatedCluster(currentCluster, 2, Arrays.asList(0));
    this.primaryPartitionsMoved = Lists.newArrayList(0);
    this.secondaryPartitionsMoved = Lists.newArrayList(2, 3);
    this.storeDef = new StoreDefinitionBuilder().setName("test")
                                                .setType(BdbStorageConfiguration.TYPE_NAME)
                                                .setKeySerializer(new SerializerDefinition("string"))
                                                .setValueSerializer(new SerializerDefinition("string"))
                                                .setRoutingPolicy(RoutingTier.CLIENT)
                                                .setRoutingStrategyType(RoutingStrategyType.CONSISTENT_STRATEGY)
                                                .setReplicationFactor(2)
                                                .setPreferredReads(1)
                                                .setRequiredReads(1)
                                                .setPreferredWrites(1)
                                                .setRequiredWrites(1)
                                                .build();
    File tempStoreXml = new File(TestUtils.createTempDir(), "stores.xml");
    FileUtils.writeStringToFile(tempStoreXml,
                                new StoreDefinitionsMapper().writeStoreList(Lists.newArrayList(storeDef)));
    this.servers = new VoldemortServer[3];
    for (int nodeId = 0; nodeId < 3; nodeId++) {
        this.servers[nodeId] = startServer(nodeId, tempStoreXml.getAbsolutePath(), currentCluster);
    }
    // Start another node for only this unit test
    HashMap<ByteArray, byte[]> entrySet = ServerTestUtils.createRandomKeyValuePairs(100);
    SocketStoreClientFactory factory = new SocketStoreClientFactory(new ClientConfig().setBootstrapUrls(Lists.newArrayList("tcp://" + currentCluster.getNodeById(0).getHost() + ":" + currentCluster.getNodeById(0).getSocketPort())));
    StoreClient<Object, Object> storeClient = factory.getStoreClient("test");
    this.primaryEntriesMoved = Maps.newHashMap();
    this.secondaryEntriesMoved = Maps.newHashMap();
    this.proxyPutTestPrimaryEntries = Maps.newHashMap();
    this.proxyPutTestSecondaryEntries = Maps.newHashMap();
    RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, currentCluster);
    for (Entry<ByteArray, byte[]> entry : entrySet.entrySet()) {
        storeClient.put(new String(entry.getKey().get()), new String(entry.getValue()));
        List<Integer> pList = strategy.getPartitionList(entry.getKey().get());
        if (primaryPartitionsMoved.contains(pList.get(0))) {
            primaryEntriesMoved.put(entry.getKey(), entry.getValue());
        } else if (secondaryPartitionsMoved.contains(pList.get(0))) {
            secondaryEntriesMoved.put(entry.getKey(), entry.getValue());
        }
    }
    // Sleep a while for the queries to go through...
    // Hope the 'God of perfect timing' is on our side
    Thread.sleep(500);
    // steal a few primary key-value pairs for testing proxy put logic
    int cnt = 0;
    for (Entry<ByteArray, byte[]> entry : primaryEntriesMoved.entrySet()) {
        if (cnt > 3)
            break;
        this.proxyPutTestPrimaryEntries.put(entry.getKey(), entry.getValue());
        cnt++;
    }
    for (ByteArray key : this.proxyPutTestPrimaryEntries.keySet()) {
        this.primaryEntriesMoved.remove(key);
    }
    assertTrue("Not enough primary entries", primaryEntriesMoved.size() > 1);
    // steal a few secondary key-value pairs for testing proxy put logic
    cnt = 0;
    for (Entry<ByteArray, byte[]> entry : secondaryEntriesMoved.entrySet()) {
        if (cnt > 3)
            break;
        this.proxyPutTestSecondaryEntries.put(entry.getKey(), entry.getValue());
        cnt++;
    }
    for (ByteArray key : this.proxyPutTestSecondaryEntries.keySet()) {
        this.secondaryEntriesMoved.remove(key);
    }
    assertTrue("Not enough secondary entries", secondaryEntriesMoved.size() > 1);
    RebalanceBatchPlan batchPlan = new RebalanceBatchPlan(currentCluster,
                                                          targetCluster,
                                                          Lists.newArrayList(storeDef));
    List<RebalanceTaskInfo> plans = Lists.newArrayList(batchPlan.getBatchPlan());
    // Set into rebalancing state
    for (RebalanceTaskInfo partitionPlan : plans) {
        servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.SERVER_STATE_KEY,
                                                                     MetadataStore.VoldemortState.REBALANCING_MASTER_SERVER);
        servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_STEAL_INFO,
                                                                     new RebalancerState(Lists.newArrayList(partitionPlan)));
        servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_CLUSTER_XML,
                                                                     currentCluster);
        // update original storedefs
        servers[partitionPlan.getStealerId()].getMetadataStore().put(MetadataStore.REBALANCING_SOURCE_STORES_XML,
                                                                     Lists.newArrayList(storeDef));
    }
    // Update the cluster metadata on all three nodes
    for (VoldemortServer server : servers) {
        server.getMetadataStore().put(MetadataStore.CLUSTER_KEY, targetCluster);
    }
}
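The primary/secondary split above relies on getPartitionList returning the key's preference list with the primary partition at index 0 and the replicas after it (replication factor 2 here). A small isolated sketch of that check, reusing only calls already present in this setUp (the key literal is illustrative, not from the test):

RoutingStrategy strategy = new RoutingStrategyFactory().updateRoutingStrategy(storeDef, currentCluster);
byte[] key = "some-key".getBytes();
List<Integer> replicatingPartitions = strategy.getPartitionList(key);
// Index 0 is the key's primary partition; later entries are secondary replicas.
int primaryPartition = replicatingPartitions.get(0);
boolean movesAsPrimary = primaryPartitionsMoved.contains(primaryPartition);
boolean movesAsSecondary = !movesAsPrimary && secondaryPartitionsMoved.contains(primaryPartition);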
Use of voldemort.server.VoldemortServer in project voldemort by voldemort.
The class ZoneCountWriteTest, method setup.
@Before
public void setup() throws IOException {
    storeDef = storeDefs.get(0);
    Integer zoneCountWrite = 1;
    // override
    storeDef = new StoreDefinition(storeDef.getName(), storeDef.getType(), storeDef.getDescription(),
                                   storeDef.getKeySerializer(), storeDef.getValueSerializer(),
                                   storeDef.getTransformsSerializer(), storeDef.getRoutingPolicy(),
                                   storeDef.getRoutingStrategyType(), storeDef.getReplicationFactor(),
                                   storeDef.getPreferredReads(), storeDef.getRequiredReads(),
                                   storeDef.getPreferredWrites(), storeDef.getRequiredWrites(),
                                   storeDef.getViewTargetStoreName(), storeDef.getValueTransformation(),
                                   storeDef.getZoneReplicationFactor(), storeDef.getZoneCountReads(),
                                   zoneCountWrite,
                                   storeDef.getRetentionDays(), storeDef.getRetentionScanThrottleRate(),
                                   storeDef.getRetentionFrequencyDays(), storeDef.getSerializerFactory(),
                                   storeDef.getHintedHandoffStrategyType(), storeDef.getHintPrefListSize(),
                                   storeDef.getOwners(), storeDef.getMemoryFootprintMB());
    storeDefs.set(0, storeDef);
    SocketStoreClientFactory socketStoreClientFactory = new SocketStoreClientFactory(clientConfig);
    for (Integer nodeId : cluster.getNodeIds()) {
        SocketStoreFactory socketStoreFactory = new ClientRequestExecutorPool(2, 10000, 100000, 1024);
        VoldemortConfig config = ServerTestUtils.createServerConfigWithDefs(true,
                                                                            nodeId,
                                                                            TestUtils.createTempDir().getAbsolutePath(),
                                                                            cluster,
                                                                            storeDefs,
                                                                            new Properties());
        VoldemortServer vs = ServerTestUtils.startVoldemortServer(socketStoreFactory, config, cluster);
        vservers.put(nodeId, vs);
    }
    client = socketStoreClientFactory.getRawStore(storeDef.getName(), null);
}
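The long constructor call above rebuilds the entire StoreDefinition just to change zoneCountWrites. If the StoreDefinitionBuilder used in RedirectingStoreTest also exposes zone-count setters (setZoneReplicationFactor, setZoneCountReads and setZoneCountWrites are assumptions here, not verified against the Voldemort API), an equivalent override could read:

StoreDefinition overridden = new StoreDefinitionBuilder().setName(storeDef.getName())
                                                         .setType(storeDef.getType())
                                                         .setKeySerializer(storeDef.getKeySerializer())
                                                         .setValueSerializer(storeDef.getValueSerializer())
                                                         .setRoutingPolicy(storeDef.getRoutingPolicy())
                                                         .setRoutingStrategyType(storeDef.getRoutingStrategyType())
                                                         .setReplicationFactor(storeDef.getReplicationFactor())
                                                         .setZoneReplicationFactor(storeDef.getZoneReplicationFactor())
                                                         .setZoneCountReads(storeDef.getZoneCountReads())
                                                         // the only field actually being changed
                                                         .setZoneCountWrites(1)
                                                         .setPreferredReads(storeDef.getPreferredReads())
                                                         .setRequiredReads(storeDef.getRequiredReads())
                                                         .setPreferredWrites(storeDef.getPreferredWrites())
                                                         .setRequiredWrites(storeDef.getRequiredWrites())
                                                         .build();

Either way only zoneCountWrites changes; every other field is copied straight through from the original definition.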