Usage of voldemort.cluster.Cluster in the voldemort project:
class RebalanceRebootstrapConsistencyTest, method rebalance.
/*
 * Simulates a rebalance: migrates partitions 0 and 1 from node 0 to
 * node 1, rewrites the cluster and store metadata on every node so that
 * node 1 owns all partitions, and finally bumps the cluster metadata
 * version so clients re-bootstrap.
 */
public void rebalance() {
    // Requires at least two running servers to rebalance between.
    assert servers != null && servers.size() > 1;
    VoldemortConfig config = servers.get(0).getVoldemortConfig();
    adminClient = AdminClient.createTempAdminClient(config, cluster, 4);
    // Move partitions 0 and 1 from node 0 to node 1 and block (up to 5s)
    // until the asynchronous migration request completes.
    List<Integer> partitionIds = ImmutableList.of(0, 1);
    int req = adminClient.storeMntOps.migratePartitions(0, 1, testStoreNameRW, partitionIds, null, null);
    adminClient.rpcOps.waitForCompletion(1, req, 5, TimeUnit.SECONDS);
    // Build the post-rebalance topology: node 0 hosts no partitions,
    // node 1 hosts partitions 0 and 1.
    Versioned<Cluster> versionedCluster = adminClient.metadataMgmtOps.getRemoteCluster(0);
    Node node0 = versionedCluster.getValue().getNodeById(0);
    Node node1 = versionedCluster.getValue().getNodeById(1);
    Node newNode0 = new Node(node0.getId(), node0.getHost(), node0.getHttpPort(), node0.getSocketPort(), node0.getAdminPort(), ImmutableList.<Integer>of());
    Node newNode1 = new Node(node1.getId(), node1.getHost(), node1.getHttpPort(), node1.getSocketPort(), node1.getAdminPort(), ImmutableList.of(0, 1));
    // Drop the migrated partitions' data from node 0.
    adminClient.storeMntOps.deletePartitions(0, testStoreNameRW, ImmutableList.of(0, 1), null);
    newCluster = new Cluster(cluster.getName(), ImmutableList.of(newNode0, newNode1), Lists.newArrayList(cluster.getZones()));
    newstoredefs = new ArrayList<StoreDefinition>();
    newstoredefs.add(rwStoreDefWithReplication2);
    // Push the new stores.xml and cluster.xml to every node, advancing the
    // vector clock before each node's updates.
    // NOTE(review): the cast returns the same VectorClock instance on every
    // iteration, so it accumulates increments across nodes and is shared by
    // both metadata writes -- presumably intentional for this test; verify
    // if updates start failing with obsolete-version errors.
    for (Node node : cluster.getNodes()) {
        VectorClock clock = (VectorClock) versionedCluster.getVersion();
        clock.incrementVersion(node.getId(), System.currentTimeMillis());
        adminClient.metadataMgmtOps.updateRemoteMetadata(node.getId(), MetadataStore.STORES_KEY, new Versioned<String>(storeMapper.writeStoreList(newstoredefs), clock));
        adminClient.metadataMgmtOps.updateRemoteMetadata(node.getId(), MetadataStore.CLUSTER_KEY, new Versioned<String>(clusterMapper.writeCluster(newCluster), clock));
    }
    // Bump the cluster metadata version so clients notice the change.
    adminClient.metadataMgmtOps.updateMetadataversion(adminClient.getAdminClientCluster().getNodeIds(), CLUSTER_VERSION_KEY);
}
Usage of voldemort.cluster.Cluster in the voldemort project:
class Gossiper, method selectPeer.
/**
 * Picks a random peer other than this node for a round of gossip. Kept
 * <code>protected</code> rather than <code>private</code> so subclasses can
 * substitute smarter selection (e.g., datacenter/rack awareness).
 *
 * @return Peer for Gossip.
 */
protected Node selectPeer() {
    Cluster cluster = metadataStore.getCluster();
    int numberOfNodes = cluster.getNumberOfNodes();
    int selfId = metadataStore.getNodeId();
    // Redraw until the pick is someone other than ourselves.
    Node candidate = cluster.getNodeById(random.nextInt(numberOfNodes));
    while (candidate.getId() == selfId) {
        candidate = cluster.getNodeById(random.nextInt(numberOfNodes));
    }
    return candidate;
}
Usage of voldemort.cluster.Cluster in the voldemort project:
class ClusterForkLiftTool, method run.
@Override
public void run() {
    final Cluster srcCluster = srcAdminClient.getAdminClientCluster();
    try {
        // Process stores one-by-one.
        for (String store : storesList) {
            logger.info("Processing store " + store);
            // Commit/recovery callbacks for the streaming session are
            // deliberate no-ops for this tool.
            dstStreamingClient.initStreamingSession(store, new Callable<Object>() {

                @Override
                public Object call() throws Exception {
                    return null;
                }
            }, new Callable<Object>() {

                @Override
                public Object call() throws Exception {
                    return null;
                }
            }, true);
            // BUGFIX: size the latch by the number of partitions actually
            // being forklifted (partitionList), not by the cluster's total
            // partition count. The old count of
            // srcCluster.getNumberOfPartitions() made latch.await() hang
            // forever whenever partitionList was a proper subset.
            final CountDownLatch latch = new CountDownLatch(partitionList.size());
            StoreRoutingPlan storeInstance = new StoreRoutingPlan(srcCluster, srcStoreDefMap.get(store));
            // Submit work for every partition that is to be forklifted.
            for (Integer partitionId : partitionList) {
                if (this.mode == ForkLiftTaskMode.global_resolution) {
                    // do thorough global resolution across replicas
                    SinglePartitionGloballyResolvingForkLiftTask work = new SinglePartitionGloballyResolvingForkLiftTask(storeInstance, partitionId, latch);
                    workerPool.submit(work);
                } else if (this.mode == ForkLiftTaskMode.primary_resolution) {
                    // do the less clean, but much faster route
                    SinglePartitionPrimaryResolvingForkLiftTask work = new SinglePartitionPrimaryResolvingForkLiftTask(storeInstance, partitionId, latch);
                    workerPool.submit(work);
                } else if (this.mode == ForkLiftTaskMode.no_resolution) {
                    // skip resolution entirely
                    SinglePartitionNoResolutionForkLiftTask work = new SinglePartitionNoResolutionForkLiftTask(storeInstance, partitionId, latch);
                    workerPool.submit(work);
                } else {
                    // Unknown mode: no task was submitted for this partition,
                    // so count down here to keep latch.await() from hanging.
                    logger.error("Unhandled forklift mode " + this.mode + " for partition " + partitionId);
                    latch.countDown();
                }
            }
            // Wait until all submitted partitions are processed.
            latch.await();
            dstStreamingClient.closeStreamingSession();
            logger.info("Finished processing store " + store);
        }
    } catch (Exception e) {
        logger.error("Exception running forklift tool", e);
    } finally {
        // Always release the pool and both admin connections.
        workerPool.shutdown();
        try {
            workerPool.awaitTermination(DEFAULT_WORKER_POOL_SHUTDOWN_WAIT_MINS, TimeUnit.MINUTES);
        } catch (InterruptedException ie) {
            logger.error("InterruptedException while waiting for worker pool to shutdown", ie);
        }
        srcAdminClient.close();
        dstStreamingClient.getAdminClient().close();
    }
}
Usage of voldemort.cluster.Cluster in the voldemort project:
class RebalanceUtils, method vacateZone.
/**
 * Given the current cluster and a zone id that needs to be dropped, this
 * method removes all partitions from the zone being dropped and moves them
 * to the existing zones. The partitions are moved intelligently so as to
 * avoid any data movement in the existing zones.
 *
 * This is achieved by moving each partition to the node in a surviving
 * zone that is zone-n-ary for that partition in the surviving zone.
 *
 * @param currentCluster Current cluster metadata
 * @param dropZoneId Id of the zone being dropped
 * @return Returns an interim cluster with empty partition lists on the
 *         nodes from the zone being dropped
 *
 */
public static Cluster vacateZone(Cluster currentCluster, int dropZoneId) {
    // Work on a clone so currentCluster is never mutated.
    Cluster returnCluster = Cluster.cloneCluster(currentCluster);
    // Go over each node in the zone being dropped
    for (Integer nodeId : currentCluster.getNodeIdsInZone(dropZoneId)) {
        // For each node grab all the partitions it hosts
        for (Integer partitionId : currentCluster.getNodeById(nodeId).getPartitionIds()) {
            // Now for each partition find a new home..which would be a node
            // in one of the existing zones
            int finalZoneId = -1;
            int finalNodeId = -1;
            int adjacentPartitionId = partitionId;
            // Walk forward through the partition ring (wrapping around) until
            // the adjacent partition's owner sits outside the dropped zone.
            // NOTE(review): createUpdatedCluster is invoked on every
            // iteration whose owner differs from the original partition, so
            // the partition may be moved repeatedly before settling --
            // presumably only the final placement matters; confirm before
            // restructuring this loop.
            do {
                adjacentPartitionId = (adjacentPartitionId + 1) % currentCluster.getNumberOfPartitions();
                finalNodeId = currentCluster.getNodeForPartitionId(adjacentPartitionId).getId();
                finalZoneId = currentCluster.getZoneForPartitionId(adjacentPartitionId).getId();
                if (adjacentPartitionId == partitionId) {
                    // Wrapped all the way around: no other owner was found.
                    logger.error("PartitionId " + partitionId + "stays unchanged \n");
                } else {
                    logger.info("PartitionId " + partitionId + " goes together with partition " + adjacentPartitionId + " on node " + finalNodeId + " in zone " + finalZoneId);
                    returnCluster = UpdateClusterUtils.createUpdatedCluster(returnCluster, finalNodeId, Lists.newArrayList(partitionId));
                }
            } while (finalZoneId == dropZoneId);
        }
    }
    return returnCluster;
}
Usage of voldemort.cluster.Cluster in the voldemort project:
class ServerTestUtils, method getLocalCluster.
/**
 * Returns a cluster with <b>numberOfNodes</b> nodes in <b>numberOfZones</b>
 * zones. It is important that <b>numberOfNodes</b> be divisible by
 * <b>numberOfZones</b>
 *
 * @param numberOfNodes Number of nodes in the cluster
 * @param partitionsPerNode Number of partitions in one node
 * @param numberOfZones Number of zones (must be at least 1)
 * @return Cluster
 */
public static Cluster getLocalCluster(int numberOfNodes, int partitionsPerNode, int numberOfZones) {
    // BUGFIX: a zone count below 1 previously slipped past the divisibility
    // guard and crashed later with an opaque ArithmeticException on the
    // numberOfNodes / numberOfZones division; fail fast with a clear message.
    if (numberOfZones < 1) {
        throw new VoldemortException("The number of zones (" + numberOfZones + ") must be at least 1");
    }
    if (numberOfNodes > 0 && numberOfNodes % numberOfZones != 0) {
        throw new VoldemortException("The number of nodes (" + numberOfNodes + ") is not divisible by number of zones (" + numberOfZones + ")");
    }
    // Three ports per node: HTTP, socket and admin.
    int[] ports = findFreePorts(3 * numberOfNodes);
    // Shuffle all partition ids so each node receives a random slice.
    List<Integer> partitions = Lists.newArrayList();
    for (int i = 0; i < partitionsPerNode * numberOfNodes; i++) {
        partitions.add(i);
    }
    Collections.shuffle(partitions);
    // Generate nodes; zone id is assigned in contiguous blocks of nodes.
    int numberOfNodesPerZone = numberOfNodes / numberOfZones;
    List<Node> nodes = new ArrayList<Node>(numberOfNodes);
    for (int i = 0; i < numberOfNodes; i++) {
        nodes.add(new Node(i, "localhost", ports[3 * i], ports[3 * i + 1], ports[3 * i + 2], i / numberOfNodesPerZone, partitions.subList(partitionsPerNode * i, partitionsPerNode * i + partitionsPerNode)));
    }
    // Only build explicit Zone objects when more than one zone is requested;
    // the single-zone constructor uses the default zone.
    if (numberOfZones > 1) {
        List<Zone> zones = getZones(numberOfZones);
        return new Cluster("cluster", nodes, zones);
    } else {
        return new Cluster("cluster", nodes);
    }
}
(End of aggregated usages.)