Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class BaseStreamingClient, method addStoreToSession.
/**
* Add another store destination to an existing streaming session
*
* @param store the name of the store to stream to
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
protected void addStoreToSession(String store) {
    Exception initializationException = null;
    storeNames.add(store);
    for (Node node : nodesToStream) {
        SocketDestination destination = null;
        SocketAndStreams sands = null;
        try {
            destination = new SocketDestination(node.getHost(), node.getAdminPort(), RequestFormatType.ADMIN_PROTOCOL_BUFFERS);
            sands = streamingSocketPool.checkout(destination);
            DataOutputStream outputStream = sands.getOutputStream();
            DataInputStream inputStream = sands.getInputStream();
            nodeIdStoreToSocketRequest.put(new Pair(store, node.getId()), destination);
            nodeIdStoreToOutputStreamRequest.put(new Pair(store, node.getId()), outputStream);
            nodeIdStoreToInputStreamRequest.put(new Pair(store, node.getId()), inputStream);
            nodeIdStoreToSocketAndStreams.put(new Pair(store, node.getId()), sands);
            nodeIdStoreInitialized.put(new Pair(store, node.getId()), false);
            remoteStoreDefs = adminClient.metadataMgmtOps.getRemoteStoreDefList(node.getId()).getValue();
        } catch (Exception e) {
            logger.error(e);
            try {
                close(sands.getSocket());
                streamingSocketPool.checkin(destination, sands);
            } catch (Exception ioE) {
                logger.error(ioE);
            }
            if (!faultyNodes.contains(node.getId()))
                faultyNodes.add(node.getId());
            initializationException = e;
        }
    }
    if (initializationException != null)
        throw new VoldemortException(initializationException);
    if (store.equals("slop"))
        return;
    boolean foundStore = false;
    for (StoreDefinition remoteStoreDef : remoteStoreDefs) {
        if (remoteStoreDef.getName().equals(store)) {
            RoutingStrategyFactory factory = new RoutingStrategyFactory();
            RoutingStrategy storeRoutingStrategy = factory.updateRoutingStrategy(remoteStoreDef, adminClient.getAdminClientCluster());
            storeToRoutingStrategy.put(store, storeRoutingStrategy);
            validateSufficientNodesAvailable(blackListedNodes, remoteStoreDef);
            foundStore = true;
            break;
        }
    }
    if (!foundStore) {
        logger.error("Store Name not found on the cluster");
        throw new VoldemortException("Store Name not found on the cluster");
    }
}
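The loop above follows a try-every-node pattern: each node is attempted, a failure is logged, the node is recorded in faultyNodes, and only the last initialization exception is rethrown once every node has been tried. The following is a minimal, self-contained sketch of that pattern, not Voldemort code; the class, the exception type, and the failing condition are made up for illustration.

import java.util.ArrayList;
import java.util.List;

// Illustration of the error-handling pattern above: attempt the operation
// against every node, remember which nodes failed, and only throw after all
// of them have been tried. All names here are hypothetical.
public class InitializeAllNodes {

    static class NodeInitException extends RuntimeException {
        NodeInitException(Throwable cause) {
            super(cause);
        }
    }

    static void initializeAll(List<Integer> nodeIds, List<Integer> faultyNodes) {
        Exception initializationException = null;
        for (int nodeId : nodeIds) {
            try {
                // stand-in for socket checkout and stream registration
                if (nodeId % 2 == 1) {
                    throw new IllegalStateException("cannot reach node " + nodeId);
                }
            } catch (Exception e) {
                if (!faultyNodes.contains(nodeId)) {
                    faultyNodes.add(nodeId);
                }
                // keep the last failure but continue with the remaining nodes
                initializationException = e;
            }
        }
        if (initializationException != null) {
            throw new NodeInitException(initializationException);
        }
    }

    public static void main(String[] args) {
        List<Integer> faulty = new ArrayList<>();
        try {
            initializeAll(List.of(0, 1, 2, 3), faulty);
        } catch (NodeInitException e) {
            System.out.println("faulty nodes: " + faulty); // prints [1, 3]
        }
    }
}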
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class RebalancePlan, method plan.
/**
* Create a plan. The plan consists of batches. Each batch involves the
* movement of no more than batchSize primary partitions. The movement of a
* single primary partition may require migration of other n-ary replicas,
* and potentially deletions. Migrating a primary or n-ary partition
* requires migrating one partition-store for every store hosted at that
* partition.
*/
private void plan() {
    // Mapping of stealer node to list of primary partitions being moved
    final TreeMultimap<Integer, Integer> stealerToStolenPrimaryPartitions = TreeMultimap.create();
    // Output initial and final cluster
    if (outputDir != null)
        RebalanceUtils.dumpClusters(currentCluster, finalCluster, outputDir);
    // Determine which partitions must be stolen
    for (Node stealerNode : finalCluster.getNodes()) {
        List<Integer> stolenPrimaryPartitions = RebalanceUtils.getStolenPrimaryPartitions(currentCluster, finalCluster, stealerNode.getId());
        if (stolenPrimaryPartitions.size() > 0) {
            numPrimaryPartitionMoves += stolenPrimaryPartitions.size();
            stealerToStolenPrimaryPartitions.putAll(stealerNode.getId(), stolenPrimaryPartitions);
        }
    }
    // Determine plan batch-by-batch
    int batches = 0;
    Cluster batchCurrentCluster = Cluster.cloneCluster(currentCluster);
    List<StoreDefinition> batchCurrentStoreDefs = this.currentStoreDefs;
    List<StoreDefinition> batchFinalStoreDefs = this.finalStoreDefs;
    Cluster batchFinalCluster = RebalanceUtils.getInterimCluster(this.currentCluster, this.finalCluster);
    while (!stealerToStolenPrimaryPartitions.isEmpty()) {
        int partitions = 0;
        List<Entry<Integer, Integer>> partitionsMoved = Lists.newArrayList();
        for (Entry<Integer, Integer> stealerToPartition : stealerToStolenPrimaryPartitions.entries()) {
            partitionsMoved.add(stealerToPartition);
            batchFinalCluster = UpdateClusterUtils.createUpdatedCluster(batchFinalCluster, stealerToPartition.getKey(), Lists.newArrayList(stealerToPartition.getValue()));
            partitions++;
            if (partitions == batchSize)
                break;
        }
        // Remove the partitions moved
        for (Iterator<Entry<Integer, Integer>> partitionMoved = partitionsMoved.iterator(); partitionMoved.hasNext(); ) {
            Entry<Integer, Integer> entry = partitionMoved.next();
            stealerToStolenPrimaryPartitions.remove(entry.getKey(), entry.getValue());
        }
        if (outputDir != null)
            RebalanceUtils.dumpClusters(batchCurrentCluster, batchFinalCluster, outputDir, "batch-" + Integer.toString(batches) + ".");
        // Generate a plan to compute the tasks
        final RebalanceBatchPlan RebalanceBatchPlan = new RebalanceBatchPlan(batchCurrentCluster, batchCurrentStoreDefs, batchFinalCluster, batchFinalStoreDefs);
        batchPlans.add(RebalanceBatchPlan);
        numXZonePartitionStoreMoves += RebalanceBatchPlan.getCrossZonePartitionStoreMoves();
        numPartitionStoreMoves += RebalanceBatchPlan.getPartitionStoreMoves();
        nodeMoveMap.add(RebalanceBatchPlan.getNodeMoveMap());
        zoneMoveMap.add(RebalanceBatchPlan.getZoneMoveMap());
        batches++;
        batchCurrentCluster = Cluster.cloneCluster(batchFinalCluster);
        // batchCurrentStoreDefs can only be different from
        // batchFinalStoreDefs for the initial batch.
        batchCurrentStoreDefs = batchFinalStoreDefs;
    }
    logger.info(this);
}
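The heart of plan() is the batching loop: it repeatedly peels off at most batchSize (stealer, partition) entries from the multimap, applies them to an interim cluster, and builds a RebalanceBatchPlan for that step. Below is a self-contained sketch of just the chunking step using the same Guava TreeMultimap; the node and partition ids are made up, and the per-batch cluster update is omitted.

import com.google.common.collect.Lists;
import com.google.common.collect.TreeMultimap;
import java.util.List;
import java.util.Map.Entry;

public class BatchChunkingSketch {

    public static void main(String[] args) {
        // stealer node id -> primary partition ids it must steal (made-up values)
        TreeMultimap<Integer, Integer> stealerToPartitions = TreeMultimap.create();
        stealerToPartitions.putAll(1, Lists.newArrayList(0, 4, 8));
        stealerToPartitions.putAll(2, Lists.newArrayList(1, 5));

        int batchSize = 2;
        int batch = 0;
        while (!stealerToPartitions.isEmpty()) {
            // collect at most batchSize (stealer, partition) entries for this batch
            List<Entry<Integer, Integer>> moved = Lists.newArrayList();
            for (Entry<Integer, Integer> entry : stealerToPartitions.entries()) {
                moved.add(entry);
                if (moved.size() == batchSize) {
                    break;
                }
            }
            System.out.println("batch " + batch++ + ": " + moved);
            // remove the collected entries so the next batch only sees remaining work
            for (Entry<Integer, Integer> entry : moved) {
                stealerToPartitions.remove(entry.getKey(), entry.getValue());
            }
        }
    }
}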
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class PartitionBalanceUtils, method analyzeInvalidMetadataRate.
// TODO: (refactor) separate analysis from pretty printing and add a unit
// test for the analysis sub-method.
/**
* Compares current cluster with final cluster. Uses pertinent store defs
* for each cluster to determine if a node that hosts a zone-primary in the
* current cluster will no longer host any zone-nary in the final cluster.
* This check is the precondition for a server returning an invalid metadata
* exception to a client on a normal-case put or get, the normal case being
* that the zone-primary receives the pseudo-master put or the get operation.
*
* @param currentCluster
* @param currentStoreDefs
* @param finalCluster
* @param finalStoreDefs
* @return pretty-printed string documenting invalid metadata rates for each
* zone.
*/
public static String analyzeInvalidMetadataRate(final Cluster currentCluster, List<StoreDefinition> currentStoreDefs, final Cluster finalCluster, List<StoreDefinition> finalStoreDefs) {
    StringBuilder sb = new StringBuilder();
    sb.append("Dump of invalid metadata rates per zone").append(Utils.NEWLINE);
    HashMap<StoreDefinition, Integer> uniqueStores = StoreDefinitionUtils.getUniqueStoreDefinitionsWithCounts(currentStoreDefs);
    for (StoreDefinition currentStoreDef : uniqueStores.keySet()) {
        sb.append("Store exemplar: " + currentStoreDef.getName()).append(Utils.NEWLINE).append("\tThere are " + uniqueStores.get(currentStoreDef) + " other similar stores.").append(Utils.NEWLINE);
        StoreRoutingPlan currentSRP = new StoreRoutingPlan(currentCluster, currentStoreDef);
        StoreDefinition finalStoreDef = StoreUtils.getStoreDef(finalStoreDefs, currentStoreDef.getName());
        StoreRoutingPlan finalSRP = new StoreRoutingPlan(finalCluster, finalStoreDef);
        // Only care about existing zones
        for (int zoneId : currentCluster.getZoneIds()) {
            int zonePrimariesCount = 0;
            int invalidMetadata = 0;
            // Examine nodes in current cluster in existing zone.
            for (int nodeId : currentCluster.getNodeIdsInZone(zoneId)) {
                // For every zone-primary in current cluster
                for (int zonePrimaryPartitionId : currentSRP.getZonePrimaryPartitionIds(nodeId)) {
                    zonePrimariesCount++;
                    // If the final routing no longer places a zone-nary for this
                    // partition on the same node, InvalidMetadataException will fire.
                    if (!finalSRP.getZoneNAryPartitionIds(nodeId).contains(zonePrimaryPartitionId)) {
                        invalidMetadata++;
                    }
                }
            }
            float rate = invalidMetadata / (float) zonePrimariesCount;
            sb.append("\tZone " + zoneId).append(" : total zone primaries " + zonePrimariesCount).append(", # that trigger invalid metadata " + invalidMetadata).append(" => " + rate).append(Utils.NEWLINE);
        }
    }
    return sb.toString();
}
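The inner check reduces to set membership: a zone-primary partition hosted on a node in the current layout counts as invalid metadata if the final layout no longer places any zone-nary replica of that partition on the same node. Below is a stripped-down sketch of that rate computation with plain collections; the per-node partition sets stand in for the StoreRoutingPlan lookups and the values are made up.

import java.util.Map;
import java.util.Set;

public class InvalidMetadataRateSketch {

    /** Fraction of current zone-primary partitions whose node loses them in the final layout. */
    static float invalidMetadataRate(Map<Integer, Set<Integer>> currentZonePrimaries,
                                     Map<Integer, Set<Integer>> finalZoneNaries) {
        int zonePrimaries = 0;
        int invalid = 0;
        for (Map.Entry<Integer, Set<Integer>> e : currentZonePrimaries.entrySet()) {
            int nodeId = e.getKey();
            Set<Integer> finalPartitions = finalZoneNaries.getOrDefault(nodeId, Set.of());
            for (int partitionId : e.getValue()) {
                zonePrimaries++;
                if (!finalPartitions.contains(partitionId)) {
                    invalid++; // this node would trigger invalid metadata for this partition
                }
            }
        }
        return zonePrimaries == 0 ? 0f : invalid / (float) zonePrimaries;
    }

    public static void main(String[] args) {
        // node 0 keeps partition 0 but loses partition 2; node 1 keeps partition 1
        Map<Integer, Set<Integer>> current = Map.of(0, Set.of(0, 2), 1, Set.of(1));
        Map<Integer, Set<Integer>> fin = Map.of(0, Set.of(0), 1, Set.of(1, 2));
        System.out.println(invalidMetadataRate(current, fin)); // 1 of 3 => 0.33333334
    }
}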
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class RebalanceUtils, method dropZone.
/**
* Similar to {@link RebalanceUtils#vacateZone(Cluster, int)}, takes the
* current store definitions in the cluster and creates store definitions
* with the specified zone effectively dropped.
*
* To drop a zone, we adjust the total replication factor and remove the
* zone replication factor entry for the dropped zone.
*
* @param currentStoreDefs
* @param dropZoneId
* @return the adjusted list of store definitions
*/
public static List<StoreDefinition> dropZone(List<StoreDefinition> currentStoreDefs, int dropZoneId) {
    List<StoreDefinition> adjustedStoreDefList = new ArrayList<StoreDefinition>();
    for (StoreDefinition storeDef : currentStoreDefs) {
        HashMap<Integer, Integer> zoneRepFactorMap = storeDef.getZoneReplicationFactor();
        if (!zoneRepFactorMap.containsKey(dropZoneId)) {
            throw new VoldemortException("Store " + storeDef.getName() + " does not have replication factor for zone " + dropZoneId);
        }
        StoreDefinitionBuilder adjustedStoreDefBuilder = StoreDefinitionUtils.getBuilderForStoreDef(storeDef);
        if (!storeDef.hasPreferredReads()) {
            adjustedStoreDefBuilder.setPreferredReads(null);
        }
        if (!storeDef.hasPreferredWrites()) {
            adjustedStoreDefBuilder.setPreferredWrites(null);
        }
        // Copy all zone replication factor entries except for dropped zone
        HashMap<Integer, Integer> adjustedZoneRepFactorMap = new HashMap<Integer, Integer>();
        for (Integer zoneId : zoneRepFactorMap.keySet()) {
            if (zoneId != dropZoneId) {
                adjustedZoneRepFactorMap.put(zoneId, zoneRepFactorMap.get(zoneId));
            }
        }
        adjustedStoreDefBuilder.setZoneReplicationFactor(adjustedZoneRepFactorMap);
        // adjust the replication factor
        int zoneRepFactor = zoneRepFactorMap.get(dropZoneId);
        adjustedStoreDefBuilder.setReplicationFactor(adjustedStoreDefBuilder.getReplicationFactor() - zoneRepFactor);
        adjustedStoreDefList.add(adjustedStoreDefBuilder.build());
    }
    return adjustedStoreDefList;
}
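The adjustment itself is simple map-and-counter surgery: copy every zone replication entry except the dropped zone, then shrink the total replication factor by the dropped zone's share. Below is a self-contained sketch of just that arithmetic, independent of Voldemort's StoreDefinitionBuilder; the replication factors are made up for illustration.

import java.util.HashMap;
import java.util.Map;

public class DropZoneSketch {

    public static void main(String[] args) {
        // zone id -> replicas kept in that zone, e.g. 2 replicas in zone 0 and 2 in zone 1
        Map<Integer, Integer> zoneRepFactor = new HashMap<>();
        zoneRepFactor.put(0, 2);
        zoneRepFactor.put(1, 2);
        int replicationFactor = 4;

        int dropZoneId = 1;
        if (!zoneRepFactor.containsKey(dropZoneId)) {
            throw new IllegalArgumentException("no replication factor for zone " + dropZoneId);
        }
        // copy every entry except the dropped zone, and shrink the total accordingly
        Map<Integer, Integer> adjusted = new HashMap<>(zoneRepFactor);
        int dropped = adjusted.remove(dropZoneId);
        replicationFactor -= dropped;

        System.out.println(adjusted + " total=" + replicationFactor); // {0=2} total=2
    }
}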
Use of voldemort.store.StoreDefinition in project voldemort by voldemort.
The class StoreDefinitionUtils, method validateNewStoreDefsAreNonBreaking.
/**
* Ensure that new store definitions that are specified for an update do not include breaking changes to the store.
* @param oldStoreDefs
* @param newStoreDefs
*/
public static void validateNewStoreDefsAreNonBreaking(List<StoreDefinition> oldStoreDefs, List<StoreDefinition> newStoreDefs) {
    Map<String, StoreDefinition> oldStoreMap = new HashMap<String, StoreDefinition>();
    Map<String, StoreDefinition> newStoreMap = new HashMap<String, StoreDefinition>();
    for (StoreDefinition storeDef : oldStoreDefs) {
        oldStoreMap.put(storeDef.getName(), storeDef);
    }
    for (StoreDefinition storeDef : newStoreDefs) {
        newStoreMap.put(storeDef.getName(), storeDef);
    }
    for (String storeName : oldStoreMap.keySet()) {
        if (newStoreMap.containsKey(storeName)) {
            validateNewStoreDefIsNonBreaking(oldStoreMap.get(storeName), newStoreMap.get(storeName));
        }
    }
}
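Note that only stores present in both lists are cross-checked; stores that are newly added or dropped are not validated here. Below is a plain-Java sketch of that join-by-name step; the per-store check is a made-up stand-in that compares a single "type" field, not Voldemort's validateNewStoreDefIsNonBreaking.

import java.util.HashMap;
import java.util.Map;

public class NonBreakingCheckSketch {

    // Hypothetical per-store check: here we just require the "type" field to be unchanged.
    static void validatePair(String storeName, String oldType, String newType) {
        if (!oldType.equals(newType)) {
            throw new IllegalArgumentException("breaking change to store " + storeName);
        }
    }

    static void validateAll(Map<String, String> oldDefs, Map<String, String> newDefs) {
        // only stores present in both maps are compared; added or dropped stores are ignored
        for (Map.Entry<String, String> e : oldDefs.entrySet()) {
            String newType = newDefs.get(e.getKey());
            if (newType != null) {
                validatePair(e.getKey(), e.getValue(), newType);
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> oldDefs = new HashMap<>();
        oldDefs.put("users", "bdb");
        Map<String, String> newDefs = new HashMap<>();
        newDefs.put("users", "bdb");
        newDefs.put("carts", "bdb");
        validateAll(oldDefs, newDefs); // passes: "users" unchanged, "carts" is new
    }
}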