Use of org.apache.helix.controller.rebalancer.topology.Topology in project helix by apache.
The class AbstractEvenDistributionRebalanceStrategy, method computePartitionAssignment.
/**
 * Force a uniform distribution based on the parent strategy class's calculation result.
 *
 * @param allNodes All instances
 * @param liveNodes List of live instances
 * @param currentMapping current replica mapping
 * @param clusterData cluster data
 * @return a ZNRecord holding the computed preference lists
 * @throws HelixException
 */
@Override
public ZNRecord computePartitionAssignment(final List<String> allNodes, final List<String> liveNodes,
    final Map<String, Map<String, String>> currentMapping, ClusterDataCache clusterData) throws HelixException {
  // Round 1: Calculate the mapping using the base strategy.
  // Note: use all nodes to minimize the influence of live-node changes on the mapping.
  ZNRecord origAssignment =
      getBaseRebalanceStrategy().computePartitionAssignment(allNodes, allNodes, currentMapping, clusterData);
  Map<String, List<String>> origPartitionMap = origAssignment.getListFields();
  // Try to re-assign if the original map is not empty.
  if (!origPartitionMap.isEmpty()) {
    // Transform the current assignment into an instance -> partitions map.
    Map<String, List<String>> nodeToPartitionMap = convertMap(origPartitionMap);
    Map<String, List<String>> finalPartitionMap = null;
    // Round 2: Rebalance the mapping using the card-dealing algorithm to ensure an even distribution.
    Topology allNodeTopo =
        new Topology(allNodes, allNodes, clusterData.getInstanceConfigMap(), clusterData.getClusterConfig());
    CardDealingAdjustmentAlgorithm cardDealer = new CardDealingAdjustmentAlgorithm(allNodeTopo, _replica);
    if (cardDealer.computeMapping(nodeToPartitionMap, _resourceName.hashCode())) {
      // Round 3: Reorder the preference lists so the participants' orders (and thus the states) are uniform.
      finalPartitionMap = shufflePreferenceList(nodeToPartitionMap);
      if (!liveNodes.containsAll(allNodes)) {
        try {
          // Round 4: Re-map partitions on non-live nodes using consistent hashing to reduce movement.
          ConsistentHashingAdjustmentAlgorithm hashPlacement =
              new ConsistentHashingAdjustmentAlgorithm(allNodeTopo, liveNodes);
          if (hashPlacement.computeMapping(nodeToPartitionMap, _resourceName.hashCode())) {
            // Since the mapping was changed by hashPlacement, adjust the node order.
            Map<String, List<String>> adjustedPartitionMap = convertMap(nodeToPartitionMap);
            for (String partition : adjustedPartitionMap.keySet()) {
              List<String> preSelectedList = finalPartitionMap.get(partition);
              Set<String> adjustedNodeList = new HashSet<>(adjustedPartitionMap.get(partition));
              List<String> finalNodeList = adjustedPartitionMap.get(partition);
              int index = 0;
              // 1. Add the nodes from the pre-selected node list first, in order.
              for (String node : preSelectedList) {
                if (adjustedNodeList.remove(node)) {
                  finalNodeList.set(index++, node);
                }
              }
              // 2. Add the remaining nodes to the map.
              for (String node : adjustedNodeList) {
                finalNodeList.set(index++, node);
              }
            }
            finalPartitionMap = adjustedPartitionMap;
          } else {
            // Adjustment failed; the final partition map is not valid.
            finalPartitionMap = null;
          }
        } catch (ExecutionException e) {
          _logger.error("Failed to perform consistent hashing partition assignment.", e);
          finalPartitionMap = null;
        }
      }
    }
    if (null != finalPartitionMap) {
      ZNRecord result = new ZNRecord(_resourceName);
      result.setListFields(finalPartitionMap);
      return result;
    }
  }
  // Forcing an even distribution is not possible; fall back to the base strategy.
  if (_logger.isDebugEnabled()) {
    _logger.debug("Force even distribution is not possible, using the default strategy: "
        + getBaseRebalanceStrategy().getClass().getSimpleName());
  }
  if (liveNodes.equals(allNodes)) {
    return origAssignment;
  } else {
    // Need to re-calculate since the live node list differs from allNodes.
    return getBaseRebalanceStrategy().computePartitionAssignment(allNodes, liveNodes, currentMapping, clusterData);
  }
}
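The method relies on a private convertMap helper (not shown here) that inverts a partition -> node-list map into a node -> partition-list map, and back again. A minimal sketch of such an inversion, assuming plain java.util collections; the actual Helix helper may differ:

// Hedged sketch: inverts a key -> list-of-values map into a value -> list-of-keys map.
// Applied twice, it round-trips between partition -> nodes and node -> partitions.
private Map<String, List<String>> convertMap(Map<String, List<String>> original) {
  Map<String, List<String>> inverted = new HashMap<>();
  for (Map.Entry<String, List<String>> entry : original.entrySet()) {
    for (String value : entry.getValue()) {
      inverted.computeIfAbsent(value, k -> new ArrayList<>()).add(entry.getKey());
    }
  }
  return inverted;
}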
Use of org.apache.helix.controller.rebalancer.topology.Topology in project helix by apache.
The class TestTopology, method testCreateClusterTopologyWithDefaultTopology.
@Test
public void testCreateClusterTopologyWithDefaultTopology() {
  ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
  clusterConfig.setTopologyAwareEnabled(true);
  List<String> allNodes = new ArrayList<String>();
  List<String> liveNodes = new ArrayList<String>();
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<String, InstanceConfig>();
  Map<String, Integer> nodeToWeightMap = new HashMap<String, Integer>();
  for (int i = 0; i < 100; i++) {
    String instance = "localhost_" + i;
    InstanceConfig config = new InstanceConfig(instance);
    String zoneId = "rack_" + i / 10;
    config.setZoneId(zoneId);
    config.setHostName(instance);
    config.setPort("9000");
    allNodes.add(instance);
    int weight = 0;
    if (i % 10 != 0) {
      liveNodes.add(instance);
      weight = 1000;
      if (i % 3 == 0) {
        // Override the default with a non-uniform instance weight.
        weight = (i + 1) * 100;
        config.setWeight(weight);
      }
    }
    instanceConfigMap.put(instance, config);
    if (!nodeToWeightMap.containsKey(zoneId)) {
      nodeToWeightMap.put(zoneId, 0);
    }
    nodeToWeightMap.put(zoneId, nodeToWeightMap.get(zoneId) + weight);
  }
  Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
  Assert.assertTrue(topo.getEndNodeType().equals(Topology.Types.INSTANCE.name()));
  Assert.assertTrue(topo.getFaultZoneType().equals(Topology.Types.ZONE.name()));
  List<Node> faultZones = topo.getFaultZones();
  Assert.assertEquals(faultZones.size(), 10);
  Node root = topo.getRootNode();
  Assert.assertEquals(root.getChildrenCount(Topology.Types.ZONE.name()), 10);
  Assert.assertEquals(root.getChildrenCount(topo.getEndNodeType()), 100);
  // Validate per-zone weights.
  for (Node rack : root.getChildren()) {
    Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
  }
}
Use of org.apache.helix.controller.rebalancer.topology.Topology in project helix by apache.
The class TestTopology, method testCreateClusterTopology.
@Test
public void testCreateClusterTopology() {
  ClusterConfig clusterConfig = new ClusterConfig("Test_Cluster");
  String topology = "/Rack/Sub-Rack/Host/Instance";
  clusterConfig.setTopology(topology);
  clusterConfig.setFaultZoneType("Sub-Rack");
  clusterConfig.setTopologyAwareEnabled(true);
  List<String> allNodes = new ArrayList<String>();
  List<String> liveNodes = new ArrayList<String>();
  Map<String, InstanceConfig> instanceConfigMap = new HashMap<String, InstanceConfig>();
  Map<String, Integer> nodeToWeightMap = new HashMap<String, Integer>();
  for (int i = 0; i < 100; i++) {
    String instance = "localhost_" + i;
    InstanceConfig config = new InstanceConfig(instance);
    String rack_id = "rack_" + i / 25;
    String sub_rack_id = "subrack-" + i / 5;
    String domain = String.format("Rack=%s, Sub-Rack=%s, Host=%s", rack_id, sub_rack_id, instance);
    config.setDomain(domain);
    config.setHostName(instance);
    config.setPort("9000");
    allNodes.add(instance);
    int weight = 0;
    if (i % 10 != 0) {
      liveNodes.add(instance);
      weight = 1000;
      if (i % 3 == 0) {
        // Override the default with a non-uniform instance weight.
        weight = (i + 1) * 100;
        config.setWeight(weight);
      }
    }
    instanceConfigMap.put(instance, config);
    if (!nodeToWeightMap.containsKey(rack_id)) {
      nodeToWeightMap.put(rack_id, 0);
    }
    nodeToWeightMap.put(rack_id, nodeToWeightMap.get(rack_id) + weight);
    if (!nodeToWeightMap.containsKey(sub_rack_id)) {
      nodeToWeightMap.put(sub_rack_id, 0);
    }
    nodeToWeightMap.put(sub_rack_id, nodeToWeightMap.get(sub_rack_id) + weight);
  }
  Topology topo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterConfig);
  Assert.assertTrue(topo.getEndNodeType().equals("Instance"));
  Assert.assertTrue(topo.getFaultZoneType().equals("Sub-Rack"));
  List<Node> faultZones = topo.getFaultZones();
  Assert.assertEquals(faultZones.size(), 20);
  Node root = topo.getRootNode();
  // 100 instances at 25 per rack gives 4 racks; at 5 per sub-rack, 20 sub-racks.
  Assert.assertEquals(root.getChildrenCount("Rack"), 4);
  Assert.assertEquals(root.getChildrenCount("Sub-Rack"), 20);
  Assert.assertEquals(root.getChildrenCount("Host"), 100);
  Assert.assertEquals(root.getChildrenCount("Instance"), 100);
  // Validate weights at each level.
  for (Node rack : root.getChildren()) {
    Assert.assertEquals(rack.getWeight(), (long) nodeToWeightMap.get(rack.getName()));
    for (Node subRack : rack.getChildren()) {
      Assert.assertEquals(subRack.getWeight(), (long) nodeToWeightMap.get(subRack.getName()));
    }
  }
}
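For reference, a minimal sketch of how a caller would register an instance under this custom topology (the setup mirrors the test above; the instance name and indices are illustrative):

InstanceConfig config = new InstanceConfig("localhost_42");
// The domain string places the instance under /Rack/Sub-Rack/Host/Instance:
// rack_1 since 42 / 25 = 1, and subrack-8 since 42 / 5 = 8.
config.setDomain("Rack=rack_1, Sub-Rack=subrack-8, Host=localhost_42");
config.setHostName("localhost_42");
config.setPort("9000");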
Use of org.apache.helix.controller.rebalancer.topology.Topology in project helix by apache.
The class CrushRebalanceStrategy, method computePartitionAssignment.
/**
 * Compute the preference lists (and, optionally, the partition-state mapping) for the given resource.
 *
 * @param allNodes All instances
 * @param liveNodes List of live instances
 * @param currentMapping current replica mapping
 * @param clusterData cluster data
 * @return a ZNRecord holding the preference list for each partition
 * @throws HelixException if a mapping cannot be found
 */
@Override
public ZNRecord computePartitionAssignment(final List<String> allNodes, final List<String> liveNodes,
    final Map<String, Map<String, String>> currentMapping, ClusterDataCache clusterData) throws HelixException {
  Map<String, InstanceConfig> instanceConfigMap = clusterData.getInstanceConfigMap();
  _clusterTopo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterData.getClusterConfig());
  Node topNode = _clusterTopo.getRootNode();
  Map<String, List<String>> newPreferences = new HashMap<String, List<String>>();
  for (int i = 0; i < _partitions.size(); i++) {
    String partitionName = _partitions.get(i);
    long data = partitionName.hashCode();
    // Apply the placement rules.
    List<Node> selected = select(topNode, data, _replicas);
    if (selected.size() < _replicas) {
      Log.error(String.format("Cannot find enough nodes for resource %s partition %s; required %d, found %d",
          _resourceName, partitionName, _replicas, selected.size()));
    }
    List<String> nodeList = new ArrayList<String>();
    for (int j = 0; j < selected.size(); j++) {
      nodeList.add(selected.get(j).getName());
    }
    newPreferences.put(partitionName, nodeList);
  }
  ZNRecord result = new ZNRecord(_resourceName);
  result.setListFields(newPreferences);
  return result;
}
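A hedged usage sketch of how such a strategy is typically driven (the init call follows the Helix RebalanceStrategy interface contract; the resource name, partition list, state count map, and cluster inputs are assumed to be prepared by the caller):

// Sketch only: in practice the Helix controller wires this up internally.
CrushRebalanceStrategy strategy = new CrushRebalanceStrategy();
strategy.init("myResource", partitions, stateCountMap, Integer.MAX_VALUE);
ZNRecord assignment = strategy.computePartitionAssignment(allNodes, liveNodes, currentMapping, clusterData);
// Preference list for a hypothetical partition name:
List<String> preferenceList = assignment.getListField("myResource_0");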
Use of org.apache.helix.controller.rebalancer.topology.Topology in project helix by apache.
The class MultiRoundCrushRebalanceStrategy, method computePartitionAssignment.
/**
 * Compute the preference lists (and, optionally, the partition-state mapping) for the given resource.
 *
 * @param allNodes All instances
 * @param liveNodes List of live instances
 * @param currentMapping current replica mapping
 * @param clusterData cluster data
 * @return a ZNRecord holding the computed assignment
 * @throws HelixException if a mapping cannot be found
 */
@Override
public ZNRecord computePartitionAssignment(final List<String> allNodes, final List<String> liveNodes,
    final Map<String, Map<String, String>> currentMapping, ClusterDataCache clusterData) throws HelixException {
  Map<String, InstanceConfig> instanceConfigMap = clusterData.getInstanceConfigMap();
  _clusterTopo = new Topology(allNodes, liveNodes, instanceConfigMap, clusterData.getClusterConfig());
  Node root = _clusterTopo.getRootNode();
  Map<String, List<Node>> zoneMapping = new HashMap<String, List<Node>>();
  for (int i = 0; i < _partitions.size(); i++) {
    String partitionName = _partitions.get(i);
    long pData = partitionName.hashCode();
    // Select fault zones for this partition.
    List<Node> zones = select(root, _clusterTopo.getFaultZoneType(), pData, _replicas);
    zoneMapping.put(partitionName, zones);
  }
  /* Map the position in the preference list to the state. */
  Map<Integer, String> idxStateMap = new HashMap<Integer, String>();
  int i = 0;
  for (Map.Entry<String, Integer> e : _stateCountMap.entrySet()) {
    String state = e.getKey();
    int count = e.getValue();
    for (int j = 0; j < count; j++) {
      idxStateMap.put(i + j, state);
    }
    i += count;
  }
  // Final mapping: <partition, state> -> list(node)
  Map<String, Map<String, List<Node>>> partitionStateMapping = new HashMap<String, Map<String, List<Node>>>();
  for (Node zone : _clusterTopo.getFaultZones()) {
    // state -> list(partitions)
    LinkedHashMap<String, List<String>> statePartitionMap = new LinkedHashMap<String, List<String>>();
    // TODO: move this outside?
    for (Map.Entry<String, List<Node>> e : zoneMapping.entrySet()) {
      String partition = e.getKey();
      List<Node> zones = e.getValue();
      for (int k = 0; k < zones.size(); k++) {
        if (zones.get(k).equals(zone)) {
          String state = idxStateMap.get(k);
          if (!statePartitionMap.containsKey(state)) {
            statePartitionMap.put(state, new ArrayList<String>());
          }
          statePartitionMap.get(state).add(partition);
        }
      }
    }
    for (String state : _stateCountMap.keySet()) {
      List<String> partitions = statePartitionMap.get(state);
      if (partitions != null && !partitions.isEmpty()) {
        Map<String, Node> assignments = singleZoneMapping(zone, partitions);
        for (String partition : assignments.keySet()) {
          Node node = assignments.get(partition);
          if (!partitionStateMapping.containsKey(partition)) {
            partitionStateMapping.put(partition, new HashMap<String, List<Node>>());
          }
          Map<String, List<Node>> stateMapping = partitionStateMapping.get(partition);
          if (!stateMapping.containsKey(state)) {
            stateMapping.put(state, new ArrayList<Node>());
          }
          stateMapping.get(state).add(node);
        }
      }
    }
  }
  return generateZNRecord(_resourceName, _partitions, partitionStateMapping);
}
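For intuition, the idxStateMap construction can be traced with a hypothetical state count map of {MASTER: 1, SLAVE: 2} (state names are illustrative; the logic mirrors the loop above):

LinkedHashMap<String, Integer> stateCountMap = new LinkedHashMap<String, Integer>();
stateCountMap.put("MASTER", 1);
stateCountMap.put("SLAVE", 2);
Map<Integer, String> idxStateMap = new HashMap<Integer, String>();
int idx = 0;
for (Map.Entry<String, Integer> e : stateCountMap.entrySet()) {
  for (int j = 0; j < e.getValue(); j++) {
    idxStateMap.put(idx + j, e.getKey());
  }
  idx += e.getValue();
}
// idxStateMap is now {0=MASTER, 1=SLAVE, 2=SLAVE}: the node at preference-list
// position 0 hosts the MASTER replica, and positions 1 and 2 host SLAVE replicas.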