
Example 1 with CardDealingAdjustmentAlgorithm

Use of org.apache.helix.controller.rebalancer.strategy.crushMapping.CardDealingAdjustmentAlgorithm in the project helix by apache.

The method computePartitionAssignment from the class AbstractEvenDistributionRebalanceStrategy:

/**
 * Force a uniform distribution based on the parent strategy class's calculation result.
 *
 * @param allNodes       All instances
 * @param liveNodes      List of live instances
 * @param currentMapping Current replica mapping
 * @param clusterData    Cluster data
 * @return the computed partition assignment as a ZNRecord
 * @throws HelixException if the assignment cannot be computed
 */
@Override
public ZNRecord computePartitionAssignment(final List<String> allNodes, final List<String> liveNodes, final Map<String, Map<String, String>> currentMapping, ClusterDataCache clusterData) throws HelixException {
    // Round 1: Calculate the mapping using the base strategy.
    // Note: use all nodes (not just live ones) to minimize the influence of live-node changes on the mapping.
    ZNRecord origAssignment = getBaseRebalanceStrategy().computePartitionAssignment(allNodes, allNodes, currentMapping, clusterData);
    Map<String, List<String>> origPartitionMap = origAssignment.getListFields();
    // Try to re-assign if the original map is not empty
    if (!origPartitionMap.isEmpty()) {
        // Transform current assignment to instance->partitions map, and get total partitions
        Map<String, List<String>> nodeToPartitionMap = convertMap(origPartitionMap);
        Map<String, List<String>> finalPartitionMap = null;
        // Round 2: Rebalance the mapping using the card dealing algorithm to ensure an even distribution.
        Topology allNodeTopo = new Topology(allNodes, allNodes, clusterData.getInstanceConfigMap(), clusterData.getClusterConfig());
        CardDealingAdjustmentAlgorithm cardDealer = new CardDealingAdjustmentAlgorithm(allNodeTopo, _replica);
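        // _resourceName.hashCode() below acts as a deterministic, per-resource seed,
        // so repeated runs produce the same adjustment for the same resource.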
        if (cardDealer.computeMapping(nodeToPartitionMap, _resourceName.hashCode())) {
            // Round 3: Reorder preference lists to ensure the participants' orders (and thus the states) are uniform.
            finalPartitionMap = shufflePreferenceList(nodeToPartitionMap);
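            // The re-mapping below is only needed when some nodes are offline;
            // if every node is live, the card-dealt mapping stands as-is.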
            if (!liveNodes.containsAll(allNodes)) {
                try {
                    // Round 4: Re-map the partitions on non-live nodes using consistent hashing to reduce partition movement.
                    ConsistentHashingAdjustmentAlgorithm hashPlacement = new ConsistentHashingAdjustmentAlgorithm(allNodeTopo, liveNodes);
                    if (hashPlacement.computeMapping(nodeToPartitionMap, _resourceName.hashCode())) {
                        // Since the mapping was changed by hashPlacement, the node order needs to be adjusted.
                        Map<String, List<String>> adjustedPartitionMap = convertMap(nodeToPartitionMap);
                        for (String partition : adjustedPartitionMap.keySet()) {
                            List<String> preSelectedList = finalPartitionMap.get(partition);
                            Set<String> adjustedNodeList = new HashSet<>(adjustedPartitionMap.get(partition));
                            List<String> finalNodeList = adjustedPartitionMap.get(partition);
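                            // Note: finalNodeList aliases the list stored in adjustedPartitionMap,
                            // so the reordering below rewrites that map's value in place.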
                            int index = 0;
                            // 1. Keep the nodes from the pre-selected list first, preserving their order
                            for (String node : preSelectedList) {
                                if (adjustedNodeList.remove(node)) {
                                    finalNodeList.set(index++, node);
                                }
                            }
                            // 2. Fill the remaining slots with the rest of the nodes
                            for (String node : adjustedNodeList) {
                                finalNodeList.set(index++, node);
                            }
                        }
                        finalPartitionMap = adjustedPartitionMap;
                    } else {
                        // Adjustment failed; the final partition map is not valid.
                        finalPartitionMap = null;
                    }
                } catch (ExecutionException e) {
                    _logger.error("Failed to perform consistent hashing partition assignment.", e);
                    finalPartitionMap = null;
                }
            }
        }
        if (null != finalPartitionMap) {
            ZNRecord result = new ZNRecord(_resourceName);
            result.setListFields(finalPartitionMap);
            return result;
        }
    }
    // Forcing an even distribution is not possible; fall back to the default strategy.
    if (_logger.isDebugEnabled()) {
        _logger.debug("Force even distribution is not possible, using the default strategy: " + getBaseRebalanceStrategy().getClass().getSimpleName());
    }
    if (liveNodes.equals(allNodes)) {
        return origAssignment;
    } else {
        // Need to re-calculate since the node list is different.
        return getBaseRebalanceStrategy().computePartitionAssignment(allNodes, liveNodes, currentMapping, clusterData);
    }
}
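
The helpers convertMap and shufflePreferenceList referenced above are not part of this snippet. As used here, convertMap transposes a Map<String, List<String>> in either direction (partition -> nodes, or node -> partitions). A minimal sketch consistent with that usage (a reconstruction for illustration, not the actual Helix source):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical reconstruction: transpose a key -> values map, e.g. turn a
// partition -> nodes map into a node -> partitions map (and vice versa).
private Map<String, List<String>> convertMap(Map<String, List<String>> original) {
    Map<String, List<String>> converted = new HashMap<>();
    for (Map.Entry<String, List<String>> entry : original.entrySet()) {
        for (String value : entry.getValue()) {
            converted.computeIfAbsent(value, k -> new ArrayList<>()).add(entry.getKey());
        }
    }
    return converted;
}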
Also used: ConsistentHashingAdjustmentAlgorithm (org.apache.helix.controller.rebalancer.strategy.crushMapping.ConsistentHashingAdjustmentAlgorithm), Topology (org.apache.helix.controller.rebalancer.topology.Topology), ExecutionException (java.util.concurrent.ExecutionException), CardDealingAdjustmentAlgorithm (org.apache.helix.controller.rebalancer.strategy.crushMapping.CardDealingAdjustmentAlgorithm), ZNRecord (org.apache.helix.ZNRecord)

Aggregations

ExecutionException (java.util.concurrent.ExecutionException): 1
ZNRecord (org.apache.helix.ZNRecord): 1
CardDealingAdjustmentAlgorithm (org.apache.helix.controller.rebalancer.strategy.crushMapping.CardDealingAdjustmentAlgorithm): 1
ConsistentHashingAdjustmentAlgorithm (org.apache.helix.controller.rebalancer.strategy.crushMapping.ConsistentHashingAdjustmentAlgorithm): 1
Topology (org.apache.helix.controller.rebalancer.topology.Topology): 1
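
The in-place reordering performed in Round 4 above is the subtlest step: nodes that survive the consistent-hashing adjustment keep their pre-selected order (so replica states stay stable), while replacement nodes fill the remaining slots. A self-contained sketch of just that step (names are illustrative, not from Helix):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PreferenceListReorderDemo {
    public static void main(String[] args) {
        // Order chosen by the card-dealing and shuffle rounds.
        List<String> preSelected = Arrays.asList("n1", "n2", "n3");
        // The same partition after consistent-hashing re-mapping: n2 was
        // replaced by n4, and the resulting order is arbitrary.
        List<String> adjusted = new ArrayList<>(Arrays.asList("n4", "n3", "n1"));

        Set<String> remaining = new HashSet<>(adjusted);
        int index = 0;
        // 1. Keep surviving nodes in their pre-selected order.
        for (String node : preSelected) {
            if (remaining.remove(node)) {
                adjusted.set(index++, node);
            }
        }
        // 2. Fill the remaining slots with the replacement nodes.
        for (String node : remaining) {
            adjusted.set(index++, node);
        }
        System.out.println(adjusted); // prints [n1, n3, n4]
    }
}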