
Example 56 with ClusterConfig

Use of org.apache.helix.model.ClusterConfig in project helix by apache.

The class ClusterAccessor, method updateClusterConfig.

@POST
@Path("{clusterId}/configs")
public Response updateClusterConfig(@PathParam("clusterId") String clusterId, @QueryParam("command") String commandStr, String content) {
    Command command;
    try {
        command = getCommand(commandStr);
    } catch (HelixException ex) {
        return badRequest(ex.getMessage());
    }
    ZNRecord record;
    try {
        record = toZNRecord(content);
    } catch (IOException e) {
        _logger.error("Failed to deserialize user's input " + content + ", Exception: " + e);
        return badRequest("Input is not a valid ZNRecord!");
    }
    if (!record.getId().equals(clusterId)) {
        return badRequest("ID does not match the cluster name in input!");
    }
    ClusterConfig config = new ClusterConfig(record);
    ConfigAccessor configAccessor = getConfigAccessor();
    try {
        switch(command) {
            case update:
                configAccessor.updateClusterConfig(clusterId, config);
                break;
            case delete:
                {
                    HelixConfigScope clusterScope = new HelixConfigScopeBuilder(HelixConfigScope.ConfigScopeProperty.CLUSTER).forCluster(clusterId).build();
                    configAccessor.remove(clusterScope, config.getRecord());
                }
                break;
            default:
                return badRequest("Unsupported command " + commandStr);
        }
    } catch (HelixException ex) {
        return notFound(ex.getMessage());
    } catch (Exception ex) {
        _logger.error("Failed to " + command + " cluster config, cluster " + clusterId + " new config: " + content + ", Exception: " + ex);
        return serverError(ex);
    }
    return OK();
}
Also used : HelixException(org.apache.helix.HelixException) HelixConfigScopeBuilder(org.apache.helix.model.builder.HelixConfigScopeBuilder) IOException(java.io.IOException) ConfigAccessor(org.apache.helix.ConfigAccessor) HelixConfigScope(org.apache.helix.model.HelixConfigScope) ZNRecord(org.apache.helix.ZNRecord) HelixException(org.apache.helix.HelixException) IOException(java.io.IOException) ClusterConfig(org.apache.helix.model.ClusterConfig) Path(javax.ws.rs.Path) POST(javax.ws.rs.POST)
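
For context, here is a minimal client-side sketch of the same update done directly through ConfigAccessor rather than the REST endpoint above; the ZooKeeper address and cluster name are placeholder assumptions, not values taken from the example.

// Hypothetical standalone sketch; zkAddr and clusterName are assumptions.
import org.apache.helix.ConfigAccessor;
import org.apache.helix.model.ClusterConfig;

public class UpdateClusterConfigSketch {
    public static void main(String[] args) {
        String zkAddr = "localhost:2181";
        String clusterName = "TestCluster";
        ConfigAccessor configAccessor = new ConfigAccessor(zkAddr);
        // Read the current cluster config, toggle one option, and write it back,
        // which is what the REST handler's "update" command delegates to.
        ClusterConfig config = configAccessor.getClusterConfig(clusterName);
        config.setPersistBestPossibleAssignment(true);
        configAccessor.updateClusterConfig(clusterName, config);
    }
}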

Example 57 with ClusterConfig

Use of org.apache.helix.model.ClusterConfig in project helix by apache.

The class ZKHelixAdmin, method enableBatchInstances.

private void enableBatchInstances(final String clusterName, final List<String> instances, final boolean enabled, BaseDataAccessor<ZNRecord> baseAccessor) {
    // NOTE: batch enable/disable is temporarily disabled; the guard below always throws.
    if (true) {
        throw new HelixException("Current batch enable/disable instances are temporarily disabled!");
    }
    String path = PropertyPathBuilder.clusterConfig(clusterName);
    if (!baseAccessor.exists(path, 0)) {
        throw new HelixException("Cluster " + clusterName + ": cluster config does not exist");
    }
    baseAccessor.update(path, new DataUpdater<ZNRecord>() {

        @Override
        public ZNRecord update(ZNRecord currentData) {
            if (currentData == null) {
                throw new HelixException("Cluster: " + clusterName + ": cluster config is null");
            }
            ClusterConfig clusterConfig = new ClusterConfig(currentData);
            Map<String, String> disabledInstances = new TreeMap<>();
            if (clusterConfig.getDisabledInstances() != null) {
                disabledInstances.putAll(clusterConfig.getDisabledInstances());
            }
            if (enabled) {
                disabledInstances.keySet().removeAll(instances);
            } else {
                for (String disabledInstance : instances) {
                    if (!disabledInstances.containsKey(disabledInstance)) {
                        disabledInstances.put(disabledInstance, String.valueOf(System.currentTimeMillis()));
                    }
                }
            }
            clusterConfig.setDisabledInstances(disabledInstances);
            return clusterConfig.getRecord();
        }
    }, AccessOption.PERSISTENT);
}
Also used : HelixException(org.apache.helix.HelixException) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) ZNRecord(org.apache.helix.ZNRecord) ClusterConfig(org.apache.helix.model.ClusterConfig)
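
A short sketch of how a caller might read back the disabled-instance map that this updater writes into ClusterConfig; the ZooKeeper address and cluster name are assumptions.

// Hypothetical sketch; connection details are assumptions.
import java.util.Map;
import org.apache.helix.ConfigAccessor;
import org.apache.helix.model.ClusterConfig;

public class ReadDisabledInstancesSketch {
    public static void main(String[] args) {
        ConfigAccessor configAccessor = new ConfigAccessor("localhost:2181");
        ClusterConfig clusterConfig = configAccessor.getClusterConfig("TestCluster");
        // Keys are instance names; values are the disable timestamps written by enableBatchInstances.
        Map<String, String> disabled = clusterConfig.getDisabledInstances();
        if (disabled != null) {
            disabled.forEach((instance, ts) -> System.out.println(instance + " disabled at " + ts));
        }
    }
}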

Example 58 with ClusterConfig

Use of org.apache.helix.model.ClusterConfig in project helix by apache.

The class PersistAssignmentStage, method process.

@Override
public void process(ClusterEvent event) throws Exception {
    ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name());
    ClusterConfig clusterConfig = cache.getClusterConfig();
    if (!clusterConfig.isPersistBestPossibleAssignment() && !clusterConfig.isPersistIntermediateAssignment()) {
        return;
    }
    BestPossibleStateOutput bestPossibleAssignment = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name());
    HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    for (String resourceId : bestPossibleAssignment.resourceSet()) {
        Resource resource = resourceMap.get(resourceId);
        if (resource != null) {
            final IdealState idealState = cache.getIdealState(resourceId);
            if (idealState == null) {
                LOG.warn("IdealState not found for resource " + resourceId);
                continue;
            }
            IdealState.RebalanceMode mode = idealState.getRebalanceMode();
            if (!mode.equals(IdealState.RebalanceMode.SEMI_AUTO) && !mode.equals(IdealState.RebalanceMode.FULL_AUTO)) {
                // Do not persist assignments for resources that are in neither semi-auto nor full-auto mode.
                continue;
            }
            boolean needPersist = false;
            if (mode.equals(IdealState.RebalanceMode.FULL_AUTO)) {
                // Persist the preference lists in full-auto mode.
                Map<String, List<String>> newLists = bestPossibleAssignment.getPreferenceLists(resourceId);
                if (newLists != null && hasPreferenceListChanged(newLists, idealState)) {
                    idealState.setPreferenceLists(newLists);
                    needPersist = true;
                }
            }
            PartitionStateMap partitionStateMap = bestPossibleAssignment.getPartitionStateMap(resourceId);
            if (clusterConfig.isPersistIntermediateAssignment()) {
                IntermediateStateOutput intermediateAssignment = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
                partitionStateMap = intermediateAssignment.getPartitionStateMap(resourceId);
            }
            // TODO: temporary solution for Espresso/Dbus backward compatibility; should be removed.
            Map<Partition, Map<String, String>> assignmentToPersist = convertAssignmentPersisted(resource, idealState, partitionStateMap.getStateMap());
            if (assignmentToPersist != null && hasInstanceMapChanged(assignmentToPersist, idealState)) {
                for (Partition partition : assignmentToPersist.keySet()) {
                    Map<String, String> instanceMap = assignmentToPersist.get(partition);
                    idealState.setInstanceStateMap(partition.getPartitionName(), instanceMap);
                }
                needPersist = true;
            }
            if (needPersist) {
                // Update instead of set to ensure any intermediate changes that the controller does not update are kept.
                accessor.updateProperty(keyBuilder.idealStates(resourceId), new DataUpdater<ZNRecord>() {

                    @Override
                    public ZNRecord update(ZNRecord current) {
                        if (current != null) {
                            // Overwrite MapFields and ListFields items with the same key.
                            // Note that default merge will keep old values in the maps or lists unchanged, which is not desired.
                            current.getMapFields().clear();
                            current.getMapFields().putAll(idealState.getRecord().getMapFields());
                            current.getListFields().putAll(idealState.getRecord().getListFields());
                        }
                        return current;
                    }
                }, idealState);
            }
        }
    }
}
Also used : Partition(org.apache.helix.model.Partition) HelixManager(org.apache.helix.HelixManager) Resource(org.apache.helix.model.Resource) IdealState(org.apache.helix.model.IdealState) PartitionStateMap(org.apache.helix.controller.common.PartitionStateMap) HelixDataAccessor(org.apache.helix.HelixDataAccessor) List(java.util.List) HashMap(java.util.HashMap) PartitionStateMap(org.apache.helix.controller.common.PartitionStateMap) Map(java.util.Map) PropertyKey(org.apache.helix.PropertyKey) ZNRecord(org.apache.helix.ZNRecord) ClusterConfig(org.apache.helix.model.ClusterConfig)
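
PersistAssignmentStage only runs when one of two ClusterConfig flags is set; the sketch below shows how those flags might be enabled ahead of time. The ZooKeeper address and cluster name are assumptions.

// Hypothetical setup sketch; values are assumptions.
import org.apache.helix.ConfigAccessor;
import org.apache.helix.model.ClusterConfig;

public class EnableAssignmentPersistenceSketch {
    public static void main(String[] args) {
        ConfigAccessor configAccessor = new ConfigAccessor("localhost:2181");
        String clusterName = "TestCluster";
        ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
        // With only the best-possible flag on, the stage persists the best-possible state;
        // if the intermediate flag is also on, the stage persists the intermediate state map instead.
        clusterConfig.setPersistBestPossibleAssignment(true);
        clusterConfig.setPersistIntermediateAssignment(false);
        configAccessor.updateClusterConfig(clusterName, clusterConfig);
    }
}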

Example 59 with ClusterConfig

Use of org.apache.helix.model.ClusterConfig in project helix by apache.

The class ResourceComputationStage, method process.

@Override
public void process(ClusterEvent event) throws Exception {
    ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name());
    if (cache == null) {
        throw new StageException("Missing attributes in event:" + event + ". Requires DataCache");
    }
    Map<String, IdealState> idealStates = cache.getIdealStates();
    Map<String, Resource> resourceMap = new LinkedHashMap<String, Resource>();
    Map<String, Resource> resourceToRebalance = new LinkedHashMap<>();
    if (idealStates != null && idealStates.size() > 0) {
        for (IdealState idealState : idealStates.values()) {
            if (idealState == null) {
                continue;
            }
            Set<String> partitionSet = idealState.getPartitionSet();
            String resourceName = idealState.getResourceName();
            if (!resourceMap.containsKey(resourceName)) {
                Resource resource = new Resource(resourceName, cache.getClusterConfig(), cache.getResourceConfig(resourceName));
                resourceMap.put(resourceName, resource);
                if ((!idealState.isValid() && !cache.isTaskCache())
                        || (idealState.getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME) && cache.isTaskCache())
                        || (!idealState.getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME) && !cache.isTaskCache())) {
                    resourceToRebalance.put(resourceName, resource);
                }
                resource.setStateModelDefRef(idealState.getStateModelDefRef());
                resource.setStateModelFactoryName(idealState.getStateModelFactoryName());
                resource.setBucketSize(idealState.getBucketSize());
                boolean batchMessageMode = idealState.getBatchMessageMode();
                ClusterConfig clusterConfig = cache.getClusterConfig();
                if (clusterConfig != null) {
                    batchMessageMode |= clusterConfig.getBatchMessageMode();
                }
                resource.setBatchMessageMode(batchMessageMode);
                resource.setResourceGroupName(idealState.getResourceGroupName());
                resource.setResourceTag(idealState.getInstanceGroupTag());
            }
            for (String partition : partitionSet) {
                addPartition(partition, resourceName, resourceMap);
            }
        }
    }
    // It's important to get partitions from CurrentState as well since the
    // idealState might be removed.
    Map<String, LiveInstance> availableInstances = cache.getLiveInstances();
    if (availableInstances != null && availableInstances.size() > 0) {
        for (LiveInstance instance : availableInstances.values()) {
            String instanceName = instance.getInstanceName();
            String clientSessionId = instance.getSessionId();
            Map<String, CurrentState> currentStateMap = cache.getCurrentState(instanceName, clientSessionId);
            if (currentStateMap == null || currentStateMap.size() == 0) {
                continue;
            }
            for (CurrentState currentState : currentStateMap.values()) {
                String resourceName = currentState.getResourceName();
                Map<String, String> resourceStateMap = currentState.getPartitionStateMap();
                if (resourceStateMap.keySet().isEmpty()) {
                    // don't include empty current state for dropped resource
                    continue;
                }
                // don't overwrite ideal state settings
                if (!resourceMap.containsKey(resourceName)) {
                    addResource(resourceName, resourceMap);
                    Resource resource = resourceMap.get(resourceName);
                    resource.setStateModelDefRef(currentState.getStateModelDefRef());
                    resource.setStateModelFactoryName(currentState.getStateModelFactoryName());
                    resource.setBucketSize(currentState.getBucketSize());
                    resource.setBatchMessageMode(currentState.getBatchMessageMode());
                    if (resource.getStateModelDefRef() == null && !cache.isTaskCache()
                            || resource.getStateModelDefRef() != null
                                && (resource.getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME) && cache.isTaskCache()
                                    || !resource.getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME) && !cache.isTaskCache())) {
                        resourceToRebalance.put(resourceName, resource);
                    }
                    IdealState idealState = idealStates.get(resourceName);
                    if (idealState != null) {
                        resource.setResourceGroupName(idealState.getResourceGroupName());
                        resource.setResourceTag(idealState.getInstanceGroupTag());
                    }
                }
                if (currentState.getStateModelDefRef() == null) {
                    LOG.error("state model def is null." + "resource:" + currentState.getResourceName() + ", partitions: " + currentState.getPartitionStateMap().keySet() + ", states: " + currentState.getPartitionStateMap().values());
                    throw new StageException("State model def is null for resource:" + currentState.getResourceName());
                }
                for (String partition : resourceStateMap.keySet()) {
                    addPartition(partition, resourceName, resourceMap);
                }
            }
        }
    }
    event.addAttribute(AttributeName.RESOURCES.name(), resourceMap);
    event.addAttribute(AttributeName.RESOURCES_TO_REBALANCE.name(), resourceToRebalance);
}
Also used : StageException(org.apache.helix.controller.pipeline.StageException) Resource(org.apache.helix.model.Resource) IdealState(org.apache.helix.model.IdealState) LinkedHashMap(java.util.LinkedHashMap) LiveInstance(org.apache.helix.model.LiveInstance) CurrentState(org.apache.helix.model.CurrentState) ClusterConfig(org.apache.helix.model.ClusterConfig)
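
The batch-message-mode resolution above is easy to miss inside the larger loop; the helper below restates just that rule, under the assumption that it is called with the same IdealState and ClusterConfig objects the stage reads from the cache.

// Restates the batch-message-mode rule from ResourceComputationStage; the helper itself is hypothetical.
import org.apache.helix.model.ClusterConfig;
import org.apache.helix.model.IdealState;

public class BatchMessageModeRule {
    static boolean resolveBatchMessageMode(IdealState idealState, ClusterConfig clusterConfig) {
        boolean batchMessageMode = idealState.getBatchMessageMode();
        if (clusterConfig != null) {
            // The cluster-level flag can only enable batch mode, never disable a resource-level setting.
            batchMessageMode |= clusterConfig.getBatchMessageMode();
        }
        return batchMessageMode;
    }
}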

Example 60 with ClusterConfig

Use of org.apache.helix.model.ClusterConfig in project helix by apache.

The class TargetExteralViewCalcStage, method process.

@Override
public void process(ClusterEvent event) throws Exception {
    ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name());
    ClusterConfig clusterConfig = cache.getClusterConfig();
    if (cache.isTaskCache() || !clusterConfig.isTargetExternalViewEnabled()) {
        return;
    }
    HelixManager helixManager = event.getAttribute(AttributeName.helixmanager.name());
    HelixDataAccessor accessor = helixManager.getHelixDataAccessor();
    if (!accessor.getBaseDataAccessor().exists(accessor.keyBuilder().targetExternalViews().getPath(), AccessOption.PERSISTENT)) {
        accessor.getBaseDataAccessor().create(accessor.keyBuilder().targetExternalViews().getPath(), null, AccessOption.PERSISTENT);
    }
    BestPossibleStateOutput bestPossibleAssignments = event.getAttribute(AttributeName.BEST_POSSIBLE_STATE.name());
    IntermediateStateOutput intermediateAssignments = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES.name());
    List<PropertyKey> keys = new ArrayList<>();
    List<ExternalView> targetExternalViews = new ArrayList<>();
    for (String resourceName : bestPossibleAssignments.resourceSet()) {
        if (cache.getIdealState(resourceName) == null || cache.getIdealState(resourceName).isExternalViewDisabled()) {
            continue;
        }
        Resource resource = resourceMap.get(resourceName);
        if (resource != null) {
            PartitionStateMap partitionStateMap = intermediateAssignments.getPartitionStateMap(resourceName);
            Map<String, Map<String, String>> intermediateAssignment = convertToMapFields(partitionStateMap.getStateMap());
            Map<String, List<String>> preferenceLists = bestPossibleAssignments.getPreferenceLists(resourceName);
            boolean needPersist = false;
            ExternalView targetExternalView = cache.getTargetExternalView(resourceName);
            if (targetExternalView == null) {
                targetExternalView = new ExternalView(resourceName);
                targetExternalView.getRecord().getSimpleFields().putAll(cache.getIdealState(resourceName).getRecord().getSimpleFields());
                needPersist = true;
            }
            if (preferenceLists != null && !targetExternalView.getRecord().getListFields().equals(preferenceLists)) {
                targetExternalView.getRecord().setListFields(preferenceLists);
                needPersist = true;
            }
            if (intermediateAssignment != null && !targetExternalView.getRecord().getMapFields().equals(intermediateAssignment)) {
                targetExternalView.getRecord().setMapFields(intermediateAssignment);
                needPersist = true;
            }
            if (needPersist) {
                keys.add(accessor.keyBuilder().targetExternalView(resourceName));
                targetExternalViews.add(targetExternalView);
                cache.updateTargetExternalView(resourceName, targetExternalView);
            }
        }
    }
    accessor.setChildren(keys, targetExternalViews);
}
Also used : ExternalView(org.apache.helix.model.ExternalView) HelixManager(org.apache.helix.HelixManager) Resource(org.apache.helix.model.Resource) ArrayList(java.util.ArrayList) PartitionStateMap(org.apache.helix.controller.common.PartitionStateMap) HelixDataAccessor(org.apache.helix.HelixDataAccessor) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) PartitionStateMap(org.apache.helix.controller.common.PartitionStateMap) Map(java.util.Map) PropertyKey(org.apache.helix.PropertyKey) ClusterConfig(org.apache.helix.model.ClusterConfig)
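
This stage is skipped unless target external view is enabled in ClusterConfig; below is a minimal sketch for turning the feature on, assuming ClusterConfig's enableTargetExternalView setter and placeholder connection details.

// Hypothetical setup sketch; connection details are assumptions.
import org.apache.helix.ConfigAccessor;
import org.apache.helix.model.ClusterConfig;

public class EnableTargetExternalViewSketch {
    public static void main(String[] args) {
        ConfigAccessor configAccessor = new ConfigAccessor("localhost:2181");
        String clusterName = "TestCluster";
        ClusterConfig clusterConfig = configAccessor.getClusterConfig(clusterName);
        // Turns on the flag that isTargetExternalViewEnabled() reads in the stage above.
        clusterConfig.enableTargetExternalView(true);
        configAccessor.updateClusterConfig(clusterName, clusterConfig);
    }
}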

Aggregations

ClusterConfig (org.apache.helix.model.ClusterConfig): 61
Test (org.testng.annotations.Test): 23
ConfigAccessor (org.apache.helix.ConfigAccessor): 17
ZNRecord (org.apache.helix.ZNRecord): 13
IdealState (org.apache.helix.model.IdealState): 10
InstanceConfig (org.apache.helix.model.InstanceConfig): 9
ArrayList (java.util.ArrayList): 8
HashMap (java.util.HashMap): 8
Map (java.util.Map): 8
Resource (org.apache.helix.model.Resource): 7
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 6
HelixException (org.apache.helix.HelixException): 6
StateTransitionThrottleConfig (org.apache.helix.api.config.StateTransitionThrottleConfig): 6
ClusterControllerManager (org.apache.helix.integration.manager.ClusterControllerManager): 6
List (java.util.List): 5
ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor): 5
BeforeClass (org.testng.annotations.BeforeClass): 5
HelixManager (org.apache.helix.HelixManager): 4
MockParticipantManager (org.apache.helix.integration.manager.MockParticipantManager): 4
ExternalView (org.apache.helix.model.ExternalView): 4