Use of org.apache.helix.model.LiveInstance in project helix by apache: class AutoRebalancer, method computeNewIdealState.
@Override
public IdealState computeNewIdealState(String resourceName, IdealState currentIdealState,
    CurrentStateOutput currentStateOutput, ClusterDataCache clusterData) {
  IdealState cachedIdealState = getCachedIdealState(resourceName, clusterData);
  if (cachedIdealState != null) {
    LOG.debug("Use cached IdealState for " + resourceName);
    return cachedIdealState;
  }
  LOG.info("Computing IdealState for " + resourceName);
  List<String> partitions = new ArrayList<String>(currentIdealState.getPartitionSet());
  String stateModelName = currentIdealState.getStateModelDefRef();
  StateModelDefinition stateModelDef = clusterData.getStateModelDef(stateModelName);
  if (stateModelDef == null) {
    LOG.error("State Model Definition null for resource: " + resourceName);
    throw new HelixException("State Model Definition null for resource: " + resourceName);
  }
  Map<String, LiveInstance> liveInstance = clusterData.getLiveInstances();
  int replicas = currentIdealState.getReplicaCount(liveInstance.size());
  LinkedHashMap<String, Integer> stateCountMap =
      stateModelDef.getStateCountMap(liveInstance.size(), replicas);
  List<String> liveNodes = new ArrayList<String>(liveInstance.keySet());
  List<String> allNodes = new ArrayList<String>(clusterData.getInstanceConfigMap().keySet());
  allNodes.removeAll(clusterData.getDisabledInstances());
  liveNodes.retainAll(allNodes);
  Map<String, Map<String, String>> currentMapping =
      currentMapping(currentStateOutput, resourceName, partitions, stateCountMap);
  // If there are nodes tagged with resource name, use only those nodes
  Set<String> taggedNodes = new HashSet<String>();
  Set<String> taggedLiveNodes = new HashSet<String>();
  if (currentIdealState.getInstanceGroupTag() != null) {
    for (String instanceName : allNodes) {
      if (clusterData.getInstanceConfigMap().get(instanceName)
          .containsTag(currentIdealState.getInstanceGroupTag())) {
        taggedNodes.add(instanceName);
        if (liveInstance.containsKey(instanceName)) {
          taggedLiveNodes.add(instanceName);
        }
      }
    }
    if (!taggedLiveNodes.isEmpty()) {
      // live nodes exist that have this tag
      if (LOG.isInfoEnabled()) {
        LOG.info("found the following participants with tag "
            + currentIdealState.getInstanceGroupTag() + " for " + resourceName + ": "
            + taggedLiveNodes);
      }
    } else if (taggedNodes.isEmpty()) {
      // no live nodes and no configured nodes have this tag
      LOG.warn("Resource " + resourceName + " has tag " + currentIdealState.getInstanceGroupTag()
          + " but no configured participants have this tag");
    } else {
      // configured nodes have this tag, but no live nodes have this tag
      LOG.warn("Resource " + resourceName + " has tag " + currentIdealState.getInstanceGroupTag()
          + " but no live participants have this tag");
    }
    allNodes = new ArrayList<String>(taggedNodes);
    liveNodes = new ArrayList<String>(taggedLiveNodes);
  }
  // sort node lists to ensure consistent preferred assignments
  Collections.sort(allNodes);
  Collections.sort(liveNodes);
  int maxPartition = currentIdealState.getMaxPartitionsPerInstance();
  _rebalanceStrategy = getRebalanceStrategy(currentIdealState.getRebalanceStrategy(), partitions,
      resourceName, stateCountMap, maxPartition);
  ZNRecord newMapping =
      _rebalanceStrategy.computePartitionAssignment(allNodes, liveNodes, currentMapping, clusterData);
  if (LOG.isDebugEnabled()) {
    LOG.debug("currentMapping: " + currentMapping);
    LOG.debug("stateCountMap: " + stateCountMap);
    LOG.debug("liveNodes: " + liveNodes);
    LOG.debug("allNodes: " + allNodes);
    LOG.debug("maxPartition: " + maxPartition);
    LOG.debug("newMapping: " + newMapping);
  }
  IdealState newIdealState = new IdealState(resourceName);
  newIdealState.getRecord().setSimpleFields(currentIdealState.getRecord().getSimpleFields());
  newIdealState.setRebalanceMode(RebalanceMode.FULL_AUTO);
  newIdealState.getRecord().setListFields(newMapping.getListFields());
  return newIdealState;
}
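computeNewIdealState() derives its candidate node lists from ClusterDataCache.getLiveInstances() and then sorts them so the rebalance strategy sees a stable ordering across pipeline runs. A minimal sketch of the same live-node listing done directly against a HelixDataAccessor (the accessor is assumed to come from an already-connected HelixManager; the helper class and method names are hypothetical):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.helix.HelixDataAccessor;
import org.apache.helix.PropertyKey;
import org.apache.helix.model.LiveInstance;

public class LiveNodeListBuilder {
  // Builds the sorted list of live participant names, mirroring how
  // computeNewIdealState() derives liveNodes before calling the rebalance strategy.
  public static List<String> sortedLiveNodes(HelixDataAccessor accessor) {
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    // Each child of the LIVEINSTANCES path is one connected participant.
    List<LiveInstance> liveInstances = accessor.getChildValues(keyBuilder.liveInstances());
    List<String> liveNodes = new ArrayList<String>();
    for (LiveInstance liveInstance : liveInstances) {
      liveNodes.add(liveInstance.getInstanceName());
    }
    // Sorting keeps preferred assignments stable, just as the rebalancer
    // sorts allNodes and liveNodes before computing the partition assignment.
    Collections.sort(liveNodes);
    return liveNodes;
  }
}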
Use of org.apache.helix.model.LiveInstance in project helix by apache: class TaskAssignmentStage, method batchMessage.
List<Message> batchMessage(Builder keyBuilder, List<Message> messages,
    Map<String, Resource> resourceMap, Map<String, LiveInstance> liveInstanceMap,
    HelixManagerProperties properties) {
  // group messages by their CurrentState path + "/" + fromState + "/" + toState
  Map<String, Message> batchMessages = new HashMap<String, Message>();
  List<Message> outputMessages = new ArrayList<Message>();
  Iterator<Message> iter = messages.iterator();
  while (iter.hasNext()) {
    Message message = iter.next();
    String resourceName = message.getResourceName();
    Resource resource = resourceMap.get(resourceName);
    String instanceName = message.getTgtName();
    LiveInstance liveInstance = liveInstanceMap.get(instanceName);
    String participantVersion = null;
    if (liveInstance != null) {
      participantVersion = liveInstance.getHelixVersion();
    }
    if (resource == null || !resource.getBatchMessageMode() || participantVersion == null
        || !properties.isFeatureSupported("batch_message", participantVersion)) {
      outputMessages.add(message);
      continue;
    }
    String key = keyBuilder
        .currentState(message.getTgtName(), message.getTgtSessionId(), message.getResourceName())
        .getPath() + "/" + message.getFromState() + "/" + message.getToState();
    if (!batchMessages.containsKey(key)) {
      Message batchMessage = new Message(message.getRecord());
      batchMessage.setBatchMessageMode(true);
      outputMessages.add(batchMessage);
      batchMessages.put(key, batchMessage);
    }
    batchMessages.get(key).addPartitionName(message.getPartitionName());
  }
  return outputMessages;
}
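batchMessage() only batches when the resource has batch mode enabled and the target participant's LiveInstance advertises a Helix version for which batch_message is supported. The participant-version part of that guard, pulled out on its own as a sketch (liveInstanceMap and properties are assumed to be the same inputs the stage receives; the class and method names are hypothetical):

import java.util.Map;

import org.apache.helix.HelixManagerProperties;
import org.apache.helix.model.LiveInstance;
import org.apache.helix.model.Message;

public class BatchGate {
  // Returns true when the message's target participant is live and runs a Helix
  // version that supports batch messages; mirrors the version check in batchMessage().
  static boolean participantSupportsBatching(Message message,
      Map<String, LiveInstance> liveInstanceMap, HelixManagerProperties properties) {
    LiveInstance liveInstance = liveInstanceMap.get(message.getTgtName());
    if (liveInstance == null) {
      return false; // target participant is not live; send the message unbatched
    }
    String participantVersion = liveInstance.getHelixVersion();
    return participantVersion != null
        && properties.isFeatureSupported("batch_message", participantVersion);
  }
}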
Use of org.apache.helix.model.LiveInstance in project helix by apache: class ServiceDiscovery, method refreshCache.
private void refreshCache() {
  Builder propertyKeyBuilder = new PropertyKey.Builder(cluster);
  HelixDataAccessor helixDataAccessor = admin.getHelixDataAccessor();
  List<LiveInstance> liveInstances =
      helixDataAccessor.getChildValues(propertyKeyBuilder.liveInstances());
  refreshCache(liveInstances);
}
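This overload reads the live-instance list on demand via HelixDataAccessor.getChildValues(). An alternative is to be notified whenever the list changes. A minimal sketch, assuming an already-connected HelixManager is available (the watcher class and manager parameter are hypothetical, and the LiveInstanceChangeListener package differs across Helix versions); it delegates to the refreshCache(List<LiveInstance>) overload shown in the next snippet:

import java.util.List;

import org.apache.helix.HelixManager;
import org.apache.helix.LiveInstanceChangeListener;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.LiveInstance;

public class ServiceDiscoveryWatcher {
  // Re-runs the cache refresh whenever the set of live instances changes,
  // instead of polling the LIVEINSTANCES path.
  void watch(HelixManager manager) throws Exception {
    manager.addLiveInstanceChangeListener(new LiveInstanceChangeListener() {
      @Override
      public void onLiveInstanceChange(List<LiveInstance> liveInstances,
          NotificationContext changeContext) {
        refreshCache(liveInstances);
      }
    });
  }

  void refreshCache(List<LiveInstance> liveInstances) {
    // delegate to the recipe's refreshCache(List<LiveInstance>) shown below
  }
}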
Use of org.apache.helix.model.LiveInstance in project helix by apache: class ServiceDiscovery, method refreshCache(List&lt;LiveInstance&gt;).
private void refreshCache(List<LiveInstance> liveInstances) {
  List<ServiceMetadata> services = new ArrayList<ServiceMetadata>();
  for (LiveInstance liveInstance : liveInstances) {
    ServiceMetadata metadata = new ServiceMetadata();
    ZNRecord rec = liveInstance.getRecord();
    metadata.setPort(Integer.parseInt(rec.getSimpleField("PORT")));
    metadata.setHost(rec.getSimpleField("HOST"));
    metadata.setServiceName(rec.getSimpleField("SERVICE_NAME"));
    services.add(metadata);
  }
  // protect against multiple threads updating this
  synchronized (this) {
    cache = services;
  }
}
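refreshCache(List&lt;LiveInstance&gt;) assumes every live instance carries the recipe's custom HOST, PORT, and SERVICE_NAME simple fields; for a participant that registered without them, getSimpleField would return null and Integer.parseInt would throw. A hedged defensive variant is sketched here (ServiceMetadata and its setters are the recipe's own, used as above; the reader class is hypothetical and the ZNRecord import path is the pre-1.0 one):

import org.apache.helix.ZNRecord;
import org.apache.helix.model.LiveInstance;

public class ServiceMetadataReader {
  // Extracts the recipe's custom HOST/PORT/SERVICE_NAME fields from a LiveInstance,
  // returning null for instances that did not publish service metadata.
  static ServiceMetadata fromLiveInstance(LiveInstance liveInstance) {
    ZNRecord rec = liveInstance.getRecord();
    String port = rec.getSimpleField("PORT");
    String host = rec.getSimpleField("HOST");
    String serviceName = rec.getSimpleField("SERVICE_NAME");
    if (port == null || host == null || serviceName == null) {
      return null; // participant registered without service metadata
    }
    ServiceMetadata metadata = new ServiceMetadata();
    metadata.setPort(Integer.parseInt(port));
    metadata.setHost(host);
    metadata.setServiceName(serviceName);
    return metadata;
  }
}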
Use of org.apache.helix.model.LiveInstance in project helix by apache: class TestHelixAdminScenariosRest, method testStartCluster.
@Test
public void testStartCluster() throws Exception {
  final String clusterName = "clusterTestStartCluster";
  final String controllerClusterName = "controllerClusterTestStartCluster";
  Map<String, MockParticipantManager> participants =
      new HashMap<String, MockParticipantManager>();
  Map<String, ClusterDistributedController> distControllers =
      new HashMap<String, ClusterDistributedController>();
  // setup cluster
  addCluster(clusterName);
  addInstancesToCluster(clusterName, "localhost:123", 6, null);
  addResource(clusterName, "db_11", 8);
  rebalanceResource(clusterName, "db_11");
  addCluster(controllerClusterName);
  addInstancesToCluster(controllerClusterName, "controller_900", 2, null);
  // start mock nodes
  for (int i = 0; i < 6; i++) {
    String instanceName = "localhost_123" + i;
    MockParticipantManager participant =
        new MockParticipantManager(ZK_ADDR, clusterName, instanceName);
    participant.syncStart();
    participants.put(instanceName, participant);
  }
  // start controller nodes
  for (int i = 0; i < 2; i++) {
    String controllerName = "controller_900" + i;
    ClusterDistributedController distController =
        new ClusterDistributedController(ZK_ADDR, controllerClusterName, controllerName);
    distController.syncStart();
    distControllers.put(controllerName, distController);
  }
  Thread.sleep(100);
  // activate clusters
  // wrong grand cluster name
  String clusterUrl = getClusterUrl(clusterName);
  assertSuccessPostOperation(clusterUrl, activateClusterCmd("nonExistCluster", true), true);
  // wrong cluster name
  clusterUrl = getClusterUrl("nonExistCluster");
  assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, true), true);
  clusterUrl = getClusterUrl(clusterName);
  assertSuccessPostOperation(clusterUrl, activateClusterCmd(controllerClusterName, true), false);
  Thread.sleep(500);
  deleteUrl(clusterUrl, true);
  // verify leader node
  HelixDataAccessor accessor = distControllers.get("controller_9001").getHelixDataAccessor();
  LiveInstance controllerLeader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  Assert.assertTrue(controllerLeader.getInstanceName().startsWith("controller_900"));
  accessor = participants.get("localhost_1232").getHelixDataAccessor();
  LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  for (int i = 0; i < 5; i++) {
    if (leader != null) {
      break;
    }
    Thread.sleep(1000);
    leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
  }
  Assert.assertTrue(leader.getInstanceName().startsWith("controller_900"));
  boolean verifyResult =
      ClusterStateVerifier.verifyByZkCallback(new MasterNbInExtViewVerifier(ZK_ADDR, clusterName));
  Assert.assertTrue(verifyResult);
  verifyResult = ClusterStateVerifier
      .verifyByZkCallback(new BestPossAndExtViewZkVerifier(ZK_ADDR, clusterName));
  Assert.assertTrue(verifyResult);
  Thread.sleep(1000);
  // clean up
  for (ClusterDistributedController controller : distControllers.values()) {
    controller.syncStop();
  }
  for (MockParticipantManager participant : participants.values()) {
    participant.syncStop();
  }
}
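The test verifies leader election by re-reading the CONTROLLER/LEADER LiveInstance in a retry loop. The same wait can be factored into a small helper; a sketch, assuming the HelixDataAccessor comes from one of the already-started participants or controllers (the helper class and timeout parameter are hypothetical):

import org.apache.helix.HelixDataAccessor;
import org.apache.helix.model.LiveInstance;

public class LeaderWaiter {
  // Polls the CONTROLLER/LEADER znode until a leader LiveInstance appears or the
  // timeout elapses; returns null if no leader was elected in time.
  static LiveInstance waitForLeader(HelixDataAccessor accessor, long timeoutMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      LiveInstance leader = accessor.getProperty(accessor.keyBuilder().controllerLeader());
      if (leader != null) {
        return leader;
      }
      Thread.sleep(200);
    }
    return null;
  }
}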