Use of org.apache.helix.controller.stages.ClusterDataCache in project helix by apache.
The class TestClusterDataCacheSelectiveUpdate, method testSelectiveUpdates.
@Test(dependsOnMethods = { "testUpdateOnNotification" })
public void testSelectiveUpdates() throws Exception {
  MockZkHelixDataAccessor accessor =
      new MockZkHelixDataAccessor(CLUSTER_NAME, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  ClusterDataCache cache = new ClusterDataCache("CLUSTER_" + TestHelper.getTestClassName());
  cache.refresh(accessor);
  Assert.assertEquals(accessor.getReadCount(PropertyType.IDEALSTATES), 1);
  Assert.assertEquals(accessor.getReadCount(PropertyType.LIVEINSTANCES), NODE_NR);
  Assert.assertEquals(accessor.getReadCount(PropertyType.CURRENTSTATES), NODE_NR);
  Assert.assertEquals(accessor.getReadCount(PropertyType.CONFIGS), NODE_NR + 1);
  accessor.clearReadCounters();

  // A second refresh with no change notification should read almost nothing
  // (only the cluster config is re-read).
  cache.refresh(accessor);
  Assert.assertEquals(accessor.getReadCount(PropertyType.IDEALSTATES), 0);
  Assert.assertEquals(accessor.getReadCount(PropertyType.LIVEINSTANCES), 0);
  Assert.assertEquals(accessor.getReadCount(PropertyType.CURRENTSTATES), 0);
  Assert.assertEquals(accessor.getReadCount(PropertyType.CONFIGS), 1);

  // Add a new resource.
  _setupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_1", _PARTITIONS, STATE_MODEL);
  _setupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_1", _replica);
  Thread.sleep(100);
  HelixClusterVerifier clusterVerifier =
      new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME).setZkAddr(ZK_ADDR).build();
  Assert.assertTrue(clusterVerifier.verify());
  accessor.clearReadCounters();

  // Refreshing after an ideal-state change notification should read only the new
  // current states and the new ideal state.
  cache.notifyDataChange(HelixConstants.ChangeType.IDEAL_STATE);
  cache.refresh(accessor);
  Assert.assertEquals(accessor.getReadCount(PropertyType.CURRENTSTATES), NODE_NR);
  Assert.assertEquals(accessor.getReadCount(PropertyType.IDEALSTATES), 1);

  // Add more resources.
  accessor.clearReadCounters();
  _setupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_2", _PARTITIONS, STATE_MODEL);
  _setupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_2", _replica);
  _setupTool.addResourceToCluster(CLUSTER_NAME, "TestDB_3", _PARTITIONS, STATE_MODEL);
  _setupTool.rebalanceStorageCluster(CLUSTER_NAME, "TestDB_3", _replica);

  // Four resources in total; two of them are newly added, so only two ideal states are read.
  cache.notifyDataChange(HelixConstants.ChangeType.IDEAL_STATE);
  cache.refresh(accessor);
  Assert.assertEquals(accessor.getReadCount(PropertyType.IDEALSTATES), 2);
}
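
Taken together, the assertions above describe the selective-refresh contract of ClusterDataCache. The lines below are a minimal usage sketch of that pattern, not code from the Helix code base; they assume a connected HelixDataAccessor named accessor and only use calls that appear in the snippets on this page.

// Illustrative sketch: the cache re-reads from ZooKeeper only what it has been told changed.
ClusterDataCache cache = new ClusterDataCache("myCluster");
cache.refresh(accessor);   // the first refresh reads ideal states, live instances, current states and configs

cache.refresh(accessor);   // no change was flagged, so almost nothing is re-read

cache.notifyDataChange(HelixConstants.ChangeType.IDEAL_STATE);
cache.refresh(accessor);   // re-reads the ideal states from ZooKeeper on this refresh

cache.requireFullRefresh();
cache.refresh(accessor);   // everything is re-read, as on the first refresh
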
Use of org.apache.helix.controller.stages.ClusterDataCache in project helix by apache.
The class TestSkipBestPossibleCalculation, method test.
@Test()
public void test() throws Exception {
  int numResource = 5;
  for (int i = 0; i < numResource; i++) {
    String dbName = "TestDB_" + i;
    _setupTool.addResourceToCluster(CLUSTER_NAME, dbName, _PARTITIONS, STATE_MODEL,
        IdealState.RebalanceMode.CUSTOMIZED.name());
    _setupTool.rebalanceResource(CLUSTER_NAME, dbName, 3);
  }
  ClusterDataCache cache = new ClusterDataCache("CLUSTER_" + TestHelper.getTestClassName());
  cache.setTaskCache(false);
  cache.refresh(_manager.getHelixDataAccessor());

  ClusterEvent event = new ClusterEvent(CLUSTER_NAME, ClusterEventType.IdealStateChange);
  event.addAttribute(AttributeName.ClusterDataCache.name(), cache);
  runStage(_manager, event, new ResourceComputationStage());
  runStage(_manager, event, new CurrentStateComputationStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
  runStage(_manager, event, new BestPossibleStateCalcStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // An instance config change clears the cached assignments.
  cache.notifyDataChange(HelixConstants.ChangeType.INSTANCE_CONFIG);
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
  runStage(_manager, event, new BestPossibleStateCalcStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // So does an ideal state change.
  cache.notifyDataChange(HelixConstants.ChangeType.IDEAL_STATE);
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
  runStage(_manager, event, new BestPossibleStateCalcStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // And a live instance change.
  cache.notifyDataChange(HelixConstants.ChangeType.LIVE_INSTANCE);
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
  runStage(_manager, event, new BestPossibleStateCalcStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // A full refresh clears them as well.
  cache.requireFullRefresh();
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
  runStage(_manager, event, new BestPossibleStateCalcStage());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // A current state change alone does not invalidate the cached assignments.
  cache.notifyDataChange(HelixConstants.ChangeType.CURRENT_STATE);
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), numResource);

  // A resource config change does invalidate them.
  cache.notifyDataChange(HelixConstants.ChangeType.RESOURCE_CONFIG);
  cache.refresh(_manager.getHelixDataAccessor());
  Assert.assertEquals(cache.getCachedResourceAssignments().size(), 0);
}
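
The assertions above pin down which change notifications invalidate the assignments cached by BestPossibleStateCalcStage. The helper below is a hedged summary of that contract; its name is hypothetical and it is not part of the Helix API, it only restates what the test asserts.

// Change types that clear cache.getCachedResourceAssignments(), per the assertions above.
static boolean invalidatesCachedAssignments(HelixConstants.ChangeType type) {
  switch (type) {
    case INSTANCE_CONFIG:
    case IDEAL_STATE:
    case LIVE_INSTANCE:
    case RESOURCE_CONFIG:
      return true;   // assignments are recomputed on the next BestPossibleStateCalcStage run
    case CURRENT_STATE:
      return false;  // cached assignments survive a current-state-only change
    default:
      return true;   // conservative assumption for change types the test does not exercise
  }
}
// requireFullRefresh() also clears the cached assignments, as the test shows.
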
Use of org.apache.helix.controller.stages.ClusterDataCache in project helix by apache.
The class TestP2PMessageSemiAuto, method verifyP2PMessage.
private void verifyP2PMessage(String dbName, String instance, String expectedState,
    String expectedTriggerHost) {
  ClusterDataCache dataCache = new ClusterDataCache(CLUSTER_NAME);
  dataCache.refresh(_accessor);

  Map<String, LiveInstance> liveInstanceMap = dataCache.getLiveInstances();
  LiveInstance liveInstance = liveInstanceMap.get(instance);

  Map<String, CurrentState> currentStateMap =
      dataCache.getCurrentState(instance, liveInstance.getSessionId());
  Assert.assertNotNull(currentStateMap);
  CurrentState currentState = currentStateMap.get(dbName);
  Assert.assertNotNull(currentState);
  Assert.assertEquals(currentState.getPartitionStateMap().size(), PARTITION_NUMBER);

  for (String partition : currentState.getPartitionStateMap().keySet()) {
    String state = currentState.getState(partition);
    Assert.assertEquals(state, expectedState,
        dbName + " partition " + partition + "'s state differs from the expected state!");
    String triggerHost = currentState.getTriggerHost(partition);
    Assert.assertEquals(triggerHost, expectedTriggerHost,
        "Partition " + partition + "'s transition to Master was not triggered by the expected host!");
  }
}
Use of org.apache.helix.controller.stages.ClusterDataCache in project helix by apache.
The class BestPossibleExternalViewVerifier, method verifyState.
@Override
protected synchronized boolean verifyState() {
  try {
    PropertyKey.Builder keyBuilder = _accessor.keyBuilder();

    // Read the cluster once and verify against that snapshot.
    ClusterDataCache cache = new ClusterDataCache();
    cache.refresh(_accessor);

    Map<String, IdealState> idealStates = cache.getIdealStates();
    if (idealStates == null) {
      // The map is null when all ideal states have been dropped.
      idealStates = Collections.emptyMap();
    }

    // Filter out all resources that use the Task state model.
    Iterator<Map.Entry<String, IdealState>> it = idealStates.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<String, IdealState> pair = it.next();
      if (pair.getValue().getStateModelDefRef().equals(TaskConstants.STATE_MODEL_NAME)) {
        it.remove();
      }
    }

    // Verify live instances.
    if (_expectLiveInstances != null && !_expectLiveInstances.isEmpty()) {
      Set<String> actualLiveNodes = cache.getLiveInstances().keySet();
      if (!_expectLiveInstances.equals(actualLiveNodes)) {
        LOG.warn("Live instances are not as expected. Actual live nodes: " + actualLiveNodes.toString());
        return false;
      }
    }

    Map<String, ExternalView> extViews = _accessor.getChildValuesMap(keyBuilder.externalViews());
    if (extViews == null) {
      extViews = Collections.emptyMap();
    }

    // Filter resources if requested.
    if (_resources != null && !_resources.isEmpty()) {
      idealStates.keySet().retainAll(_resources);
      extViews.keySet().retainAll(_resources);
    }

    // Add an empty ideal state for any resource that only has an external view.
    for (String resource : extViews.keySet()) {
      if (!idealStates.containsKey(resource)) {
        ExternalView ev = extViews.get(resource);
        IdealState is = new IdealState(resource);
        is.getRecord().setSimpleFields(ev.getRecord().getSimpleFields());
        idealStates.put(resource, is);
      }
    }

    // Calculate the best possible state.
    BestPossibleStateOutput bestPossOutput = calcBestPossState(cache);
    Map<String, Map<Partition, Map<String, String>>> bestPossStateMap = bestPossOutput.getStateMap();

    // Overlay the expected error states.
    if (_errStates != null) {
      for (String resourceName : _errStates.keySet()) {
        Map<String, String> partErrStates = _errStates.get(resourceName);
        for (String partitionName : partErrStates.keySet()) {
          String instanceName = partErrStates.get(partitionName);
          if (!bestPossStateMap.containsKey(resourceName)) {
            bestPossStateMap.put(resourceName, new HashMap<Partition, Map<String, String>>());
          }
          Partition partition = new Partition(partitionName);
          if (!bestPossStateMap.get(resourceName).containsKey(partition)) {
            bestPossStateMap.get(resourceName).put(partition, new HashMap<String, String>());
          }
          bestPossStateMap.get(resourceName).get(partition)
              .put(instanceName, HelixDefinedState.ERROR.toString());
        }
      }
    }

    for (String resourceName : idealStates.keySet()) {
      ExternalView extView = extViews.get(resourceName);
      IdealState is = idealStates.get(resourceName);
      if (extView == null) {
        if (is.isExternalViewDisabled()) {
          continue;
        } else {
          LOG.error("externalView for " + resourceName + " is not available");
          return false;
        }
      }

      // Step 0: remove empty maps and DROPPED states from the best possible state.
      PartitionStateMap bpStateMap = bestPossOutput.getPartitionStateMap(resourceName);
      StateModelDefinition stateModelDef = cache.getStateModelDef(is.getStateModelDefRef());
      if (stateModelDef == null) {
        LOG.error("State model definition " + is.getStateModelDefRef() + " for resource "
            + is.getResourceName() + " not found!");
        return false;
      }

      boolean result = verifyExternalView(extView, bpStateMap, stateModelDef);
      if (!result) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("verifyExternalView fails for " + resourceName + "! ExternalView: " + extView
              + " BestPossibleState: " + bpStateMap);
        } else {
          LOG.warn("verifyExternalView fails for " + resourceName
              + "! ExternalView does not match BestPossibleState");
        }
        return false;
      }
    }
    return true;
  } catch (Exception e) {
    LOG.error("exception in verification", e);
    return false;
  }
}
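
In tests, this verifier is normally constructed through its builder and driven via verify(), as in the first snippet on this page. A minimal usage sketch, assuming ZK_ADDR and CLUSTER_NAME are defined as in those tests:

HelixClusterVerifier verifier =
    new BestPossibleExternalViewVerifier.Builder(CLUSTER_NAME)
        .setZkAddr(ZK_ADDR)
        .build();
// verify() re-evaluates verifyState() until the external views converge to the
// best possible state or a default timeout elapses.
Assert.assertTrue(verifier.verify());
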
Use of org.apache.helix.controller.stages.ClusterDataCache in project helix by apache.
The class TestAutoRebalanceStrategyImbalanceAssignment, method testAssignment.
private void testAssignment(int nPartitions, int nReplicas, int nNode) {
  final List<String> instanceNames = new ArrayList<>();
  for (int i = 0; i < nNode; i++) {
    instanceNames.add("localhost_" + i);
  }
  List<String> partitions = new ArrayList<>(nPartitions);
  for (int i = 0; i < nPartitions; i++) {
    partitions.add(Integer.toString(i));
  }
  LinkedHashMap<String, Integer> states = new LinkedHashMap<>(2);
  states.put("OFFLINE", 0);
  states.put("ONLINE", nReplicas);

  AutoRebalanceStrategy strategy = new AutoRebalanceStrategy(resourceName, partitions, states);
  ZNRecord record = strategy.computePartitionAssignment(instanceNames, instanceNames,
      new HashMap<String, Map<String, String>>(0), new ClusterDataCache());

  // Every partition should be assigned exactly nReplicas replicas.
  for (Map<String, String> stateMapping : record.getMapFields().values()) {
    Assert.assertEquals(stateMapping.size(), nReplicas);
  }
}