Example usage of org.apache.helix.zookeeper.datamodel.ZNRecordBucketizer in the Apache Helix project:
class HelixStateTransitionHandler, method updateZKCurrentState.
// Update the ZK current state of the node
/**
 * Persists {@code _currentStateDelta} — the state computed by this transition — for
 * this message's partition to ZooKeeper under the target session's current-state
 * (or task current-state) path. For a sub-message of a batch message the update is
 * queued into a shared map instead of being written directly. On any failure the
 * state model is rolled back via {@code rollbackOnError} and the error is logged;
 * nothing is rethrown to the caller.
 */
private void updateZKCurrentState() {
HelixDataAccessor accessor = _manager.getHelixDataAccessor();
String partitionKey = _message.getPartitionName();
String resource = _message.getResourceName();
String sessionId = _message.getTgtSessionId();
String instanceName = _manager.getInstanceName();
try {
// We did not update _stateModel for DROPPED state, so it won't match _stateModel.
// Skip the write when the in-memory state model no longer matches the delta we
// computed (per the log message, TaskRunner already advanced it) — except when the
// delta is DROPPED, which deliberately never updates _stateModel (see note above).
if (!_stateModel.getCurrentState().equals(_currentStateDelta.getState(partitionKey)) && !_currentStateDelta.getState(partitionKey).equalsIgnoreCase(HelixDefinedState.DROPPED.toString())) {
logger.warn("_stateModel is already updated by TaskRunner. Skip ZK update in StateTransitionHandler");
return;
}
// Bucketization splits a large current-state record across child znodes; the
// bucketizer maps this partition to the bucket child that holds its entry.
int bucketSize = _message.getBucketSize();
ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(bucketSize);
// Task messages use the dedicated task current-state path unless that path has
// been disabled via _isTaskCurrentStatePathDisabled.
PropertyKey key = _isTaskMessage && !_isTaskCurrentStatePathDisabled ? accessor.keyBuilder().taskCurrentState(instanceName, sessionId, resource, bucketizer.getBucketName(partitionKey)) : accessor.keyBuilder().currentState(instanceName, sessionId, resource, bucketizer.getBucketName(partitionKey));
if (_message.getAttribute(Attributes.PARENT_MSG_ID) == null) {
// normal message
if (!accessor.updateProperty(key, _currentStateDelta)) {
throw new HelixException("Fails to persist current state back to ZK for resource " + resource + " partition: " + _message.getPartitionName());
}
} else {
// sub-message of a batch message
// Defer the write: the batch parent collects all sub-message deltas from this
// map and persists them together.
ConcurrentHashMap<String, CurrentStateUpdate> csUpdateMap = (ConcurrentHashMap<String, CurrentStateUpdate>) _notificationContext.get(MapKey.CURRENT_STATE_UPDATE.toString());
csUpdateMap.put(partitionKey, new CurrentStateUpdate(key, _currentStateDelta));
}
} catch (Exception e) {
// Best-effort recovery: roll the state model back and record the error; the
// exception is intentionally not propagated.
logger.error("Error when updating current-state ", e);
StateTransitionError error = new StateTransitionError(ErrorType.FRAMEWORK, ErrorCode.ERROR, e);
_stateModel.rollbackOnError(_message, _notificationContext, error);
_statusUpdateUtil.logError(_message, HelixStateTransitionHandler.class, e, "Error when update current-state ", _manager);
}
}
Example usage of org.apache.helix.zookeeper.datamodel.ZNRecordBucketizer in the Apache Helix project:
class ZKHelixDataAccessor, method setChildren.
/**
 * Batch-writes the given properties to their keys. EXTERNALVIEW values with a
 * non-zero bucket size are bucketized: the parent znode gets a meta record holding
 * only the simple fields, and the map/list fields are split across bucket child
 * znodes written in a second pass.
 *
 * @param keys property keys, positionally paired with {@code children}
 * @param children values to persist; {@code children.get(i)} goes to {@code keys.get(i)}
 * @return per-key success flags for the parent / non-bucketized writes
 *         (results of the bucket child writes are not reflected — see TODO below)
 */
@Override
public <T extends HelixProperty> boolean[] setChildren(List<PropertyKey> keys, List<T> children) {
int options = -1;
List<String> paths = new ArrayList<String>();
List<ZNRecord> records = new ArrayList<ZNRecord>();
// Key-aligned sparse lists: entry i is non-null only when keys.get(i) was bucketized.
List<List<String>> bucketizedPaths = new ArrayList<List<String>>(Collections.<List<String>>nCopies(keys.size(), null));
List<List<ZNRecord>> bucketizedRecords = new ArrayList<List<ZNRecord>>(Collections.<List<ZNRecord>>nCopies(keys.size(), null));
for (int i = 0; i < keys.size(); i++) {
PropertyKey key = keys.get(i);
PropertyType type = key.getType();
String path = key.getPath();
paths.add(path);
// NOTE(review): options is recomputed each iteration, so the last key's options
// are used for ALL writes below — confirm mixed-type key lists cannot occur here.
options = constructOptions(type);
HelixProperty value = children.get(i);
switch(type) {
case EXTERNALVIEW:
if (value.getBucketSize() == 0) {
records.add(value.getRecord());
} else {
// Parent meta node carries only the simple fields; the map/list fields are
// split into bucket child records.
ZNRecord metaRecord = new ZNRecord(value.getId());
metaRecord.setSimpleFields(value.getRecord().getSimpleFields());
records.add(metaRecord);
ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(value.getBucketSize());
Map<String, ZNRecord> map = bucketizer.bucketize(value.getRecord());
List<String> childBucketizedPaths = new ArrayList<String>();
List<ZNRecord> childBucketizedRecords = new ArrayList<ZNRecord>();
for (String bucketName : map.keySet()) {
childBucketizedPaths.add(path + "/" + bucketName);
childBucketizedRecords.add(map.get(bucketName));
}
bucketizedPaths.set(i, childBucketizedPaths);
bucketizedRecords.set(i, childBucketizedRecords);
}
break;
case STATEMODELDEFS:
// NOTE(review): the path was already added above, but an invalid value adds no
// record, leaving `paths` and `records` misaligned so every later record would be
// written to the wrong path — confirm invalid STATEMODELDEFS cannot reach here.
if (value.isValid()) {
records.add(value.getRecord());
}
break;
default:
records.add(value.getRecord());
break;
}
}
// set non-bucketized nodes or parent nodes of bucketized nodes
boolean[] success = _baseDataAccessor.setChildren(paths, records, options);
// set bucketized nodes
// Only write buckets whose parent meta node was persisted successfully.
List<String> allBucketizedPaths = new ArrayList<String>();
List<ZNRecord> allBucketizedRecords = new ArrayList<ZNRecord>();
for (int i = 0; i < keys.size(); i++) {
if (success[i] && bucketizedPaths.get(i) != null) {
allBucketizedPaths.addAll(bucketizedPaths.get(i));
allBucketizedRecords.addAll(bucketizedRecords.get(i));
}
}
// TODO: set success accordingly
_baseDataAccessor.setChildren(allBucketizedPaths, allBucketizedRecords, options);
return success;
}
Example usage of org.apache.helix.zookeeper.datamodel.ZNRecordBucketizer in the Apache Helix project:
class ParticipantManager, method carryOverPreviousCurrentState.
/**
* carry over current-states from last sessions
* set to initial state for current session only when state doesn't exist in current session
*/
public static synchronized void carryOverPreviousCurrentState(HelixDataAccessor dataAccessor, String instanceName, String sessionId, StateMachineEngine stateMachineEngine, boolean setToInitState) {
PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
List<String> sessions = dataAccessor.getChildNames(keyBuilder.sessions(instanceName));
// Pass 1: copy each previous session's current states into the current session.
for (String session : sessions) {
if (session.equals(sessionId)) {
continue;
}
// Ignore if any current states in the previous folder cannot be read.
List<CurrentState> lastCurStates = dataAccessor.getChildValues(keyBuilder.currentStates(instanceName, session), false);
for (CurrentState lastCurState : lastCurStates) {
LOG.info("Carrying over old session: " + session + ", resource: " + lastCurState.getId() + " to current session: " + sessionId + ", setToInitState: " + setToInitState);
String stateModelDefRef = lastCurState.getStateModelDefRef();
if (stateModelDefRef == null) {
LOG.error("skip carry-over because previous current state doesn't have a state model definition. previous current-state: " + lastCurState);
continue;
}
// Note: this check is not necessary due to TaskCurrentStates, but keep it for backwards compatibility
if (stateModelDefRef.equals(TaskConstants.STATE_MODEL_NAME)) {
continue;
}
StateModelDefinition stateModelDef = dataAccessor.getProperty(keyBuilder.stateModelDef(stateModelDefRef));
String initState = stateModelDef.getInitialState();
// Decide the state each partition should carry over with: either the state
// model's initial state, or the live state held by the in-memory state model.
Map<String, String> partitionExpectedStateMap = new HashMap<>();
if (setToInitState) {
lastCurState.getPartitionStateMap().keySet().forEach(partition -> partitionExpectedStateMap.put(partition, initState));
} else {
String factoryName = lastCurState.getStateModelFactoryName();
StateModelFactory<? extends StateModel> stateModelFactory = stateMachineEngine.getStateModelFactory(stateModelDefRef, factoryName);
// Partitions with no live state model are omitted and thus not carried over.
lastCurState.getPartitionStateMap().keySet().forEach(partition -> {
StateModel stateModel = stateModelFactory.getStateModel(lastCurState.getResourceName(), partition);
if (stateModel != null) {
partitionExpectedStateMap.put(partition, stateModel.getCurrentState());
}
});
}
BaseDataAccessor<ZNRecord> baseAccessor = dataAccessor.getBaseDataAccessor();
String curStatePath = keyBuilder.currentState(instanceName, sessionId, lastCurState.getResourceName()).getPath();
if (lastCurState.getBucketSize() > 0) {
// update parent node
// Bucketized resource: the parent node holds only the simple fields; the
// per-partition states live in bucket child nodes updated below.
ZNRecord metaRecord = new ZNRecord(lastCurState.getId());
metaRecord.setSimpleFields(lastCurState.getRecord().getSimpleFields());
DataUpdater<ZNRecord> metaRecordUpdater = new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap, new CurrentState(metaRecord));
boolean success = baseAccessor.update(curStatePath, metaRecordUpdater, AccessOption.PERSISTENT);
if (success) {
// update current state buckets
ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(lastCurState.getBucketSize());
Map<String, ZNRecord> map = bucketizer.bucketize(lastCurState.getRecord());
List<String> paths = new ArrayList<String>();
List<DataUpdater<ZNRecord>> updaters = new ArrayList<DataUpdater<ZNRecord>>();
for (String bucketName : map.keySet()) {
paths.add(curStatePath + "/" + bucketName);
updaters.add(new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap, new CurrentState(map.get(bucketName))));
}
baseAccessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
}
} else {
// Non-bucketized resource: a single atomic update on the current-state node.
dataAccessor.getBaseDataAccessor().update(curStatePath, new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap, lastCurState), AccessOption.PERSISTENT);
}
}
}
/**
 * remove previous current state parent nodes
 */
// Pass 2: delete each previous session's current-state subtree now that its
// contents have been carried over.
for (String session : sessions) {
if (session.equals(sessionId)) {
continue;
}
PropertyKey currentStatesProperty = keyBuilder.currentStates(instanceName, session);
String path = currentStatesProperty.getPath();
LOG.info("Removing current states from previous sessions. path: {}", path);
if (!dataAccessor.removeProperty(currentStatesProperty)) {
throw new ZkClientException("Failed to delete " + path);
}
}
}
Example usage of org.apache.helix.zookeeper.datamodel.ZNRecordBucketizer in the Apache Helix project:
class ZKHelixDataAccessor, method setProperty.
/**
 * Persists a single Helix property at its key's path. IDEALSTATES and
 * EXTERNALVIEW values with a positive bucket size are stored bucketized: the
 * parent znode keeps only the simple fields, and the map/list fields are split
 * across bucket child znodes.
 *
 * @param key path and type of the property to write
 * @param value the property; must pass {@code isValid()}
 * @return whether the parent / non-bucketized write succeeded (bucket child
 *         write results are not folded in — see TODO below)
 * @throws HelixMetaDataAccessException if {@code value} is not valid
 */
@Override
public <T extends HelixProperty> boolean setProperty(PropertyKey key, T value) {
  PropertyType type = key.getType();
  if (!value.isValid()) {
    throw new HelixMetaDataAccessException("The ZNRecord for " + type + " is not valid.");
  }
  String znodePath = key.getPath();
  int accessOptions = constructOptions(type);

  // Only ideal states and external views support bucketized storage.
  boolean bucketizable = type == PropertyType.IDEALSTATES || type == PropertyType.EXTERNALVIEW;
  if (!bucketizable || value.getBucketSize() <= 0) {
    // Plain write: one znode holds the complete record.
    return _baseDataAccessor.set(znodePath, value.getRecord(), accessOptions);
  }

  // Bucketized write: the parent carries only the simple fields.
  ZNRecord parentRecord = new ZNRecord(value.getId());
  parentRecord.setSimpleFields(value.getRecord().getSimpleFields());
  boolean parentPersisted = _baseDataAccessor.set(znodePath, parentRecord, accessOptions);
  if (parentPersisted) {
    // Split the map/list fields into bucket records and write them as children.
    Map<String, ZNRecord> buckets = new ZNRecordBucketizer(value.getBucketSize()).bucketize(value.getRecord());
    List<String> bucketPaths = new ArrayList<String>(buckets.size());
    List<ZNRecord> bucketRecords = new ArrayList<ZNRecord>(buckets.size());
    for (Map.Entry<String, ZNRecord> bucket : buckets.entrySet()) {
      bucketPaths.add(znodePath + "/" + bucket.getKey());
      bucketRecords.add(bucket.getValue());
    }
    // TODO: fold the child-write results into the returned success flag
    _baseDataAccessor.setChildren(bucketPaths, bucketRecords, accessOptions);
  }
  return parentPersisted;
}
Aggregations