Use of org.apache.helix.zookeeper.zkclient.DataUpdater in project helix by apache.
From class ZKHelixAdmin, method processMaintenanceMode:
/**
* Helper method for enabling/disabling maintenance mode.
* @param clusterName
* @param enabled
* @param reason
* @param internalReason
* @param customFields
* @param triggeringEntity
*/
private void processMaintenanceMode(String clusterName, final boolean enabled,
    final String reason, final MaintenanceSignal.AutoTriggerReason internalReason,
    final Map<String, String> customFields,
    final MaintenanceSignal.TriggeringEntity triggeringEntity) {
  HelixDataAccessor accessor =
      new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_zkClient));
  PropertyKey.Builder keyBuilder = accessor.keyBuilder();
  logger.info("Cluster {} {} {} maintenance mode for reason {}.", clusterName,
      triggeringEntity == MaintenanceSignal.TriggeringEntity.CONTROLLER ? "automatically" : "manually",
      enabled ? "enters" : "exits", reason == null ? "NULL" : reason);
  final long currentTime = System.currentTimeMillis();
  if (!enabled) {
    // Exit maintenance mode
    accessor.removeProperty(keyBuilder.maintenance());
  } else {
    // Enter maintenance mode
    MaintenanceSignal maintenanceSignal = new MaintenanceSignal(MAINTENANCE_ZNODE_ID);
    if (reason != null) {
      maintenanceSignal.setReason(reason);
    }
    maintenanceSignal.setTimestamp(currentTime);
    maintenanceSignal.setTriggeringEntity(triggeringEntity);
    switch (triggeringEntity) {
      case CONTROLLER:
        // autoEnable
        maintenanceSignal.setAutoTriggerReason(internalReason);
        break;
      case USER:
      case UNKNOWN:
        // manuallyEnable
        if (customFields != null && !customFields.isEmpty()) {
          // Enter all custom fields provided by the user
          Map<String, String> simpleFields = maintenanceSignal.getRecord().getSimpleFields();
          for (Map.Entry<String, String> entry : customFields.entrySet()) {
            if (!simpleFields.containsKey(entry.getKey())) {
              simpleFields.put(entry.getKey(), entry.getValue());
            }
          }
        }
        break;
    }
    if (!accessor.createMaintenance(maintenanceSignal)) {
      throw new HelixException("Failed to create maintenance signal!");
    }
  }
  // Record a MaintenanceSignal history
  if (!accessor.getBaseDataAccessor().update(keyBuilder.controllerLeaderHistory().getPath(),
      (DataUpdater<ZNRecord>) oldRecord -> {
        try {
          if (oldRecord == null) {
            oldRecord = new ZNRecord(PropertyType.HISTORY.toString());
          }
          return new ControllerHistory(oldRecord).updateMaintenanceHistory(enabled, reason,
              currentTime, internalReason, customFields, triggeringEntity);
        } catch (IOException e) {
          logger.error("Failed to update maintenance history! Exception: {}", e);
          return oldRecord;
        }
      }, AccessOption.PERSISTENT)) {
    logger.error("Failed to write maintenance history to ZK!");
  }
}
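The history write at the end of this method is the typical read-modify-write pattern for a DataUpdater against a BaseDataAccessor: read the current ZNRecord, merge in the change, and return the record to be written back. Below is a minimal standalone sketch of that pattern; the znode path "/EXAMPLE/HISTORY" and the simple-field name "lastMaintenanceReason" are illustrative assumptions, not Helix constants.

// Minimal sketch of a lambda DataUpdater doing a read-modify-write through the
// BaseDataAccessor built above. The path and field name are made up for illustration.
BaseDataAccessor<ZNRecord> baseAccessor = accessor.getBaseDataAccessor();
boolean updated = baseAccessor.update("/EXAMPLE/HISTORY", (DataUpdater<ZNRecord>) oldRecord -> {
  // Start from an empty record if the znode does not exist yet.
  ZNRecord record = (oldRecord == null) ? new ZNRecord("HISTORY") : oldRecord;
  record.setSimpleField("lastMaintenanceReason", "cluster upgrade");
  return record; // the returned record is what gets written back to ZooKeeper
}, AccessOption.PERSISTENT);
if (!updated) {
  logger.error("Failed to write example history to ZK!");
}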
Use of org.apache.helix.zookeeper.zkclient.DataUpdater in project helix by apache.
From class ZKUtil, method createOrUpdate:
public static void createOrUpdate(RealmAwareZkClient client, String path, final ZNRecord record,
    final boolean persistent, final boolean mergeOnUpdate) {
  int retryCount = 0;
  while (retryCount < RETRYLIMIT) {
    try {
      if (client.exists(path)) {
        DataUpdater<ZNRecord> updater = new DataUpdater<ZNRecord>() {
          @Override
          public ZNRecord update(ZNRecord currentData) {
            if (currentData != null && mergeOnUpdate) {
              currentData.update(record);
              return currentData;
            }
            return record;
          }
        };
        client.updateDataSerialized(path, updater);
      } else {
        CreateMode mode = (persistent) ? CreateMode.PERSISTENT : CreateMode.EPHEMERAL;
        client.create(path, record, mode);
      }
      break;
    } catch (Exception e) {
      retryCount = retryCount + 1;
      logger.warn("Exception trying to update " + path + " Exception:" + e.getMessage()
          + ". Will retry.");
    }
  }
}
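A hedged usage sketch: with mergeOnUpdate set to true, the DataUpdater applies the supplied record on top of whatever is already stored (via ZNRecord.update), and the znode is simply created when it is missing. The zkClient variable, path, and field values below are illustrative assumptions.

// Hypothetical caller; zkClient is assumed to be an already-connected RealmAwareZkClient,
// and the path/field values are made up for the example.
ZNRecord instanceConfig = new ZNRecord("localhost_12913");
instanceConfig.setSimpleField("HELIX_ENABLED", "true");
// Creates the znode if absent; otherwise applies this record on top of the stored one.
ZKUtil.createOrUpdate(zkClient, "/MYCLUSTER/CONFIGS/PARTICIPANT/localhost_12913",
    instanceConfig, true /* persistent */, true /* mergeOnUpdate */);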
Use of org.apache.helix.zookeeper.zkclient.DataUpdater in project helix by apache.
From class ZKUtil, method createOrReplace:
public static void createOrReplace(RealmAwareZkClient client, String path, final ZNRecord record,
    final boolean persistent) {
  int retryCount = 0;
  while (retryCount < RETRYLIMIT) {
    try {
      if (client.exists(path)) {
        DataUpdater<Object> updater = new DataUpdater<Object>() {
          @Override
          public Object update(Object currentData) {
            return record;
          }
        };
        client.updateDataSerialized(path, updater);
      } else {
        CreateMode mode = (persistent) ? CreateMode.PERSISTENT : CreateMode.EPHEMERAL;
        client.create(path, record, mode);
      }
      break;
    } catch (Exception e) {
      retryCount = retryCount + 1;
      logger.warn("Exception trying to createOrReplace " + path + " Exception:" + e.getMessage()
          + ". Will retry.");
    }
  }
}
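Unlike createOrUpdate, this variant ignores the existing data entirely: the DataUpdater returns the new record unconditionally, so the call behaves like an upsert that overwrites. A hedged usage sketch follows; the path, record contents, and zkClient variable are illustrative assumptions.

// Hypothetical caller; overwrites whatever currently lives at the path, or creates it.
ZNRecord resourceRecord = new ZNRecord("myResource");
resourceRecord.setSimpleField("NUM_PARTITIONS", "4");
ZKUtil.createOrReplace(zkClient, "/MYCLUSTER/IDEALSTATES/myResource", resourceRecord,
    true /* persistent */);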
Use of org.apache.helix.zookeeper.zkclient.DataUpdater in project helix by apache.
From class ZKUtil, method createOrMerge:
public static void createOrMerge(RealmAwareZkClient client, String path, final ZNRecord record,
    final boolean persistent, final boolean mergeOnUpdate) {
  int retryCount = 0;
  while (retryCount < RETRYLIMIT) {
    try {
      if (client.exists(path)) {
        DataUpdater<ZNRecord> updater = new DataUpdater<ZNRecord>() {
          @Override
          public ZNRecord update(ZNRecord currentData) {
            if (currentData != null && mergeOnUpdate) {
              currentData.merge(record);
              return currentData;
            }
            return record;
          }
        };
        client.updateDataSerialized(path, updater);
      } else {
        CreateMode mode = (persistent) ? CreateMode.PERSISTENT : CreateMode.EPHEMERAL;
        if (record.getDeltaList().size() > 0) {
          ZNRecord value = new ZNRecord(record.getId());
          value.merge(record);
          client.create(path, value, mode);
        } else {
          client.create(path, record, mode);
        }
      }
      break;
    } catch (Exception e) {
      retryCount = retryCount + 1;
      logger.warn("Exception trying to update " + path + " Exception:" + e.getMessage()
          + ". Will retry.");
    }
  }
}
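The merge variant differs from createOrUpdate in that its DataUpdater calls ZNRecord.merge, which is intended to combine list and map entries with the stored record rather than replace them wholesale, and it also applies any pending delta list when creating a fresh znode. A hedged usage sketch; the path, field names, and zkClient are illustrative assumptions.

// Hypothetical caller; merges a list entry into any record already stored at the path.
// (Collections is java.util.Collections.)
ZNRecord statusUpdate = new ZNRecord("statusUpdates");
statusUpdate.setListField("recentMessages",
    Collections.singletonList("carry-over completed"));
ZKUtil.createOrMerge(zkClient, "/MYCLUSTER/STATUSUPDATES/localhost_12913",
    statusUpdate, true /* persistent */, true /* mergeOnUpdate */);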
Use of org.apache.helix.zookeeper.zkclient.DataUpdater in project helix by apache.
From class ParticipantManager, method carryOverPreviousCurrentState:
/**
* carry over current-states from last sessions
* set to initial state for current session only when state doesn't exist in current session
*/
public static synchronized void carryOverPreviousCurrentState(HelixDataAccessor dataAccessor,
    String instanceName, String sessionId, StateMachineEngine stateMachineEngine,
    boolean setToInitState) {
  PropertyKey.Builder keyBuilder = dataAccessor.keyBuilder();
  List<String> sessions = dataAccessor.getChildNames(keyBuilder.sessions(instanceName));
  for (String session : sessions) {
    if (session.equals(sessionId)) {
      continue;
    }
    // Ignore if any current states in the previous folder cannot be read.
    List<CurrentState> lastCurStates =
        dataAccessor.getChildValues(keyBuilder.currentStates(instanceName, session), false);
    for (CurrentState lastCurState : lastCurStates) {
      LOG.info("Carrying over old session: " + session + ", resource: " + lastCurState.getId()
          + " to current session: " + sessionId + ", setToInitState: " + setToInitState);
      String stateModelDefRef = lastCurState.getStateModelDefRef();
      if (stateModelDefRef == null) {
        LOG.error("skip carry-over because previous current state doesn't have a state model "
            + "definition. previous current-state: " + lastCurState);
        continue;
      }
      // Note: this check is not necessary due to TaskCurrentStates, but keep it for backwards compatibility
      if (stateModelDefRef.equals(TaskConstants.STATE_MODEL_NAME)) {
        continue;
      }
      StateModelDefinition stateModelDef =
          dataAccessor.getProperty(keyBuilder.stateModelDef(stateModelDefRef));
      String initState = stateModelDef.getInitialState();
      Map<String, String> partitionExpectedStateMap = new HashMap<>();
      if (setToInitState) {
        lastCurState.getPartitionStateMap().keySet()
            .forEach(partition -> partitionExpectedStateMap.put(partition, initState));
      } else {
        String factoryName = lastCurState.getStateModelFactoryName();
        StateModelFactory<? extends StateModel> stateModelFactory =
            stateMachineEngine.getStateModelFactory(stateModelDefRef, factoryName);
        lastCurState.getPartitionStateMap().keySet().forEach(partition -> {
          StateModel stateModel =
              stateModelFactory.getStateModel(lastCurState.getResourceName(), partition);
          if (stateModel != null) {
            partitionExpectedStateMap.put(partition, stateModel.getCurrentState());
          }
        });
      }
      BaseDataAccessor<ZNRecord> baseAccessor = dataAccessor.getBaseDataAccessor();
      String curStatePath =
          keyBuilder.currentState(instanceName, sessionId, lastCurState.getResourceName()).getPath();
      if (lastCurState.getBucketSize() > 0) {
        // update parent node
        ZNRecord metaRecord = new ZNRecord(lastCurState.getId());
        metaRecord.setSimpleFields(lastCurState.getRecord().getSimpleFields());
        DataUpdater<ZNRecord> metaRecordUpdater =
            new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap,
                new CurrentState(metaRecord));
        boolean success = baseAccessor.update(curStatePath, metaRecordUpdater, AccessOption.PERSISTENT);
        if (success) {
          // update current state buckets
          ZNRecordBucketizer bucketizer = new ZNRecordBucketizer(lastCurState.getBucketSize());
          Map<String, ZNRecord> map = bucketizer.bucketize(lastCurState.getRecord());
          List<String> paths = new ArrayList<String>();
          List<DataUpdater<ZNRecord>> updaters = new ArrayList<DataUpdater<ZNRecord>>();
          for (String bucketName : map.keySet()) {
            paths.add(curStatePath + "/" + bucketName);
            updaters.add(new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap,
                new CurrentState(map.get(bucketName))));
          }
          baseAccessor.updateChildren(paths, updaters, AccessOption.PERSISTENT);
        }
      } else {
        dataAccessor.getBaseDataAccessor().update(curStatePath,
            new CurStateCarryOverUpdater(sessionId, partitionExpectedStateMap, lastCurState),
            AccessOption.PERSISTENT);
      }
    }
  }
  /**
   * remove previous current state parent nodes
   */
  for (String session : sessions) {
    if (session.equals(sessionId)) {
      continue;
    }
    PropertyKey currentStatesProperty = keyBuilder.currentStates(instanceName, session);
    String path = currentStatesProperty.getPath();
    LOG.info("Removing current states from previous sessions. path: {}", path);
    if (!dataAccessor.removeProperty(currentStatesProperty)) {
      throw new ZkClientException("Failed to delete " + path);
    }
  }
}
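CurStateCarryOverUpdater itself is not shown above. As a rough sketch of what such a DataUpdater does, the snippet below seeds a partition's state in the new session's current-state record only when no state has been written yet. The map-field key "CURRENT_STATE" follows the CurrentState record layout, but treat the whole block as an illustration rather than the real implementation.

// Simplified, illustrative stand-in for CurStateCarryOverUpdater (not the real class).
// partitionExpectedStateMap, curStatePath, and baseAccessor are the variables from the
// method above; DataUpdater has a single abstract method, so a lambda works.
DataUpdater<ZNRecord> carryOverSketch = currentData -> {
  ZNRecord record = (currentData == null) ? new ZNRecord("myResource") : currentData;
  for (Map.Entry<String, String> entry : partitionExpectedStateMap.entrySet()) {
    // Only seed a partition's state if the new session has not written one yet.
    record.getMapFields().computeIfAbsent(entry.getKey(), k -> new HashMap<>())
        .putIfAbsent("CURRENT_STATE", entry.getValue());
  }
  return record;
};
baseAccessor.update(curStatePath, carryOverSketch, AccessOption.PERSISTENT);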