Use of org.apache.helix.PropertyKey in project helix by apache.
The class JobRebalancer, method computeBestPossiblePartitionState.
@Override
public ResourceAssignment computeBestPossiblePartitionState(ClusterDataCache clusterData,
    IdealState taskIs, Resource resource, CurrentStateOutput currStateOutput) {
  final String jobName = resource.getResourceName();
  LOG.debug("Compute Best Partition for job: " + jobName);
  // Fetch job configuration
  JobConfig jobCfg = clusterData.getJobConfig(jobName);
  if (jobCfg == null) {
    LOG.error("Job configuration is NULL for " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  String workflowResource = jobCfg.getWorkflow();
  // Fetch workflow configuration and context
  WorkflowConfig workflowCfg = clusterData.getWorkflowConfig(workflowResource);
  if (workflowCfg == null) {
    LOG.error("Workflow configuration is NULL for " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  WorkflowContext workflowCtx = clusterData.getWorkflowContext(workflowResource);
  if (workflowCtx == null) {
    LOG.error("Workflow context is NULL for " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  TargetState targetState = workflowCfg.getTargetState();
  if (targetState != TargetState.START && targetState != TargetState.STOP) {
    LOG.info("Target state is " + targetState.name() + " for workflow " + workflowResource
        + ". Stop scheduling job " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  // Stop the current run of the job if the workflow or the job is already in a final state
  // (failed or completed)
  TaskState workflowState = workflowCtx.getWorkflowState();
  TaskState jobState = workflowCtx.getJobState(jobName);
  // The job is already in a final state (completed/failed).
  if (workflowState == TaskState.FAILED || workflowState == TaskState.COMPLETED
      || jobState == TaskState.FAILED || jobState == TaskState.COMPLETED) {
    LOG.info(String.format(
        "Workflow %s or job %s is already failed or completed, workflow state (%s), job state (%s), clean up job IS.",
        workflowResource, jobName, workflowState, jobState));
    TaskUtil.cleanupJobIdealStateExtView(_manager.getHelixDataAccessor(), jobName);
    _rebalanceScheduler.removeScheduledRebalance(jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  if (!isWorkflowReadyForSchedule(workflowCfg)) {
    LOG.info("Job is not ready to be run since workflow is not ready " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  if (!isJobStarted(jobName, workflowCtx) && !isJobReadyToSchedule(jobName, workflowCfg,
      workflowCtx, getInCompleteJobCount(workflowCfg, workflowCtx), clusterData.getJobConfigMap())) {
    LOG.info("Job is not ready to run " + jobName);
    return buildEmptyAssignment(jobName, currStateOutput);
  }
  // Fetch any existing context information from the property store.
  JobContext jobCtx = clusterData.getJobContext(jobName);
  if (jobCtx == null) {
    jobCtx = new JobContext(new ZNRecord(TaskUtil.TASK_CONTEXT_KW));
    jobCtx.setStartTime(System.currentTimeMillis());
    jobCtx.setName(jobName);
    workflowCtx.setJobState(jobName, TaskState.IN_PROGRESS);
  }
  if (!TaskState.TIMED_OUT.equals(workflowCtx.getJobState(jobName))) {
    scheduleRebalanceForTimeout(jobCfg.getJobId(), jobCtx.getStartTime(), jobCfg.getTimeout());
  }
  // Grab the old assignment, or an empty one if it doesn't exist. The previous resource
  // assignment is fetched from the property store; this is required because of HELIX-230.
  ResourceAssignment prevAssignment = getPrevResourceAssignment(jobName);
  if (prevAssignment == null) {
    prevAssignment = new ResourceAssignment(jobName);
  }
  Set<String> liveInstances = jobCfg.getInstanceGroupTag() == null
      ? clusterData.getEnabledLiveInstances()
      : clusterData.getEnabledLiveInstancesWithTag(jobCfg.getInstanceGroupTag());
  if (liveInstances.isEmpty()) {
    LOG.error("No available instance found for job!");
  }
  // Will contain the list of partitions that must be explicitly dropped from the ideal state
  // that is stored in ZK.
  Set<Integer> partitionsToDrop = new TreeSet<Integer>();
  ResourceAssignment newAssignment = computeResourceMapping(jobName, workflowCfg, jobCfg,
      prevAssignment, liveInstances, currStateOutput, workflowCtx, jobCtx, partitionsToDrop,
      clusterData);
  HelixDataAccessor accessor = _manager.getHelixDataAccessor();
  PropertyKey propertyKey = accessor.keyBuilder().idealStates(jobName);
  taskIs = clusterData.getIdealState(jobName);
  if (!partitionsToDrop.isEmpty() && taskIs != null) {
    for (Integer pId : partitionsToDrop) {
      taskIs.getRecord().getMapFields().remove(pName(jobName, pId));
    }
    accessor.setProperty(propertyKey, taskIs);
  }
  // Update workflow and job context in the data cache and in ZK.
  clusterData.updateJobContext(jobName, jobCtx, _manager.getHelixDataAccessor());
  clusterData.updateWorkflowContext(workflowResource, workflowCtx, _manager.getHelixDataAccessor());
  setPrevResourceAssignment(jobName, newAssignment);
  LOG.debug("Job " + jobName + " new assignment "
      + Arrays.toString(newAssignment.getMappedPartitions().toArray()));
  return newAssignment;
}
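The PropertyKey work in this rebalancer reduces to building the job's IdealState key and writing the pruned IdealState back through the accessor. A minimal standalone sketch of that pattern, assuming a connected HelixManager named manager and a hypothetical job name and partition:

HelixDataAccessor accessor = manager.getHelixDataAccessor();
// Key for the job's IdealState node (job name is hypothetical)
PropertyKey idealStateKey = accessor.keyBuilder().idealStates("myWorkflow_myJob");
IdealState taskIs = accessor.getProperty(idealStateKey);
if (taskIs != null) {
  // Task partitions are named <jobName>_<partitionId>; drop partition 0 as an example
  taskIs.getRecord().getMapFields().remove("myWorkflow_myJob_0");
  accessor.setProperty(idealStateKey, taskIs);
}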
Use of org.apache.helix.PropertyKey in project helix by apache.
The class TaskRunner, method setRequestedState.
/**
 * Request a state change for a specific task.
 *
 * @param accessor connected Helix data accessor
 * @param instance the instance serving the task
 * @param sessionId the current session of the instance
 * @param resource the job name
 * @param partition the task partition name
 * @param state the requested state
 * @return true if the request was persisted, false otherwise
 */
private static boolean setRequestedState(HelixDataAccessor accessor, String instance,
    String sessionId, String resource, String partition, TaskPartitionState state) {
  LOG.debug(String.format("Requesting a state transition to %s for partition %s.", state, partition));
  try {
    PropertyKey.Builder keyBuilder = accessor.keyBuilder();
    PropertyKey key = keyBuilder.currentState(instance, sessionId, resource);
    CurrentState currStateDelta = new CurrentState(resource);
    currStateDelta.setRequestedState(partition, state.name());
    return accessor.updateProperty(key, currStateDelta);
  } catch (Exception e) {
    LOG.error(String.format("Error when requesting a state transition to %s for partition %s.",
        state, partition), e);
    return false;
  }
}
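A caller can exercise the same currentState key and partial-update pattern directly. A minimal sketch, assuming a connected HelixManager named manager and hypothetical instance, session, resource, and partition names:

HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
// Current-state node of the resource for one instance and session (all names hypothetical)
PropertyKey key = keyBuilder.currentState("localhost_12918", "session_0", "myJob");
// A sparse CurrentState carrying only the requested-state field for one task partition
CurrentState delta = new CurrentState("myJob");
delta.setRequestedState("myJob_3", TaskPartitionState.STOPPED.name());
boolean persisted = accessor.updateProperty(key, delta);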
Use of org.apache.helix.PropertyKey in project helix by apache.
The class TaskUtil, method cleanupIdealStateExtView.
/**
 * Cleans up IdealState and external view associated with a job/workflow resource.
 */
private static boolean cleanupIdealStateExtView(final HelixDataAccessor accessor,
    String workflowJobResource) {
  boolean success = true;
  PropertyKey isKey = accessor.keyBuilder().idealStates(workflowJobResource);
  if (accessor.getProperty(isKey) != null) {
    if (!accessor.removeProperty(isKey)) {
      LOG.warn(String.format(
          "Error occurred while trying to remove IdealState for %s. Failed to remove node %s.",
          workflowJobResource, isKey));
      success = false;
    }
  }
  // Delete external view
  PropertyKey evKey = accessor.keyBuilder().externalView(workflowJobResource);
  if (accessor.getProperty(evKey) != null) {
    if (!accessor.removeProperty(evKey)) {
      LOG.warn(String.format(
          "Error occurred while trying to remove ExternalView of resource %s. Failed to remove node %s.",
          workflowJobResource, evKey));
      success = false;
    }
  }
  return success;
}
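The helper itself is private, but the same check-then-remove pattern on the two PropertyKeys is easy to reproduce. A minimal sketch, assuming a connected HelixManager named manager and a hypothetical job resource name:

HelixDataAccessor accessor = manager.getHelixDataAccessor();
String resource = "myWorkflow_myJob"; // hypothetical job resource name
PropertyKey isKey = accessor.keyBuilder().idealStates(resource);
PropertyKey evKey = accessor.keyBuilder().externalView(resource);
// removeProperty returns false when the delete does not go through
if (accessor.getProperty(isKey) != null && !accessor.removeProperty(isKey)) {
  System.err.println("Failed to remove " + isKey.getPath());
}
if (accessor.getProperty(evKey) != null && !accessor.removeProperty(evKey)) {
  System.err.println("Failed to remove " + evKey.getPath());
}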
Use of org.apache.helix.PropertyKey in project helix by apache.
The class ZKHelixDataAccessor, method createChildren.
@Override
public <T extends HelixProperty> boolean[] createChildren(List<PropertyKey> keys, List<T> children) {
  // TODO: add validation
  int options = -1;
  List<String> paths = new ArrayList<String>();
  List<ZNRecord> records = new ArrayList<ZNRecord>();
  for (int i = 0; i < keys.size(); i++) {
    PropertyKey key = keys.get(i);
    PropertyType type = key.getType();
    String path = key.getPath();
    paths.add(path);
    HelixProperty value = children.get(i);
    records.add(value.getRecord());
    options = constructOptions(type);
  }
  return _baseDataAccessor.createChildren(paths, records, options);
}
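Callers typically build the key list and the property list in lockstep before invoking createChildren; the returned boolean array reports per-child success. A minimal sketch, assuming a connected HelixManager named manager and a hypothetical list of pending Message objects to deliver:

HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
List<PropertyKey> keys = new ArrayList<>();
List<Message> messages = new ArrayList<>();
for (Message msg : pendingMessages) { // pendingMessages is a hypothetical List<Message>
  // One message key per target instance and message id
  keys.add(keyBuilder.message(msg.getTgtName(), msg.getId()));
  messages.add(msg);
}
boolean[] created = accessor.createChildren(keys, messages);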
Use of org.apache.helix.PropertyKey in project helix by apache.
The class ZKHelixDataAccessor, method getPropertyStats.
@Override
public List<HelixProperty.Stat> getPropertyStats(List<PropertyKey> keys) {
  if (keys == null || keys.size() == 0) {
    return Collections.emptyList();
  }
  List<HelixProperty.Stat> propertyStats = new ArrayList<>(keys.size());
  List<String> paths = new ArrayList<>(keys.size());
  for (PropertyKey key : keys) {
    paths.add(key.getPath());
  }
  Stat[] zkStats = _baseDataAccessor.getStats(paths, 0);
  for (int i = 0; i < keys.size(); i++) {
    Stat zkStat = zkStats[i];
    HelixProperty.Stat propertyStat = null;
    if (zkStat != null) {
      propertyStat = new HelixProperty.Stat(zkStat.getVersion(), zkStat.getCtime(), zkStat.getMtime());
    }
    propertyStats.add(propertyStat);
  }
  return propertyStats;
}
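getPropertyStats is useful for checking existence and version of several nodes without reading their full ZNRecords. A minimal sketch, assuming a connected HelixManager named manager and a hypothetical resource name:

HelixDataAccessor accessor = manager.getHelixDataAccessor();
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
List<PropertyKey> keys = Arrays.asList(
    keyBuilder.idealStates("myResource"),
    keyBuilder.externalView("myResource"));
List<HelixProperty.Stat> stats = accessor.getPropertyStats(keys);
for (int i = 0; i < keys.size(); i++) {
  HelixProperty.Stat stat = stats.get(i);
  // A null Stat means the node behind that key does not exist
  if (stat != null) {
    System.out.println(keys.get(i).getPath() + " version=" + stat.getVersion());
  }
}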