Use of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot by linkedin.

The class MonitorJobRunner, method createJob:

public Long createJob() {
  Long jobExecutionId = null;
  try {
    LOG.info("Creating monitor job");
    JobDTO jobSpec = new JobDTO();
    jobSpec.setJobName(monitorJobContext.getJobName());
    jobSpec.setScheduleStartTime(System.currentTimeMillis());
    jobSpec.setStatus(JobStatus.SCHEDULED);
    jobExecutionId = jobDAO.save(jobSpec);
    LOG.info("Created JobSpec {} with jobExecutionId {}", jobSpec, jobExecutionId);
  } catch (Exception e) {
    LOG.error("Exception in creating monitor job", e);
  }
  return jobExecutionId;
}
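
The createJob snippets in this section all follow the same JobDTO lifecycle: a job record is created with status SCHEDULED, persisted through the job DAO to obtain its jobExecutionId, and later flipped to COMPLETED or FAILED (as synchronousBackFill does further down). A minimal sketch of that lifecycle, assuming only the JobDTO and JobManager calls that appear in these snippets; the job name and surrounding wiring are illustrative:

// Sketch of the JobDTO lifecycle implied by the snippets in this section.
JobDTO jobSpec = new JobDTO();
jobSpec.setJobName("exampleJob");                         // hypothetical job name
jobSpec.setScheduleStartTime(System.currentTimeMillis());
jobSpec.setStatus(JobStatus.SCHEDULED);                   // every runner creates jobs as SCHEDULED
Long jobExecutionId = jobDAO.save(jobSpec);               // persisting yields the job execution id

// Later, once the job's tasks have finished, the stored record is updated in place:
JobDTO persisted = jobDAO.findById(jobExecutionId);
persisted.setStatus(JobStatus.COMPLETED);                 // or JobStatus.FAILED after exhausted retries
jobDAO.save(persisted);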
Use of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot by linkedin.

The class AlertJobRunner, method createJob:

private long createJob(DateTime monitoringWindowStartTime, DateTime monitoringWindowEndTime) {
  Long jobExecutionId = null;
  try {
    JobDTO jobSpec = new JobDTO();
    jobSpec.setJobName(alertJobContext.getJobName());
    jobSpec.setWindowStartTime(monitoringWindowStartTime.getMillis());
    jobSpec.setWindowEndTime(monitoringWindowEndTime.getMillis());
    jobSpec.setScheduleStartTime(System.currentTimeMillis());
    jobSpec.setStatus(JobStatus.SCHEDULED);
    jobExecutionId = jobDAO.save(jobSpec);
    LOG.info("Created alert job {} with jobExecutionId {}", jobSpec, jobExecutionId);
  } catch (Exception e) {
    LOG.error("Exception in creating alert job", e);
  }
  return jobExecutionId;
}
Use of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot by linkedin.

The class AlertJobRunnerV2, method createJob:

private long createJob(DateTime monitoringWindowStartTime, DateTime monitoringWindowEndTime) {
  Long jobExecutionId = null;
  try {
    JobDTO jobSpec = new JobDTO();
    jobSpec.setJobName(alertJobContext.getJobName());
    jobSpec.setWindowStartTime(monitoringWindowStartTime.getMillis());
    jobSpec.setWindowEndTime(monitoringWindowEndTime.getMillis());
    jobSpec.setScheduleStartTime(System.currentTimeMillis());
    jobSpec.setStatus(JobConstants.JobStatus.SCHEDULED);
    jobExecutionId = jobDAO.save(jobSpec);
    LOG.info("Created alert job {} with jobExecutionId {}", jobSpec, jobExecutionId);
  } catch (Exception e) {
    LOG.error("Exception in creating alert job", e);
  }
  return jobExecutionId;
}
Use of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot by linkedin.

The class DetectionJobRunner, method createJob:

private long createJob(String jobName, DateTime monitoringWindowStartTime, DateTime monitoringWindowEndTime) {
  Long jobExecutionId = null;
  try {
    JobDTO jobSpec = new JobDTO();
    jobSpec.setJobName(jobName);
    jobSpec.setWindowStartTime(monitoringWindowStartTime.getMillis());
    jobSpec.setWindowEndTime(monitoringWindowEndTime.getMillis());
    jobSpec.setScheduleStartTime(System.currentTimeMillis());
    jobSpec.setStatus(JobStatus.SCHEDULED);
    jobExecutionId = DAO_REGISTRY.getJobDAO().save(jobSpec);
    LOG.info("Created anomalyJobSpec {} with jobExecutionId {}", jobSpec, jobExecutionId);
  } catch (Exception e) {
    LOG.error("Exception in creating detection job", e);
  }
  return jobExecutionId;
}
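
Note that the three createJob variants above that declare a primitive long return type (AlertJobRunner, AlertJobRunnerV2, DetectionJobRunner) leave jobExecutionId as null when jobDAO.save throws, so the auto-unboxing on return would itself throw a NullPointerException. A hedged defensive variant, a sketch only and not the project's code, keeps the boxed Long return as MonitorJobRunner.createJob does and lets the caller check for null:

// Sketch only: a boxed return type avoids the unboxing NPE when save() fails.
// Uses the same JobDTO / job DAO calls shown in the snippets above.
private Long createJobSafely(String jobName, DateTime windowStart, DateTime windowEnd) {
  Long jobExecutionId = null;
  try {
    JobDTO jobSpec = new JobDTO();
    jobSpec.setJobName(jobName);
    jobSpec.setWindowStartTime(windowStart.getMillis());
    jobSpec.setWindowEndTime(windowEnd.getMillis());
    jobSpec.setScheduleStartTime(System.currentTimeMillis());
    jobSpec.setStatus(JobStatus.SCHEDULED);
    jobExecutionId = DAO_REGISTRY.getJobDAO().save(jobSpec);
  } catch (Exception e) {
    LOG.error("Exception in creating job", e);
  }
  return jobExecutionId; // caller must handle a null result
}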
Use of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot by linkedin.

The class DetectionJobScheduler, method synchronousBackFill:

/**
 * Unlike the asynchronous backfill in runBackfill, this method returns only after the backfill is done.
 * It monitors the status of the backfill tasks and returns once they are all completed.
 * @param functionId the id of the anomaly function to backfill
 * @param backfillStartTime the start of the monitoring window for the backfill
 * @param backfillEndTime the end of the monitoring window for the backfill
 * @param force set to false to resume from a previous backfill, if one exists
 */
public void synchronousBackFill(long functionId, DateTime backfillStartTime, DateTime backfillEndTime, boolean force) {
  Long jobExecutionId = runBackfill(functionId, backfillStartTime, backfillEndTime, force);
  if (jobExecutionId == null) {
    LOG.warn("Unable to perform backfill on function Id {} between {} and {}", functionId, backfillStartTime, backfillEndTime);
    return;
  }
  TaskManager taskDAO = DAO_REGISTRY.getTaskDAO();
  JobManager jobDAO = DAO_REGISTRY.getJobDAO();
  JobDTO jobDTO = null;
  List<TaskDTO> scheduledTaskDTO = taskDAO.findByJobIdStatusNotIn(jobExecutionId, TaskConstants.TaskStatus.COMPLETED);
  int retryCounter = 0;
  while (scheduledTaskDTO.size() > 0) {
    boolean hasFailedTask = false;
    for (TaskDTO taskDTO : scheduledTaskDTO) {
      if (taskDTO.getStatus() == TaskConstants.TaskStatus.FAILED) {
        if (retryCounter >= MAX_BACKFILL_RETRY) {
          LOG.warn("The backfill of anomaly function {} (task id: {}) failed. Stop retry.", functionId, jobExecutionId);
          // Set job to be failed
          jobDTO = jobDAO.findById(jobExecutionId);
          if (jobDTO.getStatus() != JobConstants.JobStatus.FAILED) {
            jobDTO.setStatus(JobConstants.JobStatus.FAILED);
            jobDAO.save(jobDTO);
          }
          return;
        }
        hasFailedTask = true;
        LOG.warn("The backfill of anomaly function {} (task id: {}) failed. Retry count: {}", functionId, jobExecutionId, retryCounter);
      }
    }
    if (hasFailedTask) {
      retryCounter++;
      jobExecutionId = runBackfill(functionId, backfillStartTime, backfillEndTime, force);
    }
    try {
      TimeUnit.SECONDS.sleep(SYNC_SLEEP_SECONDS);
    } catch (InterruptedException e) {
      LOG.warn("The monitoring thread for anomaly function {} (task id: {}) backfill is awakened.", functionId, jobExecutionId);
    }
    scheduledTaskDTO = taskDAO.findByJobIdStatusNotIn(jobExecutionId, TaskConstants.TaskStatus.COMPLETED);
  }
  // Set job to be completed
  jobDTO = jobDAO.findById(jobExecutionId);
  if (jobDTO.getStatus() != JobConstants.JobStatus.COMPLETED) {
    jobDTO.setStatus(JobConstants.JobStatus.COMPLETED);
    jobDAO.save(jobDTO);
  }
}
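
A hedged example of invoking the synchronous backfill; the caller method, the scheduler wiring and the one-week window are assumptions for illustration, and only the synchronousBackFill signature comes from the snippet above:

// Hypothetical caller: blocks until the backfill tasks complete or retries run out.
void backfillLastWeek(DetectionJobScheduler scheduler, long functionId) {
  DateTime backfillEnd = DateTime.now();
  DateTime backfillStart = backfillEnd.minusDays(7);
  // force = false resumes from a previous backfill job for this function, if one exists
  scheduler.synchronousBackFill(functionId, backfillStart, backfillEnd, false);
}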