Usage of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot (by LinkedIn) — class AnomalyApplicationEndToEndTest, method testThirdeyeAnomalyApplication:
@Test(enabled = true)
public void testThirdeyeAnomalyApplication() throws Exception {
  Assert.assertNotNull(daoRegistry.getJobDAO());
  // Set up caches and config before starting any scheduler.
  setup();
  Assert.assertNotNull(daoRegistry.getJobDAO());

  // Start the data-completeness checker; it should schedule exactly one job
  // with two tasks (a CHECKER task and a CLEANUP task).
  startDataCompletenessScheduler();
  Thread.sleep(10000);
  int jobSizeDataCompleteness = jobDAO.findAll().size();
  int taskSizeDataCompleteness = taskDAO.findAll().size();
  // assertEquals (not assertTrue(x == y)) so a failure reports both values.
  Assert.assertEquals(jobSizeDataCompleteness, 1);
  Assert.assertEquals(taskSizeDataCompleteness, 2);
  JobDTO jobDTO = jobDAO.findAll().get(0);
  Assert.assertTrue(jobDTO.getJobName().startsWith(TaskType.DATA_COMPLETENESS.toString()));
  List<TaskDTO> taskDTOs = taskDAO.findAll();
  for (TaskDTO taskDTO : taskDTOs) {
    Assert.assertEquals(taskDTO.getTaskType(), TaskType.DATA_COMPLETENESS);
    Assert.assertEquals(taskDTO.getStatus(), TaskStatus.WAITING);
    DataCompletenessTaskInfo taskInfo =
        (DataCompletenessTaskInfo) TaskInfoFactory.getTaskInfoFromTaskType(taskDTO.getTaskType(), taskDTO.getTaskInfo());
    Assert.assertTrue((taskInfo.getDataCompletenessType() == DataCompletenessType.CHECKER)
        || (taskInfo.getDataCompletenessType() == DataCompletenessType.CLEANUP));
  }

  // Start detection scheduler
  startDetectionScheduler();
  // Start alert scheduler
  startAlertScheduler();

  // Check that jobs and tasks keep accumulating while the schedulers run.
  Thread.sleep(10000);
  int jobSize1 = jobDAO.findAll().size();
  int taskSize1 = taskDAO.findAll().size();
  Assert.assertTrue(jobSize1 > 0);
  Assert.assertTrue(taskSize1 > 0);
  Thread.sleep(10000);
  int jobSize2 = jobDAO.findAll().size();
  int taskSize2 = taskDAO.findAll().size();
  Assert.assertTrue(jobSize2 > jobSize1);
  Assert.assertTrue(taskSize2 > taskSize1);

  // Both detection and alert task types must be present.
  tasks = taskDAO.findAll();
  int detectionCount = 0;
  int alertCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getTaskType().equals(TaskType.ANOMALY_DETECTION)) {
      detectionCount++;
    } else if (task.getTaskType().equals(TaskType.ALERT)) {
      alertCount++;
    }
  }
  Assert.assertTrue(detectionCount > 0);
  Assert.assertTrue(alertCount > 0);

  // With no worker running yet, every task is still WAITING.
  tasks = taskDAO.findAll();
  for (TaskDTO task : tasks) {
    Assert.assertEquals(task.getStatus(), TaskStatus.WAITING);
  }

  // Start monitor and verify that exactly two monitor tasks are created.
  startMonitor();
  Thread.sleep(5000);
  tasks = taskDAO.findAll();
  int monitorCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getTaskType().equals(TaskType.MONITOR)) {
      monitorCount++;
    }
  }
  Assert.assertEquals(monitorCount, 2);

  // All jobs should still be SCHEDULED before the worker picks them up.
  jobs = jobDAO.findAll();
  for (JobDTO job : jobs) {
    Assert.assertEquals(job.getStatus(), JobStatus.SCHEDULED);
  }

  // Start task drivers (workers) and verify some tasks reach COMPLETED.
  startWorker();
  Thread.sleep(30000);
  tasks = taskDAO.findAll();
  int completedCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getStatus().equals(TaskStatus.COMPLETED)) {
      completedCount++;
    }
  }
  Assert.assertTrue(completedCount > 0);

  // Raw anomalies of the same function and dimensions should have been merged by the worker, so we
  // check if any raw anomalies present, whose existence means the worker fails the synchronous merge.
  List<RawAnomalyResultDTO> rawAnomalies = rawAnomalyResultDAO.findUnmergedByFunctionId(functionId);
  Assert.assertEquals(rawAnomalies.size(), 0);
  // Check that merged anomalies were produced.
  List<MergedAnomalyResultDTO> mergedAnomalies = mergedAnomalyResultDAO.findByFunctionId(functionId);
  Assert.assertTrue(mergedAnomalies.size() > 0);

  // THE FOLLOWING TEST FAILS OCCASIONALLY DUE TO MACHINE COMPUTATION POWER
  // TODO: Move test away from Thread.sleep
  // check for job status COMPLETED
  // jobs = jobDAO.findAll();
  // int completedJobCount = 0;
  // for (JobDTO job : jobs) {
  //   int attempt = 0;
  //   while (attempt < 3 && !job.getStatus().equals(JobStatus.COMPLETED)) {
  //     LOG.info("Checking job status with attempt : {}", attempt + 1);
  //     Thread.sleep(5_000);
  //     attempt++;
  //   }
  //   if (job.getStatus().equals(JobStatus.COMPLETED)) {
  //     completedJobCount ++;
  //   }
  // }
  // Assert.assertTrue(completedJobCount > 0);

  // Stop schedulers and release resources.
  cleanup();
}
Usage of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot (by LinkedIn) — class AbstractManagerTestBase, method getTestJobSpec:
JobDTO getTestJobSpec() {
  // Build a canned job spec for tests: scheduled now, with a detection
  // window spanning 20 hours ago to 10 hours ago.
  long scheduledAt = System.currentTimeMillis();
  JobDTO spec = new JobDTO();
  spec.setJobName("Test_Anomaly_Job");
  spec.setStatus(JobConstants.JobStatus.SCHEDULED);
  spec.setScheduleStartTime(scheduledAt);
  spec.setWindowStartTime(new DateTime().minusHours(20).getMillis());
  spec.setWindowEndTime(new DateTime().minusHours(10).getMillis());
  return spec;
}
Usage of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot (by LinkedIn) — class TestAnomalyTaskManager, method testCreate:
@Test
public void testCreate() throws JsonProcessingException {
  // Persist a job spec and two task specs tied to it; all three saves
  // must yield non-null ids (the job id was previously left unchecked).
  JobDTO testAnomalyJobSpec = getTestJobSpec();
  anomalyJobId = jobDAO.save(testAnomalyJobSpec);
  Assert.assertNotNull(anomalyJobId);
  anomalyTaskId1 = taskDAO.save(getTestTaskSpec(testAnomalyJobSpec));
  Assert.assertNotNull(anomalyTaskId1);
  anomalyTaskId2 = taskDAO.save(getTestTaskSpec(testAnomalyJobSpec));
  Assert.assertNotNull(anomalyTaskId2);
}
Usage of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot (by LinkedIn) — class TestAnomalyJobManager, method testUpdateStatusAndJobEndTime:
@Test(dependsOnMethods = { "testFindAll" })
public void testUpdateStatusAndJobEndTime() {
  JobStatus status = JobStatus.COMPLETED;
  long jobEndTime = System.currentTimeMillis();
  // The bulk update targets two job ids; verify BOTH were updated
  // (previously only anomalyJobId1 was checked).
  jobDAO.updateStatusAndJobEndTimeForJobIds(Sets.newHashSet(anomalyJobId1, anomalyJobId3), status, jobEndTime);
  JobDTO anomalyJob1 = jobDAO.findById(anomalyJobId1);
  Assert.assertEquals(anomalyJob1.getStatus(), status);
  Assert.assertEquals(anomalyJob1.getScheduleEndTime(), jobEndTime);
  JobDTO anomalyJob3 = jobDAO.findById(anomalyJobId3);
  Assert.assertEquals(anomalyJob3.getStatus(), status);
  Assert.assertEquals(anomalyJob3.getScheduleEndTime(), jobEndTime);
  printAll("After testUpdateStatusAndJobEndTime");
}
Usage of com.linkedin.thirdeye.datalayer.dto.JobDTO in project pinot (by LinkedIn) — class DetectionJobScheduler, method computeResumeStartTime:
/**
 * Returns the start time of the first detection job for the current backfill. The start time is determined as
 * follows:
 * 1. If there exists any previously left detection job, then start backfill from that job.
 *    1a. if that job is finished, then start a job next to it.
 *    1b. if that job is unfinished, then restart that job.
 * 2. If there exists no previous left job, then start the job from the beginning.
 *
 * @param functionId the id of the anomaly function being backfilled.
 * @param cronExpression the cron expression that is used to calculate the alignment of start time.
 * @param backfillStartTime the requested start of the backfill window.
 * @param backfillEndTime the requested end of the backfill window.
 * @return the start time for the first detection job of this backfilling.
 */
private DateTime computeResumeStartTime(long functionId, CronExpression cronExpression, DateTime backfillStartTime, DateTime backfillEndTime) {
  DateTime currentStart;
  JobDTO previousJob = getPreviousJob(functionId, backfillStartTime.getMillis(), backfillEndTime.getMillis());
  if (previousJob != null) {
    long previousStartTime = previousJob.getWindowStartTime();
    cleanUpJob(previousJob);
    // '==' is the idiomatic (and null-safe) comparison for enum constants.
    if (previousJob.getStatus() == JobConstants.JobStatus.COMPLETED) {
      // The previous job finished: resume at the next cron-aligned window after it.
      currentStart = new DateTime(cronExpression.getNextValidTimeAfter(new Date(previousStartTime)));
      // Log message previously claimed an "unfinished job" even on this branch; split per branch for accuracy.
      LOG.info("Backfill starting from {} for function {} because a previous completed job found.", currentStart, functionId);
    } else {
      // The previous job did not finish: rerun its window from the start.
      currentStart = new DateTime(previousStartTime);
      LOG.info("Backfill starting from {} for function {} because a previous unfinished job found.", currentStart, functionId);
    }
  } else {
    // No previous job for this window: start the backfill from the beginning.
    currentStart = backfillStartTime;
  }
  return currentStart;
}
Aggregations