Usage of com.linkedin.thirdeye.datalayer.dto.TaskDTO in the LinkedIn pinot project:
class AnomalyApplicationEndToEndTest, method testThirdeyeAnomalyApplication.
/**
 * End-to-end test of the ThirdEye anomaly pipeline: starts the data-completeness,
 * detection, alert, and monitor schedulers plus the worker, then verifies that jobs
 * and tasks are created, transition through the expected statuses, and that raw
 * anomalies are merged into merged anomalies.
 *
 * NOTE(review): this test is timing-based (Thread.sleep) and therefore sensitive to
 * machine speed; see the TODO near the end.
 */
@Test(enabled = true)
public void testThirdeyeAnomalyApplication() throws Exception {
  // DAO registry should be available both before and after cache/config setup
  Assert.assertNotNull(daoRegistry.getJobDAO());
  // setup caches and config
  setup();
  Assert.assertNotNull(daoRegistry.getJobDAO());

  // start the data-completeness scheduler and give it time to fire
  startDataCompletenessScheduler();
  Thread.sleep(10000);
  // exactly one data-completeness job with two tasks (CHECKER + CLEANUP) is expected
  int jobSizeDataCompleteness = jobDAO.findAll().size();
  int taskSizeDataCompleteness = taskDAO.findAll().size();
  Assert.assertEquals(jobSizeDataCompleteness, 1);
  Assert.assertEquals(taskSizeDataCompleteness, 2);
  JobDTO jobDTO = jobDAO.findAll().get(0);
  Assert.assertTrue(jobDTO.getJobName().startsWith(TaskType.DATA_COMPLETENESS.toString()));
  List<TaskDTO> taskDTOs = taskDAO.findAll();
  for (TaskDTO taskDTO : taskDTOs) {
    Assert.assertEquals(taskDTO.getTaskType(), TaskType.DATA_COMPLETENESS);
    Assert.assertEquals(taskDTO.getStatus(), TaskStatus.WAITING);
    DataCompletenessTaskInfo taskInfo = (DataCompletenessTaskInfo)
        TaskInfoFactory.getTaskInfoFromTaskType(taskDTO.getTaskType(), taskDTO.getTaskInfo());
    Assert.assertTrue((taskInfo.getDataCompletenessType() == DataCompletenessType.CHECKER)
        || (taskInfo.getDataCompletenessType() == DataCompletenessType.CLEANUP));
  }

  // start detection scheduler
  startDetectionScheduler();
  // start alert scheduler
  startAlertScheduler();

  // jobs/tasks should be created, and keep growing while the schedulers run
  Thread.sleep(10000);
  int jobSize1 = jobDAO.findAll().size();
  int taskSize1 = taskDAO.findAll().size();
  Assert.assertTrue(jobSize1 > 0);
  Assert.assertTrue(taskSize1 > 0);
  Thread.sleep(10000);
  int jobSize2 = jobDAO.findAll().size();
  int taskSize2 = taskDAO.findAll().size();
  Assert.assertTrue(jobSize2 > jobSize1);
  Assert.assertTrue(taskSize2 > taskSize1);

  // both detection and alert task types should be present by now
  tasks = taskDAO.findAll();
  int detectionCount = 0;
  int alertCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getTaskType().equals(TaskType.ANOMALY_DETECTION)) {
      detectionCount++;
    } else if (task.getTaskType().equals(TaskType.ALERT)) {
      alertCount++;
    }
  }
  Assert.assertTrue(detectionCount > 0);
  Assert.assertTrue(alertCount > 0);

  // no worker is running yet, so every task must still be WAITING
  tasks = taskDAO.findAll();
  for (TaskDTO task : tasks) {
    Assert.assertEquals(task.getStatus(), TaskStatus.WAITING);
  }

  // start monitor and check that the two monitor tasks get created
  startMonitor();
  Thread.sleep(5000);
  tasks = taskDAO.findAll();
  int monitorCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getTaskType().equals(TaskType.MONITOR)) {
      monitorCount++;
    }
  }
  Assert.assertEquals(monitorCount, 2);

  // every job should have been scheduled
  jobs = jobDAO.findAll();
  for (JobDTO job : jobs) {
    Assert.assertEquals(job.getStatus(), JobStatus.SCHEDULED);
  }

  // start task drivers (workers) and wait for some tasks to reach COMPLETED
  startWorker();
  Thread.sleep(30000);
  tasks = taskDAO.findAll();
  int completedCount = 0;
  for (TaskDTO task : tasks) {
    if (task.getStatus().equals(TaskStatus.COMPLETED)) {
      completedCount++;
    }
  }
  Assert.assertTrue(completedCount > 0);

  // Raw anomalies of the same function and dimensions should have been merged by the worker, so we
  // check if any raw anomalies present, whose existence means the worker fails the synchronous merge.
  List<RawAnomalyResultDTO> rawAnomalies = rawAnomalyResultDAO.findUnmergedByFunctionId(functionId);
  Assert.assertTrue(rawAnomalies.isEmpty());
  // check merged anomalies
  List<MergedAnomalyResultDTO> mergedAnomalies = mergedAnomalyResultDAO.findByFunctionId(functionId);
  Assert.assertFalse(mergedAnomalies.isEmpty());

  // NOTE(review): a final check that every job reaches JobStatus.COMPLETED was removed
  // because it failed occasionally depending on machine computation power.
  // TODO: Move test away from Thread.sleep
  // stop schedulers
  cleanup();
}
Usage of com.linkedin.thirdeye.datalayer.dto.TaskDTO in the LinkedIn pinot project:
class TestAnomalyTaskManager, method testUpdateStatusAndWorkerId.
/**
 * Verifies that updateStatusAndWorkerId moves the task to RUNNING, assigns the
 * worker id, and bumps the optimistic-locking version by exactly one.
 */
@Test(dependsOnMethods = { "testFindAll" })
public void testUpdateStatusAndWorkerId() {
  TaskStatus targetStatus = TaskStatus.RUNNING;
  Long assignedWorkerId = 1L;
  // snapshot the task before the update so we can compare versions afterwards
  TaskDTO before = taskDAO.findById(anomalyTaskId1);
  boolean updated = taskDAO.updateStatusAndWorkerId(
      assignedWorkerId, anomalyTaskId1, allowedOldTaskStatus, targetStatus, before.getVersion());
  Assert.assertTrue(updated);
  TaskDTO after = taskDAO.findById(anomalyTaskId1);
  Assert.assertEquals(after.getStatus(), targetStatus);
  Assert.assertEquals(after.getWorkerId(), assignedWorkerId);
  // the update must increment the version for optimistic concurrency control
  Assert.assertEquals(after.getVersion(), before.getVersion() + 1);
}
Usage of com.linkedin.thirdeye.datalayer.dto.TaskDTO in the LinkedIn pinot project:
class TestAnomalyTaskManager, method testFindByJobIdStatusNotIn.
/**
 * Verifies that exactly one task of the job is still in a non-COMPLETED state
 * after the preceding status-update test has run.
 */
@Test(dependsOnMethods = { "testUpdateStatusAndTaskEndTime" })
public void testFindByJobIdStatusNotIn() {
  List<TaskDTO> pendingTasks = taskDAO.findByJobIdStatusNotIn(anomalyJobId, TaskStatus.COMPLETED);
  Assert.assertEquals(pendingTasks.size(), 1);
}
Usage of com.linkedin.thirdeye.datalayer.dto.TaskDTO in the LinkedIn pinot project:
class MonitorTaskRunner, method findIncompleteJobsWithStatusScheduled.
/**
 * Returns the set of job ids that still have at least one task in a
 * non-COMPLETED state.
 */
private Set<Long> findIncompleteJobsWithStatusScheduled() {
  List<TaskDTO> pendingTasks = DAO_REGISTRY.getTaskDAO().findByStatusNotIn(TaskStatus.COMPLETED);
  // a Set deduplicates job ids shared by several incomplete tasks
  Set<Long> jobIds = new HashSet<>();
  pendingTasks.forEach(task -> jobIds.add(task.getJobId()));
  return jobIds;
}
Usage of com.linkedin.thirdeye.datalayer.dto.TaskDTO in the LinkedIn pinot project:
class TaskDriver, method start.
/**
 * Starts MAX_PARALLEL_TASK worker loops on the task executor service. Each worker
 * repeatedly acquires a task, executes it via the matching TaskRunner, and records
 * the final status (COMPLETED or FAILED) until {@code shutdown} becomes true.
 *
 * @throws Exception if worker submission fails
 */
public void start() throws Exception {
  for (int i = 0; i < MAX_PARALLEL_TASK; i++) {
    // parameterized Callable (was raw) to avoid unchecked-generics warnings
    Callable<Integer> callable = () -> {
      while (!shutdown) {
        LOG.info("Finding next task to execute for threadId:{}", Thread.currentThread().getId());
        // select a task to execute, and update it to RUNNING
        // NOTE(review): assumes acquireTask() never returns null — confirm, else NPE below
        TaskDTO anomalyTaskSpec = acquireTask();
        try {
          LOG.info("Executing task: {} {}", anomalyTaskSpec.getId(), anomalyTaskSpec.getTaskInfo());
          // execute the selected task
          TaskType taskType = anomalyTaskSpec.getTaskType();
          TaskRunner taskRunner = TaskRunnerFactory.getTaskRunnerFromTaskType(taskType);
          TaskInfo taskInfo = TaskInfoFactory.getTaskInfoFromTaskType(taskType, anomalyTaskSpec.getTaskInfo());
          LOG.info("Task Info {}", taskInfo);
          // result list is not consumed here, so the return value is intentionally ignored
          taskRunner.execute(taskInfo, taskContext);
          LOG.info("DONE Executing task: {}", anomalyTaskSpec.getId());
          // update status to COMPLETED
          updateStatusAndTaskEndTime(anomalyTaskSpec.getId(), TaskStatus.RUNNING, TaskStatus.COMPLETED);
        } catch (Exception e) {
          LOG.error("Exception in electing and executing task", e);
          try {
            // update task status failed
            updateStatusAndTaskEndTime(anomalyTaskSpec.getId(), TaskStatus.RUNNING, TaskStatus.FAILED);
          } catch (Exception e1) {
            LOG.error("Error in updating failed status", e1);
          }
        }
      }
      return 0;
    };
    taskExecutorService.submit(callable);
    LOG.info("Started task driver worker {}", i);
  }
}
End of aggregated TaskDTO usage examples.