use of com.linkedin.thirdeye.datalayer.dto.AnomalyFunctionDTO in project pinot by linkedin.
the class DetectionJobScheduler method runBackfill.
/**
 * Sequentially performs anomaly detection for all the monitoring windows that are located between backfillStartTime
 * and backfillEndTime. A lightweight job is performed right after each detection job, with the notified flag set to
 * false in order to silence the mail alerts.
 *
 * NOTE: We assume that backfill windows for the same function DO NOT overlap. In other words, this function
 * does not guarantee correctness of the detection results if it is invoked twice with the same parameters.
 *
 * @param functionId the id of the anomaly function, which has to be an active function
 * @param backfillStartTime the start time for backfilling
 * @param backfillEndTime the end time for backfilling
 * @param force set to false to resume from a previous backfill if one exists
 * @return the id of the generated detection job, or null if the backfill was skipped or aborted
 */
public Long runBackfill(long functionId, DateTime backfillStartTime, DateTime backfillEndTime, boolean force) {
  AnomalyFunctionDTO anomalyFunction = DAO_REGISTRY.getAnomalyFunctionDAO().findById(functionId);
  Long jobId = null;
  String dataset = anomalyFunction.getCollection();
  boolean isActive = anomalyFunction.getIsActive();
  if (!isActive) {
    LOG.info("Skipping function {}", functionId);
    return null;
  }
  BackfillKey backfillKey = new BackfillKey(functionId, backfillStartTime, backfillEndTime);
  Thread returnedThread = existingBackfillJobs.putIfAbsent(backfillKey, Thread.currentThread());
  // If the returned thread is not the current thread, then a backfill job is already running
  if (returnedThread != null) {
    LOG.info("Function: {} Dataset: {} Aborting... An existing back-fill job is running...", functionId, dataset);
    return null;
  }
  try {
    CronExpression cronExpression = null;
    try {
      cronExpression = new CronExpression(anomalyFunction.getCron());
    } catch (ParseException e) {
      LOG.error("Function: {} Dataset: {} Failed to parse cron expression", functionId, dataset);
      return null;
    }
    long monitoringWindowSize = TimeUnit.MILLISECONDS.convert(anomalyFunction.getWindowSize(), anomalyFunction.getWindowUnit());
    DateTime currentStart;
    if (force) {
      currentStart = backfillStartTime;
    } else {
      currentStart = computeResumeStartTime(functionId, cronExpression, backfillStartTime, backfillEndTime);
    }
    DateTime currentEnd = currentStart.plus(monitoringWindowSize);
    // Make the end time inclusive
    DateTime endBoundary = new DateTime(cronExpression.getNextValidTimeAfter(backfillEndTime.toDate()));
    List<Long> startTimes = new ArrayList<>();
    List<Long> endTimes = new ArrayList<>();
    LOG.info("Function: {} Dataset: {} Begin regenerate anomalies for each monitoring window between {} and {}", functionId, dataset, currentStart, endBoundary);
    while (currentEnd.isBefore(endBoundary)) {
      if (Thread.currentThread().isInterrupted()) {
        LOG.info("Function: {} Dataset: {} Terminating adhoc function.", functionId, dataset);
        return null;
      }
      String monitoringWindowStart = ISODateTimeFormat.dateHourMinute().print(currentStart);
      String monitoringWindowEnd = ISODateTimeFormat.dateHourMinute().print(currentEnd);
      LOG.info("Function: {} Dataset: {} Adding adhoc time range {}({}) to {}({})", functionId, dataset, currentStart, monitoringWindowStart, currentEnd, monitoringWindowEnd);
      startTimes.add(currentStart.getMillis());
      endTimes.add(currentEnd.getMillis());
      currentStart = new DateTime(cronExpression.getNextValidTimeAfter(currentStart.toDate()));
      currentEnd = currentStart.plus(monitoringWindowSize);
    }
    // For the time periods that were found, run the anomaly function and update the detection status
    List<DetectionStatusDTO> findAllInTimeRange = DAO_REGISTRY.getDetectionStatusDAO().findAllInTimeRangeForFunctionAndDetectionRun(backfillStartTime.getMillis(), currentStart.getMillis(), functionId, false);
    jobId = runAnomalyFunctionAndUpdateDetectionStatus(startTimes, endTimes, anomalyFunction, findAllInTimeRange);
    LOG.info("Function: {} Dataset: {} Generated job for detecting anomalies for each monitoring window " + "whose start is located in range {} -- {}", functionId, dataset, backfillStartTime, currentStart);
  } finally {
    existingBackfillJobs.remove(backfillKey, Thread.currentThread());
  }
  return jobId;
}
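For reference, a caller could trigger such a backfill roughly as follows. This is a minimal sketch, not taken from the pinot source: it assumes a DetectionJobScheduler instance named scheduler is already constructed and that functionId refers to an active anomaly function.
// Hypothetical caller, not from the pinot source; "scheduler" and "functionId" are assumed to exist.
DateTime backfillStart = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC);
DateTime backfillEnd = new DateTime(2017, 1, 8, 0, 0, DateTimeZone.UTC);
// force = false resumes from a previous backfill for this function if one exists
Long jobId = scheduler.runBackfill(functionId, backfillStart, backfillEnd, false);
if (jobId == null) {
  // the function was inactive, the cron expression could not be parsed,
  // another backfill was already running for the same key, or the thread was interrupted
  LOG.warn("Backfill for function {} did not produce a job", functionId);
}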
use of com.linkedin.thirdeye.datalayer.dto.AnomalyFunctionDTO in project pinot by linkedin.
the class DetectionJobScheduler method run.
/**
 * Reads all active anomaly functions.
 * For each function, finds all time periods for which detection needs to be run,
 * runs the anomaly function for those periods, and updates the detection status.
 * {@inheritDoc}
 * @see java.lang.Runnable#run()
 */
public void run() {
  // read all anomaly functions
  LOG.info("Reading all anomaly functions");
  List<AnomalyFunctionDTO> anomalyFunctions = DAO_REGISTRY.getAnomalyFunctionDAO().findAllActiveFunctions();
  // for each active anomaly function
  for (AnomalyFunctionDTO anomalyFunction : anomalyFunctions) {
    try {
      LOG.info("Function: {}", anomalyFunction);
      long functionId = anomalyFunction.getId();
      String dataset = anomalyFunction.getCollection();
      DatasetConfigDTO datasetConfig = CACHE_REGISTRY.getDatasetConfigCache().get(dataset);
      DateTimeZone dateTimeZone = Utils.getDataTimeZone(dataset);
      DateTime currentDateTime = new DateTime(dateTimeZone);
      // find the last entry in the detectionStatus table for this function
      DetectionStatusDTO lastEntryForFunction = DAO_REGISTRY.getDetectionStatusDAO().findLatestEntryForFunctionId(functionId);
      LOG.info("Function: {} Dataset: {} Last entry is {}", functionId, dataset, lastEntryForFunction);
      // calculate entries from the last entry to the current time
      Map<String, Long> newEntries = DetectionJobSchedulerUtils.getNewEntries(currentDateTime, lastEntryForFunction, anomalyFunction, datasetConfig, dateTimeZone);
      LOG.info("Function: {} Dataset: {} Creating {} new entries {}", functionId, dataset, newEntries.size(), newEntries);
      // create these entries
      for (Entry<String, Long> entry : newEntries.entrySet()) {
        DetectionStatusDTO detectionStatus = new DetectionStatusDTO();
        detectionStatus.setDataset(anomalyFunction.getCollection());
        detectionStatus.setFunctionId(functionId);
        detectionStatus.setDateToCheckInSDF(entry.getKey());
        detectionStatus.setDateToCheckInMS(entry.getValue());
        DAO_REGISTRY.getDetectionStatusDAO().save(detectionStatus);
      }
      // find all entries in the past 3 days which are still isRun = false
      List<DetectionStatusDTO> entriesInLast3Days = DAO_REGISTRY.getDetectionStatusDAO().findAllInTimeRangeForFunctionAndDetectionRun(currentDateTime.minusDays(3).getMillis(), currentDateTime.getMillis(), functionId, false);
      Collections.sort(entriesInLast3Days);
      LOG.info("Function: {} Dataset: {} Entries in last 3 days {}", functionId, dataset, entriesInLast3Days);
      // for each entry, collect startTime and endTime
      List<Long> startTimes = new ArrayList<>();
      List<Long> endTimes = new ArrayList<>();
      List<DetectionStatusDTO> detectionStatusToUpdate = new ArrayList<>();
      for (DetectionStatusDTO detectionStatus : entriesInLast3Days) {
        try {
          LOG.info("Function: {} Dataset: {} Entry : {}", functionId, dataset, detectionStatus);
          long dateToCheck = detectionStatus.getDateToCheckInMS();
          // check data availability for the monitoring window, shifted back by the configured delay
          long endTime = dateToCheck - TimeUnit.MILLISECONDS.convert(anomalyFunction.getWindowDelay(), anomalyFunction.getWindowDelayUnit());
          long startTime = endTime - TimeUnit.MILLISECONDS.convert(anomalyFunction.getWindowSize(), anomalyFunction.getWindowUnit());
          LOG.info("Function: {} Dataset: {} Checking start:{} {} to end:{} {}", functionId, dataset, startTime, new DateTime(startTime, dateTimeZone), endTime, new DateTime(endTime, dateTimeZone));
          boolean pass = checkIfDetectionRunCriteriaMet(startTime, endTime, datasetConfig, anomalyFunction);
          if (pass) {
            startTimes.add(startTime);
            endTimes.add(endTime);
            detectionStatusToUpdate.add(detectionStatus);
          } else {
            LOG.warn("Function: {} Dataset: {} Data incomplete for monitoring window {} ({}) to {} ({}), skipping anomaly detection", functionId, dataset, startTime, new DateTime(startTime), endTime, new DateTime(endTime));
            // TODO: Send email to owners/dev team
          }
        } catch (Exception e) {
          LOG.error("Function: {} Dataset: {} Exception in preparing entry {}", functionId, dataset, detectionStatus, e);
        }
      }
      // If any time periods were found for which detection needs to be run, run the anomaly function and update the detection status
      runAnomalyFunctionAndUpdateDetectionStatus(startTimes, endTimes, anomalyFunction, detectionStatusToUpdate);
    } catch (Exception e) {
      LOG.error("Function: {} Dataset: {} Exception in running anomaly function {}", anomalyFunction.getId(), anomalyFunction.getCollection(), anomalyFunction, e);
    }
  }
}
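Since run() overrides java.lang.Runnable#run(), the scheduler can be driven by any periodic executor. A minimal sketch of such wiring follows; the no-arg constructor and the 15-minute cadence are assumptions, not taken from the pinot start-up code.
// Illustrative wiring only; the real ThirdEye start-up code may construct and schedule this differently.
ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
DetectionJobScheduler detectionJobScheduler = new DetectionJobScheduler();
// each run() scans active functions, inserts missing detectionStatus entries,
// and kicks off detection for the windows whose data is complete
scheduledExecutorService.scheduleWithFixedDelay(detectionJobScheduler, 0, 15, TimeUnit.MINUTES);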
use of com.linkedin.thirdeye.datalayer.dto.AnomalyFunctionDTO in project pinot by linkedin.
the class AnomalyMergeExecutor method run.
/**
 * Performs asynchronous merge based on function id and dimensions.
 */
public void run() {
  ExecutorService taskExecutorService = Executors.newFixedThreadPool(5);
  List<Future<Integer>> taskCallbacks = new ArrayList<>();
  List<AnomalyFunctionDTO> activeFunctions = anomalyFunctionDAO.findAllActiveFunctions();
  // for each anomaly function, find raw unmerged results and perform merge
  for (AnomalyFunctionDTO function : activeFunctions) {
    Callable<Integer> task = () -> {
      final boolean isBackfill = false;
      // TODO : move merge config within the AnomalyFunction; every function should have its own merge config.
      AnomalyMergeConfig anomalyMergeConfig = function.getAnomalyMergeConfig();
      if (anomalyMergeConfig == null) {
        anomalyMergeConfig = DEFAULT_MERGE_CONFIG;
      }
      return mergeAnomalies(function, anomalyMergeConfig, isBackfill);
    };
    Future<Integer> taskFuture = taskExecutorService.submit(task);
    taskCallbacks.add(taskFuture);
  }
  // wait till all the tasks complete
  try {
    for (Future<Integer> future : taskCallbacks) {
      future.get();
    }
  } catch (Exception e) {
    LOG.error("Error in merge execution", e);
  }
}
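The same fan-out/fan-in pattern is shown below in isolation, with an explicit pool shutdown added. This is a generic sketch rather than the pinot code; mergeResultsForFunction is a hypothetical stand-in for the per-function merge performed above.
// Generic sketch of the fan-out/fan-in pattern; mergeResultsForFunction is hypothetical.
ExecutorService pool = Executors.newFixedThreadPool(5);
List<Future<Integer>> futures = new ArrayList<>();
for (AnomalyFunctionDTO function : activeFunctions) {
  Callable<Integer> task = () -> mergeResultsForFunction(function);
  futures.add(pool.submit(task));
}
try {
  for (Future<Integer> future : futures) {
    future.get(); // block until every merge has finished (or failed)
  }
} catch (Exception e) {
  LOG.error("Error in merge execution", e);
} finally {
  pool.shutdown(); // release the worker threads once all merges complete
}
Blocking on future.get() keeps each merge pass sequential with respect to the next scheduled run, which is why the results are awaited before run() returns.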
use of com.linkedin.thirdeye.datalayer.dto.AnomalyFunctionDTO in project pinot by linkedin.
the class DetectionTaskRunner method handleResults.
private void handleResults(List<RawAnomalyResultDTO> results) {
  for (RawAnomalyResultDTO result : results) {
    try {
      // Properties that always come from the function spec
      AnomalyFunctionDTO spec = anomalyFunction.getSpec();
      // make sure score and weight are valid numbers
      result.setScore(normalize(result.getScore()));
      result.setWeight(normalize(result.getWeight()));
      result.setFunction(spec);
    } catch (Exception e) {
      LOG.error("Exception in saving anomaly result : " + result.toString(), e);
    }
  }
}
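The normalize helper is not part of this snippet. A plausible implementation that guards score and weight against NaN and infinite values might look like the following; this is an assumption for illustration, and the actual method in DetectionTaskRunner may differ.
// Hypothetical helper, shown only to illustrate what "valid numbers" means above.
private double normalize(Double value) {
  if (value == null || Double.isNaN(value) || Double.isInfinite(value)) {
    return 0.0d; // fall back to a neutral score/weight
  }
  return value;
}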
use of com.linkedin.thirdeye.datalayer.dto.AnomalyFunctionDTO in project pinot by linkedin.
the class DataCompletenessJobRunner method run.
@Override
public void run() {
  DateTime now = new DateTime();
  long checkDurationEndTime = now.getMillis();
  long checkDurationStartTime = now.minus(TimeUnit.MILLISECONDS.convert(DataCompletenessConstants.LOOKBACK_TIME_DURATION, DataCompletenessConstants.LOOKBACK_TIMEUNIT)).getMillis();
  String checkerEndTime = dateTimeFormatter.print(checkDurationEndTime);
  String checkerStartTime = dateTimeFormatter.print(checkDurationStartTime);
  String jobName = String.format("%s-%s-%s", TaskType.DATA_COMPLETENESS.toString(), checkerStartTime, checkerEndTime);
  dataCompletenessJobContext.setCheckDurationStartTime(checkDurationStartTime);
  dataCompletenessJobContext.setCheckDurationEndTime(checkDurationEndTime);
  dataCompletenessJobContext.setJobName(jobName);
  Set<String> datasetsToCheck = new HashSet<>();
  // datasets referenced by active anomaly functions
  for (AnomalyFunctionDTO anomalyFunction : DAO_REGISTRY.getAnomalyFunctionDAO().findAllActiveFunctions()) {
    datasetsToCheck.add(anomalyFunction.getCollection());
  }
  // datasets explicitly flagged as requiring a completeness check
  for (DatasetConfigDTO datasetConfig : DAO_REGISTRY.getDatasetConfigDAO().findActiveRequiresCompletenessCheck()) {
    datasetsToCheck.add(datasetConfig.getDataset());
  }
  dataCompletenessJobContext.setDatasetsToCheck(Lists.newArrayList(datasetsToCheck));
  // create data completeness job
  long jobExecutionId = createJob();
  dataCompletenessJobContext.setJobExecutionId(jobExecutionId);
  // create data completeness tasks
  createTasks();
}
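To make the lookback arithmetic above concrete, the sketch below uses an assumed 3-day lookback (the real values live in DataCompletenessConstants) and shows how the job name is assembled.
// Illustration with assumed constants, not the actual DataCompletenessConstants values.
long lookbackMillis = TimeUnit.MILLISECONDS.convert(3, TimeUnit.DAYS); // assume a 3-day lookback
DateTime now = new DateTime();
long checkDurationEndTime = now.getMillis();
long checkDurationStartTime = now.minus(lookbackMillis).getMillis();
// yields a name such as DATA_COMPLETENESS-<start>-<end>, where the timestamps are
// rendered by whatever pattern dateTimeFormatter is configured with
String jobName = String.format("%s-%s-%s", "DATA_COMPLETENESS",
    dateTimeFormatter.print(checkDurationStartTime), dateTimeFormatter.print(checkDurationEndTime));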