Use of org.apache.plc4x.java.scraper.util.PercentageAboveThreshold in project plc4x by apache.
The class TriggeredScraperImpl, method start():
    /**
     * Start the scraping.
     */
    // ToDo code-refactoring and improved testing --> PLC4X-90
    @Override
    public void start() {
        // Schedule all jobs
        LOGGER.info("Starting jobs...");
        // start iterating over all available jobs
        for (ScrapeJob job : jobs) {
            // iterate over all source connections the job shall be performed on
            for (Map.Entry<String, String> sourceEntry : job.getSourceConnections().entrySet()) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Register task for job {} for conn {} ({}) at rate {} ms", job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(), job.getScrapeRate());
                }
                // create the corresponding triggered scraper task
                TriggeredScraperTask triggeredScraperTask;
                try {
                    triggeredScraperTask = new TriggeredScraperTask(driverManager, job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(), job.getFields(), futureTimeOut, executorService, resultHandler, (TriggeredScrapeJobImpl) job, triggerCollector);
                    // Add task to internal list
                    if (LOGGER.isInfoEnabled()) {
                        LOGGER.info("Task {} added to scheduling", triggeredScraperTask);
                    }
                    registerTaskMBean(triggeredScraperTask);
                    tasks.put(job, triggeredScraperTask);
                    ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(triggeredScraperTask, 0, job.getScrapeRate(), TimeUnit.MILLISECONDS);
                    // Store the handle for stopping, etc.
                    scraperTaskMap.put(triggeredScraperTask, future);
                } catch (ScraperException e) {
                    LOGGER.warn("Error executing the job {} for conn {} ({}) at rate {} ms", job.getJobName(), sourceEntry.getKey(), sourceEntry.getValue(), job.getScrapeRate(), e);
                }
            }
        }
        // Add statistics tracker
        statisticsLogger = scheduler.scheduleAtFixedRate(() -> {
            for (Map.Entry<ScrapeJob, ScraperTask> entry : tasks.entries()) {
                DescriptiveStatistics statistics = entry.getValue().getLatencyStatistics();
                String msg = String.format(Locale.ENGLISH, "Job statistics (%s, %s) number of requests: %d (%d success, %.1f %% failed, %.1f %% too slow), min latency: %.2f ms, mean latency: %.2f ms, median: %.2f ms",
                    entry.getValue().getJobName(), entry.getValue().getConnectionAlias(), entry.getValue().getRequestCounter(), entry.getValue().getSuccessfullRequestCounter(), entry.getValue().getPercentageFailed(),
                    statistics.apply(new PercentageAboveThreshold(entry.getKey().getScrapeRate() * 1e6)), statistics.getMin() * 1e-6, statistics.getMean() * 1e-6, statistics.getPercentile(50) * 1e-6);
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(msg);
                }
            }
        }, 1_000, 1_000, TimeUnit.MILLISECONDS);
    }
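In this method, the task latencies are recorded in nanoseconds and handed to statistics.apply(new PercentageAboveThreshold(scrapeRate * 1e6)), which reports how many requests took longer than one scrape interval. As an illustration only, here is a minimal sketch of such a statistic, assuming it implements Commons Math's UnivariateStatistic; the actual plc4x class may differ in detail:

    import java.util.Arrays;
    import org.apache.commons.math3.stat.descriptive.UnivariateStatistic;

    // Sketch only: reports the percentage of recorded values strictly above a threshold.
    // The real org.apache.plc4x.java.scraper.util.PercentageAboveThreshold may differ.
    public class PercentageAboveThreshold implements UnivariateStatistic {

        private final double threshold;

        public PercentageAboveThreshold(double threshold) {
            this.threshold = threshold;
        }

        @Override
        public double evaluate(double[] values) {
            return evaluate(values, 0, values.length);
        }

        @Override
        public double evaluate(double[] values, int begin, int length) {
            if (length == 0) {
                return 0.0;
            }
            long above = Arrays.stream(values, begin, begin + length)
                .filter(value -> value > threshold)
                .count();
            return 100.0 * above / length;
        }

        @Override
        public UnivariateStatistic copy() {
            return new PercentageAboveThreshold(threshold);
        }
    }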
Use of org.apache.plc4x.java.scraper.util.PercentageAboveThreshold in project plc4x by apache.
The class ScraperImpl, method start():
    @Override
    public void start() {
        // Schedule all jobs
        LOGGER.info("Starting jobs...");
        jobs.stream()
            .flatMap(job -> job.getSourceConnections().entrySet().stream()
                .map(entry -> Triple.of(job, entry.getKey(), entry.getValue())))
            .forEach(tuple -> {
                LOGGER.debug("Register task for job {} for conn {} ({}) at rate {} ms", tuple.getLeft().getJobName(), tuple.getMiddle(), tuple.getRight(), tuple.getLeft().getScrapeRate());
                ScraperTask task = new ScraperTaskImpl(driverManager, tuple.getLeft().getJobName(), tuple.getMiddle(), tuple.getRight(), tuple.getLeft().getFields(), 1_000, handlerPool, resultHandler);
                // Add task to internal list
                tasks.put(tuple.getLeft(), task);
                ScheduledFuture<?> future = scheduler.scheduleAtFixedRate(task, 0, tuple.getLeft().getScrapeRate(), TimeUnit.MILLISECONDS);
                // Store the handle for stopping, etc.
                futures.put(task, future);
            });
        // Add statistics tracker
        scheduler.scheduleAtFixedRate(() -> {
            for (Map.Entry<ScrapeJob, ScraperTask> entry : tasks.entries()) {
                DescriptiveStatistics statistics = entry.getValue().getLatencyStatistics();
                String msg = String.format(Locale.ENGLISH, "Job statistics (%s, %s) number of requests: %d (%d success, %.1f %% failed, %.1f %% too slow), min latency: %.2f ms, mean latency: %.2f ms, median: %.2f ms",
                    entry.getValue().getJobName(), entry.getValue().getConnectionAlias(), entry.getValue().getRequestCounter(), entry.getValue().getSuccessfullRequestCounter(), entry.getValue().getPercentageFailed(),
                    statistics.apply(new PercentageAboveThreshold(entry.getKey().getScrapeRate() * 1e6)), statistics.getMin() * 1e-6, statistics.getMean() * 1e-6, statistics.getPercentile(50) * 1e-6);
                LOGGER.debug(msg);
            }
        }, 1_000, 1_000, TimeUnit.MILLISECONDS);
    }
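A small, self-contained usage sketch (the class name LatencyStatsDemo and the sample values are hypothetical, not part of plc4x) shows how DescriptiveStatistics.apply combines with the scrape-rate threshold, and why the logging code above multiplies milliseconds by 1e6 and scales results back by 1e-6 for display:

    import java.util.Locale;
    import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;

    // Hypothetical demo class, not part of plc4x.
    public class LatencyStatsDemo {
        public static void main(String[] args) {
            long scrapeRateMs = 100;
            DescriptiveStatistics statistics = new DescriptiveStatistics();
            // Latencies are assumed to be recorded in nanoseconds.
            statistics.addValue(80 * 1e6);   // 80 ms
            statistics.addValue(120 * 1e6);  // 120 ms, slower than the scrape rate
            statistics.addValue(95 * 1e6);   // 95 ms

            double percentTooSlow = statistics.apply(new PercentageAboveThreshold(scrapeRateMs * 1e6));
            System.out.printf(Locale.ENGLISH,
                "mean latency: %.2f ms, median: %.2f ms, %.1f %% above scrape rate%n",
                statistics.getMean() * 1e-6,
                statistics.getPercentile(50) * 1e-6,
                percentTooSlow);
        }
    }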