Use of com.codahale.metrics.Timer.Context in project torodb by torodb.
From the class SimpleAnalyzedOplogBatchExecutorTest, method testVisit_SingleOp_Success.
@Test
public void testVisit_SingleOp_Success() throws Exception {
    //GIVEN
    OplogOperation operation = mock(OplogOperation.class);
    SingleOpAnalyzedOplogBatch batch = new SingleOpAnalyzedOplogBatch(operation);
    ApplierContext applierContext = new ApplierContext.Builder()
            .setReapplying(true)
            .setUpdatesAsUpserts(true)
            .build();
    Timer timer = mock(Timer.class);
    Context context = mock(Context.class);
    given(metrics.getSingleOpTimer(operation)).willReturn(timer);
    given(timer.time()).willReturn(context);
    doNothing().when(executor).execute(any(OplogOperation.class), any());

    //WHEN
    OplogOperation result = executor.visit(batch, applierContext);

    //THEN
    then(metrics).should().getSingleOpTimer(operation);
    then(timer).should().time();
    then(context).should().close();
    then(executor).should(times(1)).execute(operation, applierContext);
    assertEquals(operation, result);
}
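The test above only verifies interactions: metrics.getSingleOpTimer(operation) must supply a Timer, timer.time() must return a Context, and that Context must be closed once the operation has been executed. The underlying Dropwizard Metrics pattern being exercised is the standard try-with-resources use of Timer.Context; a minimal standalone sketch follows (the registry and the timer name are illustrative, not taken from torodb):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class SingleOpTimingSketch {

    private static final MetricRegistry registry = new MetricRegistry();
    private static final Timer singleOpTimer = registry.timer("single-op");

    static void applyOperation() {
        // Stand-in for applying the real oplog operation.
    }

    public static void main(String[] args) {
        // Timer.Context implements Closeable, so try-with-resources stops
        // the timer automatically when the block exits.
        try (Timer.Context context = singleOpTimer.time()) {
            applyOperation();
        }
        System.out.println("recorded samples: " + singleOpTimer.getCount());
    }
}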
Use of com.codahale.metrics.Timer.Context in project torodb by torodb.
From the class SimpleAnalyzedOplogBatchExecutorTest, method testVisit_SingleOp_UserEx.
@Test
public void testVisit_SingleOp_UserEx() throws Exception {
    //GIVEN
    OplogOperation operation = mock(OplogOperation.class);
    SingleOpAnalyzedOplogBatch batch = new SingleOpAnalyzedOplogBatch(operation);
    ApplierContext applierContext = new ApplierContext.Builder()
            .setReapplying(true)
            .setUpdatesAsUpserts(true)
            .build();
    Timer timer = mock(Timer.class);
    Context context = mock(Context.class);
    given(metrics.getSingleOpTimer(operation)).willReturn(timer);
    given(timer.time()).willReturn(context);
    doThrow(new DatabaseNotFoundException("test")).when(executor).execute(operation, applierContext);

    //WHEN
    try {
        executor.visit(batch, applierContext);
        fail("An exception was expected");
    } catch (RetrierGiveUpException | RetrierAbortException ignore) {
    }

    //THEN
    then(metrics).should().getSingleOpTimer(operation);
    then(timer).should().time();
    then(executor).should(times(1)).execute(operation, applierContext);
}
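The only difference from the success case is that execute(...) is stubbed to throw, and the test asserts that visit surfaces a RetrierGiveUpException or RetrierAbortException after the timer has been started. Whether the failed attempt is still recorded depends on the production code closing the Context in a try-with-resources or finally block. A minimal, self-contained illustration of why try-with-resources records a failed attempt anyway (the exception type is just an example):

import com.codahale.metrics.Timer;

public class FailedAttemptTimingSketch {
    public static void main(String[] args) {
        Timer timer = new Timer();
        try (Timer.Context context = timer.time()) {
            throw new IllegalStateException("simulated failure");
        } catch (IllegalStateException e) {
            // The Context was already closed before this catch block ran,
            // so the failed attempt still shows up in the timer.
        }
        System.out.println("recorded samples: " + timer.getCount()); // prints 1
    }
}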
Use of com.codahale.metrics.Timer.Context in project torodb by torodb.
From the class SimpleAnalyzedOplogBatchExecutor, method visit.
@Override
public OplogOperation visit(CudAnalyzedOplogBatch batch, ApplierContext arg) throws RetrierGiveUpException {
    metrics.getCudBatchSize().update(batch.getOriginalBatch().size());
    try (Context context = metrics.getCudBatchTimer().time()) {
        try {
            execute(batch, arg);
        } catch (UserException | NamespaceJobExecutionException ex) {
            throw new RetrierGiveUpException("Unexpected exception while replying", ex);
        } catch (RollbackException ex) {
            ApplierContext retryingReplingContext = new ApplierContext.Builder()
                    .setReapplying(true)
                    .setUpdatesAsUpserts(true)
                    .build();
            retrier.retry(() -> {
                try {
                    execute(batch, retryingReplingContext);
                    return Empty.getInstance();
                } catch (UserException | NamespaceJobExecutionException ex2) {
                    throw new RetrierAbortException("Unexpected user exception while applying "
                            + "the batch " + batch, ex2);
                }
            }, Hint.CRITICAL, Hint.TIME_SENSIBLE);
        }
    }
    List<OplogOperation> originalBatch = batch.getOriginalBatch();
    return originalBatch.get(originalBatch.size() - 1);
}
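This production method combines two Dropwizard Metrics primitives: a Histogram that records the batch size via update(...), and a Timer whose Context is opened with time() and closed automatically by try-with-resources, so the batch is timed whether execution succeeds, is retried, or gives up. A minimal standalone sketch of that Histogram-plus-Timer combination (the registry, the metric names, and the batch contents are illustrative):

import java.util.Arrays;
import java.util.List;

import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class BatchMetricsSketch {

    private static final MetricRegistry registry = new MetricRegistry();
    private static final Histogram batchSize = registry.histogram("cud-batch-size");
    private static final Timer batchTimer = registry.timer("cud-batch-timer");

    public static void main(String[] args) {
        List<String> batch = Arrays.asList("insert", "update", "delete");

        // Record the size of the batch before applying it.
        batchSize.update(batch.size());

        // Time the whole batch, including any retries performed inside the block.
        try (Timer.Context context = batchTimer.time()) {
            for (String op : batch) {
                // Stand-in for applying each operation.
                System.out.println("applying " + op);
            }
        }
    }
}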
Use of com.codahale.metrics.Timer.Context in project opennms by OpenNMS.
From the class HibernateEventWriter, method process.
@Override
public void process(Log eventLog) throws EventProcessorException {
    if (eventLog != null && eventLog.getEvents() != null) {
        final List<Event> eventsInLog = eventLog.getEvents().getEventCollection();
        // This shouldn't happen, but just to be safe...
        if (eventsInLog == null) {
            return;
        }
        // Find the events in the log that need to be persisted
        final List<Event> eventsToPersist = eventsInLog.stream()
                .filter(e -> checkEventSanityAndDoWeProcess(e, "HibernateEventWriter"))
                .collect(Collectors.toList());
        // If there are no events to persist, avoid creating a database transaction
        if (eventsToPersist.size() < 1) {
            return;
        }
        // Time the transaction and insertions
        try (Context context = writeTimer.time()) {
            final AtomicReference<EventProcessorException> exception = new AtomicReference<>();
            m_transactionManager.execute(new TransactionCallbackWithoutResult() {
                @Override
                protected void doInTransactionWithoutResult(TransactionStatus status) {
                    for (Event eachEvent : eventsToPersist) {
                        try {
                            process(eventLog.getHeader(), eachEvent);
                        } catch (EventProcessorException e) {
                            exception.set(e);
                            return;
                        }
                    }
                }
            });
            if (exception.get() != null) {
                throw exception.get();
            }
        }
    }
}
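Two details of this OpenNMS method are worth noting: writeTimer.time() wraps the whole database transaction in a try-with-resources block, and an AtomicReference carries the checked EventProcessorException out of doInTransactionWithoutResult, whose signature does not allow it to be thrown directly. A minimal, self-contained sketch of that capture-and-rethrow idiom (the callback runner and the failing method are hypothetical stand-ins, not OpenNMS or Spring classes):

import java.util.concurrent.atomic.AtomicReference;

public class CaptureCheckedExceptionSketch {

    // Hypothetical stand-in for a callback API (such as Spring's
    // TransactionCallbackWithoutResult) whose method cannot declare checked exceptions.
    static void runInCallback(Runnable callback) {
        callback.run();
    }

    static void persistEvent() throws Exception {
        throw new Exception("simulated persistence failure");
    }

    public static void main(String[] args) throws Exception {
        AtomicReference<Exception> failure = new AtomicReference<>();
        runInCallback(() -> {
            try {
                persistEvent();
            } catch (Exception e) {
                // The callback cannot rethrow the checked exception, so capture it.
                failure.set(e);
            }
        });
        // Rethrow outside the callback, mirroring HibernateEventWriter.process.
        if (failure.get() != null) {
            throw failure.get();
        }
    }
}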
Use of com.codahale.metrics.Timer.Context in project opennms by OpenNMS.
From the class StressCommand, method execute.
@Override
public Void execute() throws Exception {
    // Apply sane lower bounds to all of the configurable options
    intervalInSeconds = Math.max(1, intervalInSeconds);
    numberOfNodes = Math.max(1, numberOfNodes);
    numberOfInterfacesPerNode = Math.max(1, numberOfInterfacesPerNode);
    numberOfGroupsPerInterface = Math.max(1, numberOfGroupsPerInterface);
    numberOfNumericAttributesPerGroup = Math.max(0, numberOfNumericAttributesPerGroup);
    numberOfStringAttributesPerGroup = Math.max(0, numberOfStringAttributesPerGroup);
    reportIntervalInSeconds = Math.max(1, reportIntervalInSeconds);
    numberOfGeneratorThreads = Math.max(1, numberOfGeneratorThreads);
    stringVariationFactor = Math.max(0, stringVariationFactor);
    if (stringVariationFactor > 0) {
        stringAttributesVaried = metrics.meter("string-attributes-varied");
    }

    // Display the effective settings and rates
    double attributesPerSecond = (1 / (double) intervalInSeconds) * numberOfGroupsPerInterface
            * numberOfInterfacesPerNode * numberOfNodes;
    System.out.printf("Generating collection sets every %d seconds\n", intervalInSeconds);
    System.out.printf("\t for %d nodes\n", numberOfNodes);
    System.out.printf("\t with %d interfaces\n", numberOfInterfacesPerNode);
    System.out.printf("\t with %d attribute groups\n", numberOfGroupsPerInterface);
    System.out.printf("\t with %d numeric attributes\n", numberOfNumericAttributesPerGroup);
    System.out.printf("\t with %d string attributes\n", numberOfStringAttributesPerGroup);
    System.out.printf("Across %d threads\n", numberOfGeneratorThreads);
    if (stringVariationFactor > 0) {
        System.out.printf("With string variation factor %d\n", stringVariationFactor);
    }
    System.out.printf("Which will yield an effective\n");
    System.out.printf("\t %.2f numeric attributes per second\n", numberOfNumericAttributesPerGroup * attributesPerSecond);
    System.out.printf("\t %.2f string attributes per second\n", numberOfStringAttributesPerGroup * attributesPerSecond);
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();

    // Set up the executor
    ThreadFactory threadFactoy = new ThreadFactoryBuilder().setNameFormat("Metrics Stress Tool Generator #%d").build();
    ExecutorService executor = Executors.newFixedThreadPool(numberOfGeneratorThreads, threadFactoy);

    // Set up auxiliary objects needed by the persister
    ServiceParameters params = new ServiceParameters(Collections.emptyMap());
    RrdRepository repository = new RrdRepository();
    repository.setStep(Math.max(intervalInSeconds, 1));
    repository.setHeartBeat(repository.getStep() * 2);
    if (rras != null && rras.size() > 0) {
        repository.setRraList(rras);
    } else {
        // Use the default list of RRAs we provide in our stock configuration files
        repository.setRraList(Lists.newArrayList(
                "RRA:AVERAGE:0.5:1:2016",
                "RRA:AVERAGE:0.5:12:1488",
                "RRA:AVERAGE:0.5:288:366",
                "RRA:MAX:0.5:288:366",
                "RRA:MIN:0.5:288:366"));
    }
    repository.setRrdBaseDir(Paths.get(System.getProperty("opennms.home"), "share", "rrd", "snmp").toFile());

    // Calculate how fast we should insert the collection sets
    int sleepTimeInMillisBetweenNodes = 0;
    int sleepTimeInSecondsBetweenIterations = 0;
    System.out.printf("Sleeping for\n");
    if (burst) {
        sleepTimeInSecondsBetweenIterations = intervalInSeconds;
        System.out.printf("\t %d seconds between batches\n", sleepTimeInSecondsBetweenIterations);
    } else {
        // We want to "stream" the collection sets
        sleepTimeInMillisBetweenNodes = Math.round((((float) intervalInSeconds * 1000) / numberOfNodes) * numberOfGeneratorThreads);
        System.out.printf("\t %d milliseconds between nodes\n", sleepTimeInMillisBetweenNodes);
    }

    // Start generating, and keep generating until we're interrupted
    try {
        reporter.start(reportIntervalInSeconds, TimeUnit.SECONDS);
        while (true) {
            final Context context = batchTimer.time();
            try {
                // Split the tasks up among the threads
                List<Future<Void>> futures = new ArrayList<>();
                for (int generatorThreadId = 0; generatorThreadId < numberOfGeneratorThreads; generatorThreadId++) {
                    futures.add(executor.submit(generateAndPersistCollectionSets(params, repository, generatorThreadId, sleepTimeInMillisBetweenNodes)));
                }
                // Wait for all the tasks to complete before starting others
                for (Future<Void> future : futures) {
                    future.get();
                }
            } catch (InterruptedException | ExecutionException e) {
                break;
            } finally {
                context.stop();
            }
            try {
                Thread.sleep(sleepTimeInSecondsBetweenIterations * 1000L);
            } catch (InterruptedException e) {
                break;
            }
        }
    } finally {
        reporter.stop();
        abort.set(true);
        executor.shutdownNow();
    }
    return null;
}
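Unlike the earlier torodb and OpenNMS snippets, StressCommand does not use try-with-resources: it keeps the Timer.Context in a local variable and calls context.stop() in a finally block, which records the iteration's duration even when the tasks are interrupted or fail. A minimal sketch of that explicit-stop form (the registry and timer name are illustrative):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class ExplicitStopSketch {

    private static final MetricRegistry registry = new MetricRegistry();
    private static final Timer batchTimer = registry.timer("batch-timer");

    public static void main(String[] args) {
        Timer.Context context = batchTimer.time();
        try {
            // Stand-in for submitting the generator tasks and waiting on their futures.
            Thread.sleep(100);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        } finally {
            // stop() records and returns the elapsed time in nanoseconds.
            long elapsedNanos = context.stop();
            System.out.println("iteration took " + elapsedNanos + " ns");
        }
    }
}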