Use of com.newrelic.agent.transport.HttpError in project newrelic-java-agent by newrelic.
In class TransactionEventsServiceTest, method testSyntheticsBuffering2:
@Test
public void testSyntheticsBuffering2() throws Exception {
    setup(true, true, TEST_RESERVOIR_SIZE);
    this.rpmService.setSendAnalyticsEventsException(new HttpError("", HttpResponseCode.REQUEST_TIMEOUT, 0));
    // Limit the number of events.
    for (int i = 0; i < TransactionEventsService.MAX_UNSENT_SYNTHETICS_HOLDERS; ++i) {
        for (int j = 0; j < TransactionEventsService.MAX_SYNTHETIC_EVENTS_PER_APP + 1; ++j) {
            TransactionData transactionData = generateSyntheticTransactionData();
            TransactionStats transactionStats = new TransactionStats();
            service.dispatcherTransactionFinished(transactionData, transactionStats);
        }
        service.harvestEvents(APP_NAME);
    }
    // Check that the buffer of pending synthetics heaps is full.
    assertEquals(TransactionEventsService.MAX_UNSENT_SYNTHETICS_HOLDERS, service.pendingSyntheticsHeaps.size());
    DistributedSamplingPriorityQueue<TransactionEvent> firstPendingSyntheticsHeap = service.pendingSyntheticsHeaps.peek();
    assertNotNull(firstPendingSyntheticsHeap);
    assertEquals(TransactionEventsService.MAX_SYNTHETIC_EVENTS_PER_APP, firstPendingSyntheticsHeap.size());
    // Now ensure that overflow keeps the most recent elements and only loses the oldest.
    // Grab the very oldest value from the queue, fail one more buffer's worth of events,
    // and then check that the oldest value is gone. The job IDs are all distinct because
    // they are generated from the current nanoTime.
    String jobId = firstPendingSyntheticsHeap.peek().getSyntheticsJobId();
    for (int j = 0; j < TransactionEventsService.MAX_SYNTHETIC_EVENTS_PER_APP + 1; ++j) {
        TransactionData transactionData = generateSyntheticTransactionData();
        TransactionStats transactionStats = new TransactionStats();
        service.dispatcherTransactionFinished(transactionData, transactionStats);
    }
    service.harvestEvents(APP_NAME);
    firstPendingSyntheticsHeap = service.pendingSyntheticsHeaps.peek();
    assertNotNull(firstPendingSyntheticsHeap);
    assertNotEquals(jobId, firstPendingSyntheticsHeap.peek().getSyntheticsJobId());
    // Finally, clear the exception so the mock RPM service no longer throws on every send.
    // Verify the catch-up algorithm: the pending count goes down after the first successful
    // harvest, but not all the way to zero, without hard-wiring the exact catch-up rate.
    int numPendingBuffers = service.pendingSyntheticsHeaps.size();
    assertEquals(TransactionEventsService.MAX_UNSENT_SYNTHETICS_HOLDERS, numPendingBuffers);
    this.rpmService.clearSendAnalyticsEventsException();
    service.harvestEvents(APP_NAME);
    assertNotEquals(0, service.pendingSyntheticsHeaps.size());
    assertTrue(service.pendingSyntheticsHeaps.size() < numPendingBuffers - 1);
}
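The buffering behavior this test pins down is essentially a bounded retry queue: hold at most a fixed number of unsent payloads and evict the oldest when yet another failed harvest arrives. The sketch below illustrates that policy in isolation; BoundedRetryBuffer and its methods are illustrative stand-ins, not agent types.

import java.util.ArrayDeque;
import java.util.Deque;

// Illustrative only: a drop-oldest buffer mirroring the policy the test verifies.
final class BoundedRetryBuffer<T> {

    private final int capacity;
    private final Deque<T> pending = new ArrayDeque<>();

    BoundedRetryBuffer(int capacity) {
        this.capacity = capacity;
    }

    // Called when a harvest fails with a retryable error: keep the newest
    // payloads, discarding the oldest once the cap is reached.
    void buffer(T unsentPayload) {
        if (pending.size() == capacity) {
            pending.pollFirst();
        }
        pending.addLast(unsentPayload);
    }

    // Called when the connection recovers: drain some (or all) pending payloads.
    T nextToRetry() {
        return pending.pollFirst();
    }

    int size() {
        return pending.size();
    }
}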
Use of com.newrelic.agent.transport.HttpError in project newrelic-java-agent by newrelic.
In class CollectorSpanEventReservoirManagerTest, method httpErrorTriggerDiscard:
@Test
public void httpErrorTriggerDiscard() {
    ConfigService mockConfigService = mock21Samples();
    CollectorSpanEventReservoirManager target = initWith25Tries(mockConfigService);
    ReservoirManager.HarvestResult harvestResult = target.attemptToSendReservoir(APP_NAME, (appName, reservoirSize, eventsSeen, events) -> {
        throw new HttpError("message", 0, 0) {
            @Override
            public boolean discardHarvestData() {
                return true;
            }
        };
    }, mock(Logger.class));
    assertNull(harvestResult);
    assertEquals(0, target.getOrCreateReservoir(APP_NAME).size());
    assertEquals(0, target.getOrCreateReservoir(APP_NAME).getNumberOfTries());
}
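The test drives the discard path through HttpError.discardHarvestData(): when it returns true, the failed payload is thrown away rather than retried, which is the same check the production code in the following sections makes. A minimal sketch of that branch, where the collection parameters stand in for whatever structure holds the unsent events:

import java.util.Collection;
import com.newrelic.agent.transport.HttpError;

// Illustrative helper, not agent code.
final class SendFailureHandler {

    // Route a failed send based on whether the collector's response says the
    // data should be discarded or may be retried on the next harvest.
    static <T> void onSendFailure(HttpError e, Collection<T> unsent, Collection<T> retryBuffer) {
        if (e.discardHarvestData()) {
            unsent.clear();                // drop the payload entirely
        } else {
            retryBuffer.addAll(unsent);    // keep it so the next harvest can retry
        }
    }
}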
Use of com.newrelic.agent.transport.HttpError in project newrelic-java-agent by newrelic.
In class CommandParser, method beforeHarvest:
/**
* Gets the agent commands from the rpm service, processes them, and returns the command results.
*
* @see RPMService#getAgentCommands()
* @see RPMService#sendCommandResults(Map)
*/
@Override
public void beforeHarvest(String appName, StatsEngine statsEngine) {
    IRPMService rpmService = ServiceFactory.getRPMServiceManager().getOrCreateRPMService(appName);
    for (Iterator<Map<Long, Object>> iterator = unsentCommandData.iterator(); iterator.hasNext(); ) {
        Map<Long, Object> result = iterator.next();
        try {
            rpmService.sendCommandResults(result);
            iterator.remove();
        } catch (HttpError e) {
            if (e.discardHarvestData()) {
                iterator.remove();
            } else {
                String msg = MessageFormat.format("Unable to send agent command feedback. Data will be retried on the next harvest. Command results: {0}", result.toString());
                getLogger().fine(msg);
            }
        } catch (Exception e) {
            iterator.remove();
            String msg = MessageFormat.format("Unable to send agent command feedback. Data will be dropped. Command results: {0}", result.toString());
            getLogger().fine(msg);
        }
    }
    List<List<?>> commands;
    try {
        commands = rpmService.getAgentCommands();
    } catch (Exception e) {
        getLogger().log(Level.FINE, "Unable to get agent commands - {0}", e.toString());
        getLogger().log(Level.FINEST, e, e.toString());
        return;
    }
    Map<Long, Object> commandResults = processCommands(rpmService, commands);
    try {
        rpmService.sendCommandResults(commandResults);
    } catch (HttpError e) {
        if (!e.discardHarvestData()) {
            unsentCommandData.add(commandResults);
            String msg = MessageFormat.format("Unable to send agent command feedback. Data will be retried on the next harvest. Command results: {0}", commandResults.toString());
            getLogger().fine(msg);
        }
    } catch (Exception e) {
        String msg = MessageFormat.format("Unable to send agent command feedback. Command results: {0}", commandResults.toString());
        getLogger().fine(msg);
    }
}
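The loop over unsentCommandData above follows a compact pattern: try each stale result, remove it on success or when the collector says to discard it, and keep it only for retryable HTTP errors. A condensed sketch of that bookkeeping, where ResultSender is a made-up functional interface rather than an agent type:

import java.util.Iterator;
import java.util.List;
import java.util.Map;
import com.newrelic.agent.transport.HttpError;

// Illustrative only: ResultSender stands in for IRPMService.sendCommandResults.
interface ResultSender {
    void send(Map<Long, Object> results) throws Exception;
}

final class CommandResultRetry {

    static void flush(List<Map<Long, Object>> unsent, ResultSender sender) {
        for (Iterator<Map<Long, Object>> it = unsent.iterator(); it.hasNext(); ) {
            Map<Long, Object> results = it.next();
            try {
                sender.send(results);
                it.remove();               // sent successfully
            } catch (HttpError e) {
                if (e.discardHarvestData()) {
                    it.remove();           // collector rejected the data outright
                }                          // otherwise keep it for the next harvest
            } catch (Exception e) {
                it.remove();               // other failures are not retried
            }
        }
    }
}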
Use of com.newrelic.agent.transport.HttpError in project newrelic-java-agent by newrelic.
In class ErrorServiceImpl, method harvestEvents:
public void harvestEvents(final String appName) {
    boolean eventsEnabled = isEventsEnabledForApp(appName);
    if (!eventsEnabled) {
        reservoirForApp.remove(appName);
        return;
    }
    if (maxSamplesStored <= 0) {
        clearReservoir(appName);
        return;
    }
    long startTimeInNanos = System.nanoTime();
    final DistributedSamplingPriorityQueue<ErrorEvent> reservoir = reservoirForApp.put(appName, new DistributedSamplingPriorityQueue<ErrorEvent>(appName, "Error Service", maxSamplesStored));
    if (reservoir != null && reservoir.size() > 0) {
        try {
            ServiceFactory.getRPMServiceManager().getOrCreateRPMService(appName).sendErrorEvents(maxSamplesStored, reservoir.getNumberOfTries(), Collections.unmodifiableList(reservoir.asList()));
            final long durationInNanos = System.nanoTime() - startTimeInNanos;
            ServiceFactory.getStatsService().doStatsWork(new StatsWork() {

                @Override
                public void doWork(StatsEngine statsEngine) {
                    recordSupportabilityMetrics(statsEngine, durationInNanos, reservoir);
                }

                @Override
                public String getAppName() {
                    return appName;
                }
            }, reservoir.getServiceName());
            if (reservoir.size() < reservoir.getNumberOfTries()) {
                int dropped = reservoir.getNumberOfTries() - reservoir.size();
                Agent.LOG.log(Level.FINE, "Dropped {0} error events out of {1}.", dropped, reservoir.getNumberOfTries());
            }
        } catch (HttpError e) {
            if (!e.discardHarvestData()) {
                Agent.LOG.log(Level.FINE, "Unable to send error events. Unsent events will be included in the next harvest.", e);
                // Save unsent data by merging it into the current reservoir using the sampling algorithm.
                DistributedSamplingPriorityQueue<ErrorEvent> currentReservoir = reservoirForApp.get(appName);
                currentReservoir.retryAll(reservoir);
            } else {
                // The collector indicated this harvest data should be discarded.
                reservoir.clear();
                Agent.LOG.log(Level.FINE, "Unable to send error events. Unsent events will be dropped.", e);
            }
        } catch (Exception e) {
            // Discard harvest data on any other failure.
            reservoir.clear();
            Agent.LOG.log(Level.FINE, "Unable to send error events. Unsent events will be dropped.", e);
        }
    }
}
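One detail worth calling out: reservoirForApp.put(...) returns the reservoir that was previously mapped, so installing a fresh reservoir and obtaining the snapshot to send happens in a single step, and events arriving during the send accumulate in the new reservoir. A minimal sketch of that swap idiom with ordinary collections; the types and the send helper below are placeholders, not the agent's:

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

// Illustrative only: shows the "replace and capture previous" idiom used above.
final class SwapAndSendExample {

    private final Map<String, Queue<String>> bufferPerApp = new ConcurrentHashMap<>();

    void harvest(String appName) {
        // Map.put returns the previous value: the old buffer becomes the snapshot
        // to send, while new events keep accumulating in the fresh queue.
        Queue<String> toSend = bufferPerApp.put(appName, new ConcurrentLinkedQueue<>());
        if (toSend == null || toSend.isEmpty()) {
            return;
        }
        send(toSend);
    }

    private void send(Queue<String> events) {
        // Placeholder for the transport call; on a retryable HttpError the real
        // code folds these events back into the live reservoir via retryAll.
    }
}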
Use of com.newrelic.agent.transport.HttpError in project newrelic-java-agent by newrelic.
In class CollectorSpanEventReservoirManager, method attemptToSendReservoir:
@Override
public HarvestResult attemptToSendReservoir(final String appName, EventSender<SpanEvent> eventSender, Logger logger) {
    if (getMaxSamplesStored() <= 0) {
        clearReservoir();
        return null;
    }
    SpanEventsConfig config = configService.getAgentConfig(appName).getSpanEventsConfig();
    int decidedLast = AdaptiveSampling.decidedLast(spanReservoirsForApp.get(appName), config.getTargetSamplesStored());
    // Save a reference to the old reservoir to finish harvesting, and create a new one.
    final SamplingPriorityQueue<SpanEvent> toSend = spanReservoirsForApp.get(appName);
    spanReservoirsForApp.put(appName, createDistributedSamplingReservoir(appName, decidedLast));
    if (toSend == null || toSend.size() <= 0) {
        return null;
    }
    try {
        eventSender.sendEvents(appName, config.getMaxSamplesStored(), toSend.getNumberOfTries(), Collections.unmodifiableList(toSend.asList()));
        if (toSend.size() < toSend.getNumberOfTries()) {
            int dropped = toSend.getNumberOfTries() - toSend.size();
            logger.log(Level.FINE, "Dropped {0} span events out of {1}.", dropped, toSend.getNumberOfTries());
        }
        return new HarvestResult(toSend.getNumberOfTries(), toSend.size());
    } catch (HttpError e) {
        if (!e.discardHarvestData()) {
            logger.log(Level.FINE, "Unable to send span events. Unsent events will be included in the next harvest.", e);
            // Save unsent data by merging it with the toSend data using the reservoir algorithm.
            spanReservoirsForApp.get(appName).retryAll(toSend);
        } else {
            // Discard harvest data.
            toSend.clear();
            logger.log(Level.FINE, "Unable to send span events. Unsent events will be dropped.", e);
        }
    } catch (Exception e) {
        // Discard harvest data.
        toSend.clear();
        logger.log(Level.FINE, "Unable to send span events. Unsent events will be dropped.", e);
    }
    return null;
}
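For completeness, a caller-side sketch of attemptToSendReservoir on the happy path; the lambda's parameters mirror the failing-sender test further above, while the surrounding method, the manager instance, and sendOverTransport are assumptions for illustration only. A null result means the reservoir was empty or the sender threw.

// Illustrative only: the manager, logger, and transport helper are assumed to exist.
void harvestSpans(CollectorSpanEventReservoirManager reservoirManager, String appName, Logger logger) {
    ReservoirManager.HarvestResult result = reservoirManager.attemptToSendReservoir(
            appName,
            (app, reservoirSize, eventsSeen, events) -> {
                // Hand the batch to the transport layer; throwing an HttpError here
                // routes into the retry/discard handling shown in the method above.
                sendOverTransport(app, events);   // hypothetical helper, not an agent API
            },
            logger);
    if (result == null) {
        // Nothing was sent: the reservoir was empty or the sender threw.
    }
}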