Use of org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent in project nifi by apache.
From the class StandardProcessSession, method incrementConnectionOutputCounts:
private void incrementConnectionOutputCounts(final String connectionId, final int flowFileCount, final long bytes) {
    final StandardFlowFileEvent connectionEvent = connectionCounts.computeIfAbsent(connectionId, id -> new StandardFlowFileEvent(id));
    connectionEvent.setContentSizeOut(connectionEvent.getContentSizeOut() + bytes);
    connectionEvent.setFlowFilesOut(connectionEvent.getFlowFilesOut() + flowFileCount);
}
Use of org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent in project nifi by apache.
From the class StandardProcessSession, method incrementConnectionInputCounts:
private void incrementConnectionInputCounts(final String connectionId, final int flowFileCount, final long bytes) {
    final StandardFlowFileEvent connectionEvent = connectionCounts.computeIfAbsent(connectionId, id -> new StandardFlowFileEvent(id));
    connectionEvent.setContentSizeIn(connectionEvent.getContentSizeIn() + bytes);
    connectionEvent.setFlowFilesIn(connectionEvent.getFlowFilesIn() + flowFileCount);
}
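Both helpers share one accumulation pattern: computeIfAbsent lazily creates a single StandardFlowFileEvent per connection id, and every later call mutates that same event, so counts accumulate over the life of the session. A minimal sketch of the pattern in isolation; the snippets above only show the computeIfAbsent call, so the exact map type (here a ConcurrentHashMap) is an assumption:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent;

public class ConnectionCountsSketch {

    // Assumed map type; the session only reveals that it supports computeIfAbsent.
    private final Map<String, StandardFlowFileEvent> connectionCounts = new ConcurrentHashMap<>();

    public void recordOutput(final String connectionId, final int flowFileCount, final long bytes) {
        // First call for a connection id creates the event; later calls reuse it,
        // so flowfile and byte counts accumulate per connection.
        final StandardFlowFileEvent event = connectionCounts.computeIfAbsent(connectionId, id -> new StandardFlowFileEvent(id));
        event.setFlowFilesOut(event.getFlowFilesOut() + flowFileCount);
        event.setContentSizeOut(event.getContentSizeOut() + bytes);
    }
}

The input-side counterpart is identical except that it updates the FlowFilesIn/ContentSizeIn pair.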
Use of org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent in project nifi by apache.
From the class StandardProcessSession, method rollback:
private void rollback(final boolean penalize, final boolean rollbackCheckpoint) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("{} session rollback called, FlowFile records are {} {}", this, loggableFlowfileInfo(), new Throwable("Stack Trace on rollback"));
    }
    deleteOnCommit.clear();
    closeStreams(openInputStreams, "rolled back", "input");
    closeStreams(openOutputStreams, "rolled back", "output");
    try {
        claimCache.reset();
    } catch (final IOException e1) {
        LOG.warn("{} Attempted to close Output Stream for {} due to session rollback but close failed", this, this.connectableDescription, e1);
    }
    final Set<StandardRepositoryRecord> recordsToHandle = new HashSet<>();
    recordsToHandle.addAll(records.values());
    if (rollbackCheckpoint) {
        final Checkpoint existingCheckpoint = this.checkpoint;
        this.checkpoint = null;
        if (existingCheckpoint != null && existingCheckpoint.records != null) {
            recordsToHandle.addAll(existingCheckpoint.records.values());
        }
    }
    resetWriteClaims();
    resetReadClaim();
    if (recordsToHandle.isEmpty()) {
        LOG.trace("{} was rolled back, but no events were performed by this ProcessSession", this);
        acknowledgeRecords();
        resetState();
        return;
    }
    for (final StandardRepositoryRecord record : recordsToHandle) {
        // remove the working claims if they are different from the originals.
        removeTemporaryClaim(record);
    }
    final Set<RepositoryRecord> abortedRecords = new HashSet<>();
    final Set<StandardRepositoryRecord> transferRecords = new HashSet<>();
    for (final StandardRepositoryRecord record : recordsToHandle) {
        if (record.isMarkedForAbort()) {
            decrementClaimCount(record.getWorkingClaim());
            if (record.getCurrentClaim() != null && !record.getCurrentClaim().equals(record.getWorkingClaim())) {
                // if working & original claim are same, don't remove twice; we only want to remove the original
                // if it's different from the working. Otherwise, we remove two claimant counts. This causes
                // an issue if we only updated the flowfile attributes.
                decrementClaimCount(record.getCurrentClaim());
            }
            abortedRecords.add(record);
        } else {
            transferRecords.add(record);
        }
    }
    // Put the FlowFiles that are not marked for abort back to their original queues
    for (final StandardRepositoryRecord record : transferRecords) {
        if (record.getOriginal() != null) {
            final FlowFileQueue originalQueue = record.getOriginalQueue();
            if (originalQueue != null) {
                if (penalize) {
                    final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS);
                    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
                        .fromFlowFile(record.getOriginal())
                        .penaltyExpirationTime(expirationEpochMillis)
                        .build();
                    originalQueue.put(newFile);
                } else {
                    originalQueue.put(record.getOriginal());
                }
            }
        }
    }
    if (!abortedRecords.isEmpty()) {
        try {
            context.getFlowFileRepository().updateRepository(abortedRecords);
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }
    // If we have transient claims that need to be cleaned up, do so.
    final List<ContentClaim> transientClaims = recordsToHandle.stream()
        .flatMap(record -> record.getTransientClaims().stream())
        .collect(Collectors.toList());
    if (!transientClaims.isEmpty()) {
        final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims);
        try {
            context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord));
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }
    final Connectable connectable = context.getConnectable();
    final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
    flowFileEvent.setBytesRead(bytesRead);
    flowFileEvent.setBytesWritten(bytesWritten);
    flowFileEvent.setCounters(immediateCounters);
    // update event repository
    try {
        context.getFlowFileEventRepository().updateRepository(flowFileEvent);
    } catch (final Exception e) {
LOG.error("Failed to update FlowFileEvent Repository due to " + e);
        if (LOG.isDebugEnabled()) {
            LOG.error("", e);
        }
    }
    acknowledgeRecords();
    resetState();
}
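Note the tail of rollback(): although every FlowFile transfer is undone, the bytes actually read and written during the session are still reported so the component's statistics stay accurate. A sketch isolating that step; reportRolledBackIo is a hypothetical helper, not NiFi code, though the single-argument updateRepository call matches the usage above:

import java.io.IOException;

import org.apache.nifi.controller.repository.FlowFileEventRepository;
import org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent;

final class RollbackMetricsSketch {

    // Hypothetical helper: report I/O performed by a session that was rolled
    // back, mirroring the end of StandardProcessSession.rollback().
    static void reportRolledBackIo(final FlowFileEventRepository eventRepo, final String componentId,
                                   final long bytesRead, final long bytesWritten) {
        final StandardFlowFileEvent event = new StandardFlowFileEvent(componentId);
        event.setBytesRead(bytesRead);
        event.setBytesWritten(bytesWritten);
        try {
            eventRepo.updateRepository(event);
        } catch (final IOException ioe) {
            // A rollback should not fail just because metrics could not be recorded.
        }
    }
}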
Use of org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent in project nifi by apache.
From the class StandardProcessSession, method updateEventRepository:
private void updateEventRepository(final Checkpoint checkpoint) {
    int flowFilesReceived = 0;
    int flowFilesSent = 0;
    long bytesReceived = 0L;
    long bytesSent = 0L;
    for (final ProvenanceEventRecord event : checkpoint.reportedEvents) {
        if (isSpuriousForkEvent(event, checkpoint.removedFlowFiles)) {
            continue;
        }
        switch (event.getEventType()) {
            case SEND:
                flowFilesSent++;
                bytesSent += event.getFileSize();
                break;
            case RECEIVE:
            case FETCH:
                flowFilesReceived++;
                bytesReceived += event.getFileSize();
                break;
            default:
                break;
        }
    }
    try {
        // update event repository
        final Connectable connectable = context.getConnectable();
        final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
        flowFileEvent.setBytesRead(checkpoint.bytesRead);
        flowFileEvent.setBytesWritten(checkpoint.bytesWritten);
        flowFileEvent.setContentSizeIn(checkpoint.contentSizeIn);
        flowFileEvent.setContentSizeOut(checkpoint.contentSizeOut);
        flowFileEvent.setContentSizeRemoved(checkpoint.removedBytes);
        flowFileEvent.setFlowFilesIn(checkpoint.flowFilesIn);
        flowFileEvent.setFlowFilesOut(checkpoint.flowFilesOut);
        flowFileEvent.setFlowFilesRemoved(checkpoint.removedCount);
        flowFileEvent.setFlowFilesReceived(flowFilesReceived);
        flowFileEvent.setBytesReceived(bytesReceived);
        flowFileEvent.setFlowFilesSent(flowFilesSent);
        flowFileEvent.setBytesSent(bytesSent);
        long lineageMillis = 0L;
        for (final Map.Entry<FlowFileRecord, StandardRepositoryRecord> entry : checkpoint.records.entrySet()) {
            final FlowFile flowFile = entry.getKey();
            final long lineageDuration = System.currentTimeMillis() - flowFile.getLineageStartDate();
            lineageMillis += lineageDuration;
        }
        flowFileEvent.setAggregateLineageMillis(lineageMillis);
        final Map<String, Long> counters = combineCounters(checkpoint.countersOnCommit, checkpoint.immediateCounters);
        flowFileEvent.setCounters(counters);
        context.getFlowFileEventRepository().updateRepository(flowFileEvent);
        for (final FlowFileEvent connectionEvent : checkpoint.connectionCounts.values()) {
            context.getFlowFileEventRepository().updateRepository(connectionEvent);
        }
    } catch (final IOException ioe) {
        LOG.error("FlowFile Event Repository failed to update", ioe);
    }
}
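combineCounters is referenced above but not shown in this excerpt. A plausible sketch of what it must do, merging the counters adjusted at commit time with those adjusted immediately and summing values that share a counter name; this is a hypothetical implementation, not the project's actual code:

import java.util.HashMap;
import java.util.Map;

final class CounterSketch {

    // Hypothetical sketch of the combineCounters helper: merge two nullable
    // counter maps into one, summing values for names present in both.
    static Map<String, Long> combineCounters(final Map<String, Long> onCommit, final Map<String, Long> immediate) {
        if (onCommit == null && immediate == null) {
            return null;
        }
        final Map<String, Long> combined = new HashMap<>();
        if (onCommit != null) {
            combined.putAll(onCommit);
        }
        if (immediate != null) {
            immediate.forEach((name, value) -> combined.merge(name, value, Long::sum));
        }
        return combined;
    }
}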
Use of org.apache.nifi.controller.repository.metrics.StandardFlowFileEvent in project nifi by apache.
From the class ConnectableTask, method invoke:
public InvocationResult invoke() {
    if (scheduleState.isTerminated()) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // make sure processor is not yielded
    if (isYielded()) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // make sure that either we're not clustered or this processor runs on all nodes or that this is the primary node
    if (!isRunOnCluster(flowController)) {
        return InvocationResult.DO_NOT_YIELD;
    }
    // make sure there is work to do; this is the case when, for example, data is queued
    // in an incoming connection or all incoming connections are self-loops
    if (!isWorkToDo()) {
        return InvocationResult.yield("No work to do");
    }
    if (numRelationships > 0) {
        final int requiredNumberOfAvailableRelationships = connectable.isTriggerWhenAnyDestinationAvailable() ? 1 : numRelationships;
        if (!repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships)) {
            return InvocationResult.yield("Backpressure Applied");
        }
    }
    final long batchNanos = connectable.getRunDuration(TimeUnit.NANOSECONDS);
    final ProcessSessionFactory sessionFactory;
    final StandardProcessSession rawSession;
    final boolean batch;
    if (connectable.isSessionBatchingSupported() && batchNanos > 0L) {
        rawSession = new StandardProcessSession(repositoryContext, scheduleState::isTerminated);
        sessionFactory = new BatchingSessionFactory(rawSession);
        batch = true;
    } else {
        rawSession = null;
        sessionFactory = new StandardProcessSessionFactory(repositoryContext, scheduleState::isTerminated);
        batch = false;
    }
    final ActiveProcessSessionFactory activeSessionFactory = new WeakHashMapProcessSessionFactory(sessionFactory);
    scheduleState.incrementActiveThreadCount(activeSessionFactory);
    final long startNanos = System.nanoTime();
    final long finishIfBackpressureEngaged = startNanos + (batchNanos / 25L);
    final long finishNanos = startNanos + batchNanos;
    int invocationCount = 0;
    final String originalThreadName = Thread.currentThread().getName();
    try {
        try (final AutoCloseable ncl = NarCloseable.withComponentNarLoader(connectable.getRunnableComponent().getClass(), connectable.getIdentifier())) {
            boolean shouldRun = connectable.getScheduledState() == ScheduledState.RUNNING;
            while (shouldRun) {
                connectable.onTrigger(processContext, activeSessionFactory);
                invocationCount++;
                if (!batch) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                final long nanoTime = System.nanoTime();
                if (nanoTime > finishNanos) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                if (nanoTime > finishIfBackpressureEngaged && isBackPressureEngaged()) {
                    return InvocationResult.DO_NOT_YIELD;
                }
                if (connectable.getScheduledState() != ScheduledState.RUNNING) {
                    break;
                }
                if (!isWorkToDo()) {
                    break;
                }
                if (isYielded()) {
                    break;
                }
                if (numRelationships > 0) {
                    final int requiredNumberOfAvailableRelationships = connectable.isTriggerWhenAnyDestinationAvailable() ? 1 : numRelationships;
                    shouldRun = repositoryContext.isRelationshipAvailabilitySatisfied(requiredNumberOfAvailableRelationships);
                }
            }
        } catch (final TerminatedTaskException tte) {
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.info("Failed to process session due to task being terminated", new Object[] { tte });
        } catch (final ProcessException pe) {
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.error("Failed to process session due to {}", new Object[] { pe });
        } catch (final Throwable t) {
            // Use ComponentLog to log the event so that a bulletin will be created for this processor
            final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
            procLog.error("{} failed to process session due to {}; Processor Administratively Yielded for {}", new Object[] { connectable.getRunnableComponent(), t, schedulingAgent.getAdministrativeYieldDuration() }, t);
            logger.warn("Administratively Yielding {} due to uncaught Exception: {}", connectable.getRunnableComponent(), t.toString(), t);
            connectable.yield(schedulingAgent.getAdministrativeYieldDuration(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
        }
    } finally {
        try {
            if (batch) {
                try {
                    rawSession.commit();
                } catch (final Exception e) {
                    final ComponentLog procLog = new SimpleProcessLogger(connectable.getIdentifier(), connectable.getRunnableComponent());
                    procLog.error("Failed to commit session {} due to {}; rolling back", new Object[] { rawSession, e.toString() }, e);
                    try {
                        rawSession.rollback(true);
                    } catch (final Exception e1) {
procLog.error("Failed to roll back session {} due to {}", new Object[] { rawSession, e.toString() }, e);
                    }
                }
            }
            final long processingNanos = System.nanoTime() - startNanos;
            try {
                final StandardFlowFileEvent procEvent = new StandardFlowFileEvent(connectable.getIdentifier());
                procEvent.setProcessingNanos(processingNanos);
                procEvent.setInvocations(invocationCount);
                repositoryContext.getFlowFileEventRepository().updateRepository(procEvent);
            } catch (final IOException e) {
                logger.error("Unable to update FlowFileEvent Repository for {}; statistics may be inaccurate. Reason for failure: {}", connectable.getRunnableComponent(), e.toString());
                logger.error("", e);
            }
        } finally {
            scheduleState.decrementActiveThreadCount(activeSessionFactory);
            Thread.currentThread().setName(originalThreadName);
        }
    }
    return InvocationResult.DO_NOT_YIELD;
}
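The batching arithmetic in invoke() is worth isolating: a batched session keeps re-triggering the component until the configured run duration elapses, but gives up after one twenty-fifth of that window (batchNanos / 25) if backpressure is engaged. A self-contained sketch of just the timing decision; BatchWindow is a hypothetical name, not a NiFi class:

import java.util.concurrent.TimeUnit;

// Hypothetical sketch of the batch-window timing from ConnectableTask.invoke().
final class BatchWindow {

    private final long finishNanos;
    private final long finishIfBackpressureEngagedNanos;

    BatchWindow(final long startNanos, final long batchNanos) {
        // Same arithmetic as invoke(): the full window, plus a much shorter
        // cutoff (batchNanos / 25) that applies only under backpressure.
        this.finishNanos = startNanos + batchNanos;
        this.finishIfBackpressureEngagedNanos = startNanos + batchNanos / 25L;
    }

    boolean shouldContinue(final long nowNanos, final boolean backpressureEngaged) {
        if (nowNanos > finishNanos) {
            return false;
        }
        return !(backpressureEngaged && nowNanos > finishIfBackpressureEngagedNanos);
    }

    public static void main(final String[] args) {
        // With a 25 ms run duration, backpressure cuts the batch off after ~1 ms.
        final long start = System.nanoTime();
        final BatchWindow window = new BatchWindow(start, TimeUnit.MILLISECONDS.toNanos(25));
        System.out.println(window.shouldContinue(start + TimeUnit.MILLISECONDS.toNanos(2), true)); // false
        System.out.println(window.shouldContinue(start + TimeUnit.MILLISECONDS.toNanos(2), false)); // true
    }
}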