Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class StandardProcessSession, method rollback.
private void rollback(final boolean penalize, final boolean rollbackCheckpoint) {
if (LOG.isDebugEnabled()) {
LOG.debug("{} session rollback called, FlowFile records are {} {}", this, loggableFlowfileInfo(), new Throwable("Stack Trace on rollback"));
}
deleteOnCommit.clear();
closeStreams(openInputStreams, "rolled back", "input");
closeStreams(openOutputStreams, "rolled back", "output");
try {
claimCache.reset();
} catch (IOException e1) {
LOG.warn("{} Attempted to close Output Stream for {} due to session rollback but close failed", this, this.connectableDescription, e1);
}
final Set<StandardRepositoryRecord> recordsToHandle = new HashSet<>();
recordsToHandle.addAll(records.values());
if (rollbackCheckpoint) {
final Checkpoint existingCheckpoint = this.checkpoint;
this.checkpoint = null;
if (existingCheckpoint != null && existingCheckpoint.records != null) {
recordsToHandle.addAll(existingCheckpoint.records.values());
}
}
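// close and discard any content claim streams this session had open for writing or reading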
resetWriteClaims();
resetReadClaim();
if (recordsToHandle.isEmpty()) {
LOG.trace("{} was rolled back, but no events were performed by this ProcessSession", this);
acknowledgeRecords();
resetState();
return;
}
for (final StandardRepositoryRecord record : recordsToHandle) {
// remove the working claims if they are different than the originals.
removeTemporaryClaim(record);
}
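// split the records: aborted ones have their claim counts decremented and are removed from the FlowFile repository; the rest are returned to their original queues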
final Set<RepositoryRecord> abortedRecords = new HashSet<>();
final Set<StandardRepositoryRecord> transferRecords = new HashSet<>();
for (final StandardRepositoryRecord record : recordsToHandle) {
if (record.isMarkedForAbort()) {
decrementClaimCount(record.getWorkingClaim());
if (record.getCurrentClaim() != null && !record.getCurrentClaim().equals(record.getWorkingClaim())) {
// if working & original claim are same, don't remove twice; we only want to remove the original
// if it's different from the working. Otherwise, we remove two claimant counts. This causes
// an issue if we only updated the flowfile attributes.
decrementClaimCount(record.getCurrentClaim());
}
abortedRecords.add(record);
} else {
transferRecords.add(record);
}
}
// Put the FlowFiles that are not marked for abort back to their original queues
for (final StandardRepositoryRecord record : transferRecords) {
if (record.getOriginal() != null) {
final FlowFileQueue originalQueue = record.getOriginalQueue();
if (originalQueue != null) {
if (penalize) {
final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS);
final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getOriginal()).penaltyExpirationTime(expirationEpochMillis).build();
originalQueue.put(newFile);
} else {
originalQueue.put(record.getOriginal());
}
}
}
}
if (!abortedRecords.isEmpty()) {
try {
context.getFlowFileRepository().updateRepository(abortedRecords);
} catch (final IOException ioe) {
LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString());
if (LOG.isDebugEnabled()) {
LOG.error("", ioe);
}
}
}
// If we have transient claims that need to be cleaned up, do so.
final List<ContentClaim> transientClaims = recordsToHandle.stream().flatMap(record -> record.getTransientClaims().stream()).collect(Collectors.toList());
if (!transientClaims.isEmpty()) {
final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims);
try {
context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord));
} catch (final IOException ioe) {
LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}", ioe.toString());
if (LOG.isDebugEnabled()) {
LOG.error("", ioe);
}
}
}
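// even though the session is rolled back, report bytes read/written and immediate counters so component statistics remain accurate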
final Connectable connectable = context.getConnectable();
final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
flowFileEvent.setBytesRead(bytesRead);
flowFileEvent.setBytesWritten(bytesWritten);
flowFileEvent.setCounters(immediateCounters);
// update event repository
try {
context.getFlowFileEventRepository().updateRepository(flowFileEvent);
} catch (final Exception e) {
LOG.error("Failed to update FlowFileEvent Repository due to " + e);
if (LOG.isDebugEnabled()) {
LOG.error("", e);
}
}
acknowledgeRecords();
resetState();
}
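A minimal sketch of how a processor typically reaches this rollback path is shown below. It is illustrative only and not part of the NiFi source; the processor class and the REL_SUCCESS relationship are assumptions.
// Illustrative sketch (assumed processor code, not NiFi source).
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    try {
        // ... work that may read or rewrite the FlowFile's ContentClaim ...
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final ProcessException e) {
        // Returns the FlowFile to its original queue; penalize=true applies the
        // configured penalization period before it can be pulled again.
        session.rollback(true);
    }
}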
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class StandardProcessSession, method checkpoint.
public void checkpoint() {
verifyTaskActive();
resetWriteClaims(false);
closeStreams(openInputStreams, "committed", "input");
closeStreams(openOutputStreams, "committed", "output");
if (!readRecursionSet.isEmpty()) {
throw new IllegalStateException();
}
if (!writeRecursionSet.isEmpty()) {
throw new IllegalStateException();
}
if (this.checkpoint == null) {
this.checkpoint = new Checkpoint();
}
if (records.isEmpty()) {
LOG.trace("{} checkpointed, but no events were performed by this ProcessSession", this);
return;
}
// any drop event that is the result of an auto-terminate should happen at the very end, so we keep the
// records in a separate List so that they can be persisted to the Provenance Repo after all of the
// Processor-reported events.
List<ProvenanceEventRecord> autoTerminatedEvents = null;
// validate that all records have a transfer relationship for them and if so determine the destination node and clone as necessary
final Map<FlowFileRecord, StandardRepositoryRecord> toAdd = new HashMap<>();
for (final StandardRepositoryRecord record : records.values()) {
if (record.isMarkedForDelete()) {
continue;
}
final Relationship relationship = record.getTransferRelationship();
if (relationship == null) {
rollback();
throw new FlowFileHandlingException(record.getCurrent() + " transfer relationship not specified");
}
final List<Connection> destinations = new ArrayList<>(context.getConnections(relationship));
if (destinations.isEmpty() && !context.getConnectable().isAutoTerminated(relationship)) {
if (relationship != Relationship.SELF) {
rollback();
throw new FlowFileHandlingException(relationship + " does not have any destinations for " + context.getConnectable());
}
}
if (destinations.isEmpty() && relationship == Relationship.SELF) {
record.setDestination(record.getOriginalQueue());
} else if (destinations.isEmpty()) {
record.markForDelete();
if (autoTerminatedEvents == null) {
autoTerminatedEvents = new ArrayList<>();
}
final ProvenanceEventRecord dropEvent;
try {
dropEvent = provenanceReporter.generateDropEvent(record.getCurrent(), "Auto-Terminated by " + relationship.getName() + " Relationship");
autoTerminatedEvents.add(dropEvent);
} catch (final Exception e) {
LOG.warn("Unable to generate Provenance Event for {} on behalf of {} due to {}", record.getCurrent(), connectableDescription, e);
if (LOG.isDebugEnabled()) {
LOG.warn("", e);
}
}
} else {
// the last connection receives the original FlowFile; remove it from the list so the remaining connections receive clones
final Connection finalDestination = destinations.remove(destinations.size() - 1);
record.setDestination(finalDestination.getFlowFileQueue());
incrementConnectionInputCounts(finalDestination, record);
for (final Connection destination : destinations) {
// iterate over remaining destinations and "clone" as needed
incrementConnectionInputCounts(destination, record);
final FlowFileRecord currRec = record.getCurrent();
final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec);
builder.id(context.getNextFlowFileSequence());
final String newUuid = UUID.randomUUID().toString();
builder.addAttribute(CoreAttributes.UUID.key(), newUuid);
final FlowFileRecord clone = builder.build();
final StandardRepositoryRecord newRecord = new StandardRepositoryRecord(destination.getFlowFileQueue());
provenanceReporter.clone(currRec, clone, false);
final ContentClaim claim = clone.getContentClaim();
if (claim != null) {
context.getContentRepository().incrementClaimaintCount(claim);
}
newRecord.setWorking(clone, Collections.<String, String>emptyMap());
newRecord.setDestination(destination.getFlowFileQueue());
newRecord.setTransferRelationship(record.getTransferRelationship());
// put the mapping into toAdd because adding to records now will cause a ConcurrentModificationException
toAdd.put(clone, newRecord);
}
}
}
records.putAll(toAdd);
toAdd.clear();
checkpoint.checkpoint(this, autoTerminatedEvents);
resetState();
}
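The clone branch above runs whenever a transfer relationship has more than one outgoing connection. A minimal sketch of a transfer that would exercise it follows; it is illustrative only, and REL_SUCCESS is an assumed processor relationship.
// Illustrative sketch: transferring to a relationship that has two outgoing connections.
// At checkpoint time the session routes the original FlowFile to one connection and, for
// each additional connection, builds a clone that shares the same ContentClaim (the
// claimant count is incremented instead of copying the content).
FlowFile flowFile = session.get();
if (flowFile != null) {
    session.transfer(flowFile, REL_SUCCESS);
    session.commit();
}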
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class StandardProcessSession, method write.
@Override
public FlowFile write(FlowFile source, final OutputStreamCallback writer) {
verifyTaskActive();
source = validateRecordState(source);
final StandardRepositoryRecord record = records.get(source);
long writtenToFlowFile = 0L;
ContentClaim newClaim = null;
try {
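// obtain a claim from the write claim cache; the cache may hand back a claim that already holds other content, which is why the content claim offset is computed from the claim length below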
newClaim = claimCache.getContentClaim();
claimLog.debug("Creating ContentClaim {} for 'write' for {}", newClaim, source);
ensureNotAppending(newClaim);
try (final OutputStream stream = claimCache.write(newClaim);
final OutputStream disableOnClose = new DisableOnCloseOutputStream(stream);
final ByteCountingOutputStream countingOut = new ByteCountingOutputStream(disableOnClose)) {
try {
writeRecursionSet.add(source);
final OutputStream ffaos = new FlowFileAccessOutputStream(countingOut, source);
writer.process(createTaskTerminationStream(ffaos));
} finally {
writtenToFlowFile = countingOut.getBytesWritten();
bytesWritten += countingOut.getBytesWritten();
}
} finally {
writeRecursionSet.remove(source);
}
} catch (final ContentNotFoundException nfe) {
// need to reset write claim before we can remove the claim
resetWriteClaims();
destroyContent(newClaim);
handleContentNotFound(nfe, record);
} catch (final FlowFileAccessException ffae) {
// need to reset write claim before we can remove the claim
resetWriteClaims();
destroyContent(newClaim);
throw ffae;
} catch (final IOException ioe) {
// need to reset write claim before we can remove the claim
resetWriteClaims();
destroyContent(newClaim);
throw new ProcessException("IOException thrown from " + connectableDescription + ": " + ioe.toString(), ioe);
} catch (final Throwable t) {
// need to reset write claim before we can remove the claim
resetWriteClaims();
destroyContent(newClaim);
throw t;
}
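// the write succeeded: drop the previous working claim and point the record at the new claim, offset, and size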
removeTemporaryClaim(record);
final FlowFileRecord newFile = new StandardFlowFileRecord.Builder().fromFlowFile(record.getCurrent()).contentClaim(newClaim).contentClaimOffset(Math.max(0, newClaim.getLength() - writtenToFlowFile)).size(writtenToFlowFile).build();
record.setWorking(newFile);
return newFile;
}
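From a processor's perspective this method is reached through ProcessSession.write with an OutputStreamCallback. A minimal sketch follows; it is illustrative only, and the payload bytes are an assumption.
// Illustrative sketch: replacing a FlowFile's content, which allocates the new
// ContentClaim handled by write(...) above. OutputStreamCallback comes from
// org.apache.nifi.processor.io.
final byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
FlowFile flowFile = session.get();
if (flowFile != null) {
    flowFile = session.write(flowFile, new OutputStreamCallback() {
        @Override
        public void process(final OutputStream out) throws IOException {
            out.write(payload);
        }
    });
}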
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class StandardProcessSession, method clone.
@Override
public FlowFile clone(FlowFile example, final long offset, final long size) {
verifyTaskActive();
example = validateRecordState(example);
final StandardRepositoryRecord exampleRepoRecord = records.get(example);
final FlowFileRecord currRec = exampleRepoRecord.getCurrent();
final ContentClaim claim = exampleRepoRecord.getCurrentClaim();
if (offset + size > example.getSize()) {
throw new FlowFileHandlingException("Specified offset of " + offset + " and size " + size + " exceeds size of " + example.toString());
}
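// the clone reuses the parent's content claim; only the claim offset and reported size change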
final StandardFlowFileRecord.Builder builder = new StandardFlowFileRecord.Builder().fromFlowFile(currRec);
builder.id(context.getNextFlowFileSequence());
builder.contentClaimOffset(currRec.getContentClaimOffset() + offset);
builder.size(size);
final String newUuid = UUID.randomUUID().toString();
builder.addAttribute(CoreAttributes.UUID.key(), newUuid);
final FlowFileRecord clone = builder.build();
if (claim != null) {
context.getContentRepository().incrementClaimaintCount(claim);
}
final StandardRepositoryRecord record = new StandardRepositoryRecord(null);
record.setWorking(clone, clone.getAttributes());
records.put(clone, record);
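// a clone of the full content is reported as a CLONE provenance event; a partial range is reported as a FORK event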
if (offset == 0L && size == example.getSize()) {
provenanceReporter.clone(example, clone);
} else {
registerForkEvent(example, clone);
}
return clone;
}
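A minimal sketch of calling this method to clone only part of a FlowFile's content is shown below. It is illustrative only; REL_SUCCESS and REL_ORIGINAL are assumed processor relationships.
// Illustrative sketch: clone the first 1024 bytes of a FlowFile. The clone shares the
// parent's ContentClaim (its claimant count is incremented); only the claim offset and
// size differ, so no content is copied.
FlowFile original = session.get();
if (original != null && original.getSize() >= 1024) {
    final FlowFile firstKb = session.clone(original, 0L, 1024L);
    session.transfer(firstKb, REL_SUCCESS);
    session.transfer(original, REL_ORIGINAL);
}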
Use of org.apache.nifi.controller.repository.claim.ContentClaim in project nifi by apache.
The class VolatileContentRepository, method create.
@Override
public ContentClaim create(boolean lossTolerant) throws IOException {
if (lossTolerant) {
return createLossTolerant();
} else {
final ContentRepository backupRepo = getBackupRepository();
if (backupRepo == null) {
// no backup repository is configured, so fall back to a loss-tolerant (in-memory) claim
return createLossTolerant();
}
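// a backup repository is configured: create the claim there so the content survives a restart, and remember the mapping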
final ContentClaim backupClaim = backupRepo.create(lossTolerant);
backupRepoClaimMap.put(backupClaim, backupClaim);
return backupClaim;
}
}
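A minimal sketch of exercising both branches of create is shown below. It is illustrative only; the contentRepository instance and the data bytes are assumptions, and a backup repository is assumed to have been configured during repository initialization.
// Illustrative sketch: a loss-tolerant claim is held in memory, while a non-loss-tolerant
// claim is delegated to the backup repository (when one is configured) so the content can
// survive a restart.
final ContentClaim inMemoryClaim = contentRepository.create(true);
final ContentClaim durableClaim = contentRepository.create(false);
try (final OutputStream out = contentRepository.write(inMemoryClaim)) {
    out.write(data);
}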