Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class TestStandardProcessSession, method createConnection.
private Connection createConnection() {
    AtomicReference<FlowFileQueue> queueReference = new AtomicReference<>(flowFileQueue);
    Connection connection = createConnection(queueReference);
    flowFileQueue = queueReference.get();
    return connection;
}
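The AtomicReference lets the two-argument overload hand whatever queue it ends up using back to the caller, which then caches it in the flowFileQueue field. A minimal sketch of how such an overload might look, assuming a Mockito-based helper; the names and behavior here are illustrative, not the actual TestStandardProcessSession code:

// Hypothetical helper: creates a mocked Connection and publishes the queue
// it uses back through the AtomicReference.
private Connection createConnection(final AtomicReference<FlowFileQueue> queueReference) {
    final Connection connection = Mockito.mock(Connection.class);
    FlowFileQueue queue = queueReference.get();
    if (queue == null) {
        queue = Mockito.mock(FlowFileQueue.class);
        queueReference.set(queue); // hand the newly created queue back to the caller
    }
    Mockito.when(connection.getFlowFileQueue()).thenReturn(queue);
    return connection;
}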
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class StandardProcessGroup, method updateConnection.
private void updateConnection(final Connection connection, final VersionedConnection proposed) {
    connection.setBendPoints(proposed.getBends() == null ? Collections.emptyList()
        : proposed.getBends().stream()
            .map(pos -> new Position(pos.getX(), pos.getY()))
            .collect(Collectors.toList()));
    connection.setDestination(getConnectable(connection.getProcessGroup(), proposed.getDestination()));
    connection.setLabelIndex(proposed.getLabelIndex());
    connection.setName(proposed.getName());
    connection.setRelationships(proposed.getSelectedRelationships().stream()
        .map(name -> new Relationship.Builder().name(name).build())
        .collect(Collectors.toSet()));
    connection.setZIndex(proposed.getzIndex());

    final FlowFileQueue queue = connection.getFlowFileQueue();
    queue.setBackPressureDataSizeThreshold(proposed.getBackPressureDataSizeThreshold());
    queue.setBackPressureObjectThreshold(proposed.getBackPressureObjectThreshold());
    queue.setFlowFileExpiration(proposed.getFlowFileExpiration());

    final List<FlowFilePrioritizer> prioritizers = proposed.getPrioritizers() == null ? Collections.emptyList()
        : proposed.getPrioritizers().stream()
            .map(prioritizerName -> {
                try {
                    return flowController.createPrioritizer(prioritizerName);
                } catch (final Exception e) {
                    // Chain the cause so the original failure is not lost.
                    throw new IllegalStateException("Failed to create Prioritizer of type " + prioritizerName
                        + " for Connection with ID " + connection.getIdentifier(), e);
                }
            })
            .collect(Collectors.toList());
    queue.setPriorities(prioritizers);
}
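The FlowFileQueue setters above take the proposed values straight from the versioned flow. For comparison, a standalone sketch that applies the same settings with literal example values (the thresholds are arbitrary examples, not NiFi defaults; Connection and FlowFileQueue are the org.apache.nifi types used above):

// Illustrative sketch: configure back pressure on the queue behind a Connection.
static void applyBackPressure(final Connection connection) {
    final FlowFileQueue queue = connection.getFlowFileQueue();
    queue.setBackPressureObjectThreshold(10_000);    // back pressure after 10,000 queued FlowFiles
    queue.setBackPressureDataSizeThreshold("1 GB");  // back pressure after 1 GB of queued content
    queue.setFlowFileExpiration("60 sec");           // drop FlowFiles older than 60 seconds
}

Note that the data-size threshold and expiration are expressed as strings ("1 GB", "60 sec"), matching the string-valued properties carried by the VersionedConnection above.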
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class TestHttpFlowFileServerProtocol, method testPortDestinationFull.
@Test
public void testPortDestinationFull() throws Exception {
    final HttpFlowFileServerProtocol serverProtocol = getDefaultHttpFlowFileServerProtocol();
    final Peer peer = getDefaultPeer();
    ((HttpServerCommunicationsSession) peer.getCommunicationsSession())
        .putHandshakeParam(HandshakeProperty.PORT_IDENTIFIER, "port-identifier");

    final ProcessGroup processGroup = mock(ProcessGroup.class);
    final RootGroupPort port = mock(RootGroupPort.class);
    final PortAuthorizationResult authResult = mock(PortAuthorizationResult.class);
    doReturn(true).when(processGroup).isRootGroup();
    doReturn(port).when(processGroup).getOutputPort("port-identifier");
    doReturn(authResult).when(port).checkUserAuthorization(any(String.class));
    doReturn(true).when(authResult).isAuthorized();
    doReturn(true).when(port).isValid();
    doReturn(true).when(port).isRunning();

    final Set<Connection> connections = new HashSet<>();
    final Connection connection = mock(Connection.class);
    connections.add(connection);
    doReturn(connections).when(port).getConnections();

    final FlowFileQueue flowFileQueue = mock(FlowFileQueue.class);
    doReturn(flowFileQueue).when(connection).getFlowFileQueue();
    doReturn(true).when(flowFileQueue).isFull();
    serverProtocol.setRootProcessGroup(processGroup);

    try {
        serverProtocol.handshake(peer);
        fail("Handshake should fail when the destination queue is full");
    } catch (final HandshakeException e) {
        assertEquals(ResponseCode.PORTS_DESTINATION_FULL, e.getResponseCode());
    }
    assertFalse(serverProtocol.isHandshakeSuccessful());
}
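The inverse case follows the same shape: if the mocked queue reports that it is not full, the handshake should complete. A sketch of that variation, reusing the peer, port, and connection mocks set up above (illustrative; not a test that appears in the source file):

final FlowFileQueue emptyQueue = mock(FlowFileQueue.class);
doReturn(emptyQueue).when(connection).getFlowFileQueue();
doReturn(false).when(emptyQueue).isFull(); // destination has capacity
serverProtocol.handshake(peer);            // no HandshakeException expected
assertTrue(serverProtocol.isHandshakeSuccessful());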
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class StandardProcessSession, method enrich.
private StandardProvenanceEventRecord enrich(final ProvenanceEventRecord rawEvent, final Map<String, FlowFileRecord> flowFileRecordMap,
        final Map<FlowFileRecord, StandardRepositoryRecord> records, final boolean updateAttributes) {
    final StandardProvenanceEventRecord.Builder recordBuilder = new StandardProvenanceEventRecord.Builder().fromEvent(rawEvent);
    final FlowFileRecord eventFlowFile = flowFileRecordMap.get(rawEvent.getFlowFileUuid());
    if (eventFlowFile != null) {
        final StandardRepositoryRecord repoRecord = records.get(eventFlowFile);
        if (repoRecord.getCurrent() != null && repoRecord.getCurrentClaim() != null) {
            final ContentClaim currentClaim = repoRecord.getCurrentClaim();
            final long currentOffset = repoRecord.getCurrentClaimOffset();
            final long size = eventFlowFile.getSize();
            final ResourceClaim resourceClaim = currentClaim.getResourceClaim();
            recordBuilder.setCurrentContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                currentOffset + currentClaim.getOffset(), size);
        }
        if (repoRecord.getOriginal() != null && repoRecord.getOriginalClaim() != null) {
            final ContentClaim originalClaim = repoRecord.getOriginalClaim();
            final long originalOffset = repoRecord.getOriginal().getContentClaimOffset();
            final long originalSize = repoRecord.getOriginal().getSize();
            final ResourceClaim resourceClaim = originalClaim.getResourceClaim();
            recordBuilder.setPreviousContentClaim(resourceClaim.getContainer(), resourceClaim.getSection(), resourceClaim.getId(),
                originalOffset + originalClaim.getOffset(), originalSize);
        }
        final FlowFileQueue originalQueue = repoRecord.getOriginalQueue();
        if (originalQueue != null) {
            recordBuilder.setSourceQueueIdentifier(originalQueue.getIdentifier());
        }
    }
    if (updateAttributes) {
        final FlowFileRecord flowFileRecord = flowFileRecordMap.get(rawEvent.getFlowFileUuid());
        if (flowFileRecord != null) {
            final StandardRepositoryRecord record = records.get(flowFileRecord);
            if (record != null) {
                recordBuilder.setAttributes(record.getOriginalAttributes(), record.getUpdatedAttributes());
            }
        }
    }
    return recordBuilder.build();
}
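The offset arithmetic is worth spelling out: a ResourceClaim is the backing file, a ContentClaim starts at some offset within that file, and the FlowFile's content starts at a further offset within the claim. A sketch with hypothetical numbers:

// Hypothetical values showing where the event's content lives
// inside the backing resource file.
final long claimOffsetInResource = 4096; // currentClaim.getOffset()
final long recordOffsetInClaim = 512;    // repoRecord.getCurrentClaimOffset()
final long contentSize = 1024;           // eventFlowFile.getSize()
final long absoluteOffset = recordOffsetInClaim + claimOffsetInResource; // 4608
// The provenance event records (absoluteOffset, contentSize): here, bytes
// 4608 through 5631 of the resource file.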
Use of org.apache.nifi.controller.queue.FlowFileQueue in project nifi by apache.
The class StandardProcessSession, method rollback.
private void rollback(final boolean penalize, final boolean rollbackCheckpoint) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("{} session rollback called, FlowFile records are {} {}", this, loggableFlowfileInfo(), new Throwable("Stack Trace on rollback"));
    }

    deleteOnCommit.clear();
    closeStreams(openInputStreams, "rolled back", "input");
    closeStreams(openOutputStreams, "rolled back", "output");

    try {
        claimCache.reset();
    } catch (IOException e1) {
        LOG.warn("{} Attempted to close Output Stream for {} due to session rollback but close failed", this, this.connectableDescription, e1);
    }

    final Set<StandardRepositoryRecord> recordsToHandle = new HashSet<>();
    recordsToHandle.addAll(records.values());
    if (rollbackCheckpoint) {
        final Checkpoint existingCheckpoint = this.checkpoint;
        this.checkpoint = null;
        if (existingCheckpoint != null && existingCheckpoint.records != null) {
            recordsToHandle.addAll(existingCheckpoint.records.values());
        }
    }

    resetWriteClaims();
    resetReadClaim();

    if (recordsToHandle.isEmpty()) {
        LOG.trace("{} was rolled back, but no events were performed by this ProcessSession", this);
        acknowledgeRecords();
        resetState();
        return;
    }

    for (final StandardRepositoryRecord record : recordsToHandle) {
        // remove the working claims if they are different than the originals.
        removeTemporaryClaim(record);
    }

    final Set<RepositoryRecord> abortedRecords = new HashSet<>();
    final Set<StandardRepositoryRecord> transferRecords = new HashSet<>();
    for (final StandardRepositoryRecord record : recordsToHandle) {
        if (record.isMarkedForAbort()) {
            decrementClaimCount(record.getWorkingClaim());
            if (record.getCurrentClaim() != null && !record.getCurrentClaim().equals(record.getWorkingClaim())) {
                // if working & original claim are same, don't remove twice; we only want to remove the original
                // if it's different from the working. Otherwise, we remove two claimant counts. This causes
                // an issue if we only updated the FlowFile attributes.
                decrementClaimCount(record.getCurrentClaim());
            }
            abortedRecords.add(record);
        } else {
            transferRecords.add(record);
        }
    }

    // Put the FlowFiles that are not marked for abort back to their original queues
    for (final StandardRepositoryRecord record : transferRecords) {
        if (record.getOriginal() != null) {
            final FlowFileQueue originalQueue = record.getOriginalQueue();
            if (originalQueue != null) {
                if (penalize) {
                    final long expirationEpochMillis = System.currentTimeMillis() + context.getConnectable().getPenalizationPeriod(TimeUnit.MILLISECONDS);
                    final FlowFileRecord newFile = new StandardFlowFileRecord.Builder()
                        .fromFlowFile(record.getOriginal())
                        .penaltyExpirationTime(expirationEpochMillis)
                        .build();
                    originalQueue.put(newFile);
                } else {
                    originalQueue.put(record.getOriginal());
                }
            }
        }
    }

    if (!abortedRecords.isEmpty()) {
        try {
            context.getFlowFileRepository().updateRepository(abortedRecords);
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository for aborted records due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    // If we have transient claims that need to be cleaned up, do so.
    final List<ContentClaim> transientClaims = recordsToHandle.stream()
        .flatMap(record -> record.getTransientClaims().stream())
        .collect(Collectors.toList());
    if (!transientClaims.isEmpty()) {
        final RepositoryRecord repoRecord = new TransientClaimRepositoryRecord(transientClaims);
        try {
            context.getFlowFileRepository().updateRepository(Collections.singletonList(repoRecord));
        } catch (final IOException ioe) {
            LOG.error("Unable to update FlowFile repository to cleanup transient claims due to {}", ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.error("", ioe);
            }
        }
    }

    final Connectable connectable = context.getConnectable();
    final StandardFlowFileEvent flowFileEvent = new StandardFlowFileEvent(connectable.getIdentifier());
    flowFileEvent.setBytesRead(bytesRead);
    flowFileEvent.setBytesWritten(bytesWritten);
    flowFileEvent.setCounters(immediateCounters);

    // update event repository
    try {
        context.getFlowFileEventRepository().updateRepository(flowFileEvent);
    } catch (final Exception e) {
        LOG.error("Failed to update FlowFileEvent Repository due to {}", e.toString());
        if (LOG.isDebugEnabled()) {
            LOG.error("", e);
        }
    }

    acknowledgeRecords();
    resetState();
}
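The decrementClaimCount calls in the abort branch drive the claimant counts the content repository uses to decide when content can be destroyed. A minimal sketch of that bookkeeping, assuming access to the session's ContentRepository (illustrative; not the session's exact implementation):

// Each FlowFile referencing a ContentClaim holds one claimant count;
// when the count reaches zero the content becomes eligible for cleanup.
final ContentRepository contentRepository = context.getContentRepository();
final int remaining = contentRepository.decrementClaimantCount(record.getWorkingClaim());
if (remaining <= 0) {
    // No FlowFile references this claim any longer; the repository
    // may now archive or destroy the underlying content.
}

This is why the abort branch avoids decrementing the same claim twice when the working and current claims are equal: a double decrement could release content that a committed FlowFile still references.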