Use of org.apache.nifi.processor.Relationship in project nifi by apache.
The class PutHiveStreaming, method onTrigger.
private void onTrigger(ProcessContext context, ProcessSession session, FunctionContext functionContext) throws ProcessException {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final String dbName = context.getProperty(DB_NAME).evaluateAttributeExpressions(flowFile).getValue();
final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
// Only allow one thread to work on a DB/table at a time
final Semaphore newSemaphore = new Semaphore(1);
Semaphore semaphore = tableSemaphoreMap.putIfAbsent(dbName + "." + tableName, newSemaphore);
if (semaphore == null) {
semaphore = newSemaphore;
}
boolean gotSemaphore = false;
try {
gotSemaphore = semaphore.tryAcquire(0, TimeUnit.SECONDS);
} catch (InterruptedException ie) {
// Restore the interrupt flag; gotSemaphore stays false and we return below
Thread.currentThread().interrupt();
}
if (!gotSemaphore) {
// We didn't get a chance to acquire, so roll back the session and try again next time
session.rollback();
return;
}
final ComponentLog log = getLogger();
final String metastoreUri = context.getProperty(METASTORE_URI).evaluateAttributeExpressions(flowFile).getValue();
final boolean autoCreatePartitions = context.getProperty(AUTOCREATE_PARTITIONS).asBoolean();
final Integer maxConnections = context.getProperty(MAX_OPEN_CONNECTIONS).asInteger();
final Integer heartbeatInterval = context.getProperty(HEARTBEAT_INTERVAL).evaluateAttributeExpressions().asInteger();
final Integer txnsPerBatch = context.getProperty(TXNS_PER_BATCH).evaluateAttributeExpressions(flowFile).asInteger();
final Integer recordsPerTxn = context.getProperty(RECORDS_PER_TXN).evaluateAttributeExpressions(flowFile).asInteger();
final Map<HiveEndPoint, HiveWriter> myWriters = new ConcurrentHashMap<>();
threadWriterList.add(myWriters);
HiveOptions o = new HiveOptions(metastoreUri, dbName, tableName).withTxnsPerBatch(txnsPerBatch).withAutoCreatePartitions(autoCreatePartitions).withMaxOpenConnections(maxConnections).withHeartBeatInterval(heartbeatInterval).withCallTimeout(callTimeout);
if (SecurityUtil.isSecurityEnabled(hiveConfig)) {
final String explicitPrincipal = context.getProperty(kerberosProperties.getKerberosPrincipal()).evaluateAttributeExpressions().getValue();
final String explicitKeytab = context.getProperty(kerberosProperties.getKerberosKeytab()).evaluateAttributeExpressions().getValue();
final KerberosCredentialsService credentialsService = context.getProperty(KERBEROS_CREDENTIALS_SERVICE).asControllerService(KerberosCredentialsService.class);
final String resolvedPrincipal;
final String resolvedKeytab;
if (credentialsService == null) {
resolvedPrincipal = explicitPrincipal;
resolvedKeytab = explicitKeytab;
} else {
resolvedPrincipal = credentialsService.getPrincipal();
resolvedKeytab = credentialsService.getKeytab();
}
o = o.withKerberosPrincipal(resolvedPrincipal).withKerberosKeytab(resolvedKeytab);
}
final HiveOptions options = o;
// Store the original class loader, then explicitly set it to this class's classloader (for use by the Hive Metastore)
ClassLoader originalClassloader = Thread.currentThread().getContextClassLoader();
Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
final List<String> partitionColumnList;
final String partitionColumns = context.getProperty(PARTITION_COLUMNS).evaluateAttributeExpressions().getValue();
if (partitionColumns == null || partitionColumns.isEmpty()) {
partitionColumnList = Collections.emptyList();
} else {
String[] partitionCols = partitionColumns.split(",");
partitionColumnList = new ArrayList<>(partitionCols.length);
for (String col : partitionCols) {
partitionColumnList.add(col.trim());
}
}
final AtomicReference<List<HiveStreamingRecord>> successfulRecords = new AtomicReference<>();
successfulRecords.set(new ArrayList<>());
final FlowFile inputFlowFile = flowFile;
final RoutingResult result = new RoutingResult();
final ExceptionHandler<FunctionContext> exceptionHandler = new ExceptionHandler<>();
exceptionHandler.mapException(s -> {
try {
if (s == null) {
return ErrorTypes.PersistentFailure;
}
throw s;
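// Rethrowing the stored exception lets the catch clauses below classify it by type, avoiding a chain of instanceof checks.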
} catch (IllegalArgumentException | HiveWriter.WriteFailure | SerializationError inputError) {
return ErrorTypes.InvalidInput;
} catch (HiveWriter.CommitFailure | HiveWriter.TxnBatchFailure | HiveWriter.TxnFailure writerTxError) {
return ErrorTypes.TemporalInputFailure;
} catch (ConnectionError | HiveWriter.ConnectFailure connectionError) {
// Can't connect to Hive endpoint.
log.error("Error connecting to Hive endpoint: table {} at {}", new Object[] { options.getTableName(), options.getMetaStoreURI() });
return ErrorTypes.TemporalFailure;
} catch (IOException | InterruptedException tempError) {
return ErrorTypes.TemporalFailure;
} catch (Exception t) {
return ErrorTypes.UnknownFailure;
}
});
final BiFunction<FunctionContext, ErrorTypes, ErrorTypes.Result> adjustError = RollbackOnFailure.createAdjustError(getLogger());
exceptionHandler.adjustError(adjustError);
// Create output flow files and their Avro writers
functionContext.setFlowFiles(session.create(inputFlowFile), session.create(inputFlowFile));
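// One child FlowFile accumulates the records written successfully, the other the failed ones.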
try {
session.read(inputFlowFile, new InputStreamCallback() {
@Override
public void process(InputStream in) throws IOException {
try (final DataFileStream<GenericRecord> reader = new DataFileStream<>(in, new GenericDatumReader<GenericRecord>())) {
GenericRecord currRecord = null;
// Copy codec and schema information to all writers
final String codec = reader.getMetaString(DataFileConstants.CODEC) == null ? DataFileConstants.NULL_CODEC : reader.getMetaString(DataFileConstants.CODEC);
functionContext.initAvroWriters(session, codec, reader);
Runnable flushSuccessfulRecords = () -> {
// Now send the records to the successful FlowFile and update the success count
functionContext.appendRecordsToSuccess(session, successfulRecords.get());
// Clear the list of successful records; we'll reuse it at the end to flush whatever records are left
successfulRecords.set(new ArrayList<>());
};
while (reader.hasNext()) {
// We can NOT reuse currRecord here, because currRecord is accumulated in the list of successful records.
// If we passed the same GenericRecord instance back into reader.next(), every buffered record would end up with the contents of the last record read.
// Calling reader.next() with no argument creates a brand new GenericRecord instance each time.
currRecord = reader.next();
functionContext.recordCount.incrementAndGet();
// Extract the partition values (they must be put separately into the Hive Streaming API)
List<String> partitionValues = new ArrayList<>();
if (!exceptionHandler.execute(functionContext, currRecord, input -> {
for (String partition : partitionColumnList) {
Object partitionValue = input.get(partition);
if (partitionValue == null) {
throw new IllegalArgumentException("Partition column '" + partition + "' not found in Avro record");
}
partitionValues.add(partitionValue.toString());
}
}, onRecordError(context, session, myWriters))) {
continue;
}
final HiveStreamingRecord record = new HiveStreamingRecord(partitionValues, currRecord);
final AtomicReference<HiveWriter> hiveWriterRef = new AtomicReference<>();
// Write record to Hive streaming
if (!exceptionHandler.execute(functionContext, record, input -> {
final HiveEndPoint endPoint = makeHiveEndPoint(record.getPartitionValues(), options);
final HiveWriter hiveWriter = getOrCreateWriter(myWriters, options, endPoint);
hiveWriterRef.set(hiveWriter);
hiveWriter.write(record.getRecord().toString().getBytes(StandardCharsets.UTF_8));
successfulRecords.get().add(record);
}, onHiveRecordError(context, session, myWriters))) {
continue;
}
// If we've reached the records-per-transaction limit, flush the Hive Writer and update the Avro Writer for successful records
final HiveWriter hiveWriter = hiveWriterRef.get();
if (hiveWriter.getTotalRecords() >= recordsPerTxn) {
exceptionHandler.execute(functionContext, successfulRecords.get(), input -> {
hiveWriter.flush(true);
// Advance the function context; from this point on, the process session can no longer be rolled back.
functionContext.proceed();
// Now send the records to the success relationship and update the success count
flushSuccessfulRecords.run();
}, onHiveRecordsError(context, session, myWriters).andThen((fc, input, res, commitException) -> {
// Reset the writer so that subsequent records start a clean transaction.
switch(res.destination()) {
case Retry:
case Failure:
try {
// Abort current tx and move to next.
hiveWriter.abort();
} catch (Exception e) {
// Can't even abort properly, throw a process exception
throw new ProcessException(e);
}
}
}));
}
}
exceptionHandler.execute(functionContext, successfulRecords.get(), input -> {
// Finish any transactions
flushAllWriters(myWriters, true);
closeAllWriters(myWriters);
// Now send any remaining records to the success relationship and update the count
flushSuccessfulRecords.run();
// On failure, the error handler receives the successful records so they can still be appended.
}, onHiveRecordsError(context, session, myWriters));
} catch (IOException ioe) {
// The Avro file is invalid (or may not be an Avro file at all); send it to failure
final ErrorTypes.Result adjusted = adjustError.apply(functionContext, ErrorTypes.InvalidInput);
final String msg = "The incoming flow file cannot be read as an Avro file";
switch(adjusted.destination()) {
case Failure:
log.error(msg, ioe);
result.routeTo(inputFlowFile, REL_FAILURE);
break;
case ProcessException:
throw new ProcessException(msg, ioe);
}
}
}
});
// If we got here, we've processed the outgoing flow files correctly, so remove the incoming one if necessary
if (result.getRoutedFlowFiles().values().stream().noneMatch(routed -> routed.contains(inputFlowFile))) {
session.remove(inputFlowFile);
}
} catch (DiscontinuedException e) {
// The input FlowFile processing is discontinued. Keep it in the input queue.
getLogger().warn("Discontinued processing for {} due to {}", new Object[] { flowFile, e }, e);
result.routeTo(flowFile, Relationship.SELF);
} catch (ShouldRetryException e) {
// This exception is already a result of adjusting an error, so simply transfer the FlowFile to retry.
getLogger().error(e.getMessage(), e);
flowFile = session.penalize(flowFile);
result.routeTo(flowFile, REL_RETRY);
} finally {
threadWriterList.remove(myWriters);
functionContext.transferFlowFiles(session, result, options);
// Restore the original class loader; probably not strictly necessary, but good practice since this task changed it
Thread.currentThread().setContextClassLoader(originalClassloader);
semaphore.release();
}
}
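The per-table guard at the top of this method is a reusable idiom: ConcurrentHashMap.putIfAbsent guarantees that every thread competing for the same "db.table" key agrees on a single one-permit Semaphore, no matter which thread created it first. A minimal, self-contained sketch of the idiom, assuming nothing from NiFi (PerKeyLock and its method names are illustrative):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Semaphore;

    public class PerKeyLock {

        private final Map<String, Semaphore> locks = new ConcurrentHashMap<>();

        // Returns true if the caller now holds the single permit for this key.
        public boolean tryLock(final String key) {
            final Semaphore fresh = new Semaphore(1);
            // putIfAbsent returns null if 'fresh' won the race, otherwise the semaphore already registered.
            final Semaphore existing = locks.putIfAbsent(key, fresh);
            final Semaphore semaphore = (existing == null) ? fresh : existing;
            return semaphore.tryAcquire();
        }

        public void unlock(final String key) {
            final Semaphore semaphore = locks.get(key);
            if (semaphore != null) {
                semaphore.release();
            }
        }
    }

Because the acquire never blocks, a caller that loses the race can simply roll back its session and yield, exactly as the processor above does.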
Use of org.apache.nifi.processor.Relationship in project nifi by apache.
The class RouteHL7, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
FlowFile flowFile = session.get();
if (flowFile == null) {
return;
}
final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).evaluateAttributeExpressions(flowFile).getValue());
final byte[] buffer = new byte[(int) flowFile.getSize()];
session.read(flowFile, new InputStreamCallback() {
@Override
public void process(final InputStream in) throws IOException {
StreamUtils.fillBuffer(in, buffer);
}
});
@SuppressWarnings("resource") final HapiContext hapiContext = new DefaultHapiContext();
hapiContext.setValidationContext((ca.uhn.hl7v2.validation.ValidationContext) ValidationContextFactory.noValidation());
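// Validation is disabled so that any parseable HL7 message can be routed, valid or not.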
final PipeParser parser = hapiContext.getPipeParser();
final String hl7Text = new String(buffer, charset);
final HL7Message message;
try {
final Message hapiMessage = parser.parse(hl7Text);
message = new HapiMessage(hapiMessage);
} catch (final Exception e) {
getLogger().error("Failed to parse {} as HL7 due to {}; routing to failure", new Object[] { flowFile, e });
session.transfer(flowFile, REL_FAILURE);
return;
}
final Set<String> matchingRels = new HashSet<>();
final Map<Relationship, HL7Query> queryMap = queries;
for (final Map.Entry<Relationship, HL7Query> entry : queryMap.entrySet()) {
final Relationship relationship = entry.getKey();
final HL7Query query = entry.getValue();
final QueryResult result = query.evaluate(message);
if (result.isMatch()) {
FlowFile clone = session.clone(flowFile);
clone = session.putAttribute(clone, "RouteHL7.Route", relationship.getName());
session.transfer(clone, relationship);
session.getProvenanceReporter().route(clone, relationship);
matchingRels.add(relationship.getName());
}
}
session.transfer(flowFile, REL_ORIGINAL);
getLogger().info("Routed a copy of {} to {} relationships: {}", new Object[] { flowFile, matchingRels.size(), matchingRels });
}
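The loop above is the standard NiFi fan-out: every matching relationship receives its own clone, and the original FlowFile leaves the session exactly once via REL_ORIGINAL. A hedged sketch of the pattern in isolation; MatchRouter, matchers, and original are illustrative names, while ProcessSession, FlowFile, and Relationship are the real NiFi types:

    import java.util.Map;
    import java.util.function.Predicate;

    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessSession;
    import org.apache.nifi.processor.Relationship;

    final class MatchRouter {

        static void route(final ProcessSession session, final FlowFile flowFile,
                final Map<Relationship, Predicate<FlowFile>> matchers, final Relationship original) {
            for (final Map.Entry<Relationship, Predicate<FlowFile>> entry : matchers.entrySet()) {
                if (entry.getValue().test(flowFile)) {
                    // Each match gets an independent copy; the original is never consumed here.
                    final FlowFile clone = session.clone(flowFile);
                    session.transfer(clone, entry.getKey());
                }
            }
            // The original is always transferred, whether or not anything matched.
            session.transfer(flowFile, original);
        }
    }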
Use of org.apache.nifi.processor.Relationship in project nifi by apache.
The class ScanHBase, method finalizeFlowFile.
private void finalizeFlowFile(final ProcessSession session, final HBaseClientService hBaseClientService, FlowFile flowFile, final String tableName, Long rowsPulled, Exception e) {
Relationship rel = REL_SUCCESS;
flowFile = session.putAttribute(flowFile, HBASE_ROWS_COUNT_ATTR, rowsPulled.toString());
final AtomicReference<IOException> ioe = new AtomicReference<>(null);
flowFile = session.append(flowFile, (out) -> {
try {
out.write("]".getBytes());
} catch (IOException ei) {
ioe.set(ei);
}
});
if (e != null || ioe.get() != null) {
flowFile = session.putAttribute(flowFile, "scanhbase.error", (e == null ? e : ioe.get()).toString());
rel = REL_FAILURE;
} else {
session.getProvenanceReporter().receive(flowFile, hBaseClientService.toTransitUri(tableName, "{ids}"));
}
session.transfer(flowFile, rel);
}
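The AtomicReference here works around a signature limit: the append callback cannot let a checked IOException propagate, so the exception is stashed and inspected after the callback returns. A self-contained sketch of the same technique, with an illustrative StreamCallback standing in for NiFi's OutputStreamCallback:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import java.util.concurrent.atomic.AtomicReference;

    final class CallbackErrorCapture {

        interface StreamCallback {
            void process(OutputStream out);
        }

        // Runs the callback and returns the captured IOException, or null on success.
        static IOException run(final OutputStream out, final byte[] payload) {
            final AtomicReference<IOException> error = new AtomicReference<>(null);
            final StreamCallback callback = stream -> {
                try {
                    stream.write(payload);
                } catch (final IOException e) {
                    error.set(e); // stash it; the callback's signature cannot throw
                }
            };
            callback.process(out);
            return error.get();
        }

        public static void main(final String[] args) {
            final IOException failure = run(new ByteArrayOutputStream(), "]".getBytes(StandardCharsets.UTF_8));
            System.out.println(failure == null ? "append succeeded" : failure.toString());
        }
    }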
Use of org.apache.nifi.processor.Relationship in project nifi by apache.
The class StandardProcessGroup, method updateProcessGroup.
private void updateProcessGroup(final ProcessGroup group, final VersionedProcessGroup proposed, final String componentIdSeed, final Set<String> updatedVersionedComponentIds, final boolean updatePosition, final boolean updateName, final boolean updateDescendantVersionedGroups, final Set<String> variablesToSkip) throws ProcessorInstantiationException {
group.setComments(proposed.getComments());
if (updateName) {
group.setName(proposed.getName());
}
if (updatePosition && proposed.getPosition() != null) {
group.setPosition(new Position(proposed.getPosition().getX(), proposed.getPosition().getY()));
}
// Determine which variables have been added/removed and add/remove them from this group's variable registry.
// We don't worry about if a variable value has changed, because variables are designed to be 'environment specific.'
// As a result, once imported, we won't update variables to match the remote flow, but we will add any missing variables
// and remove any variables that are no longer part of the remote flow.
final Set<String> existingVariableNames = group.getVariableRegistry().getVariableMap().keySet().stream().map(VariableDescriptor::getName).collect(Collectors.toSet());
final Map<String, String> updatedVariableMap = new HashMap<>();
// If any new variables exist in the proposed flow, add those to the variable registry.
for (final Map.Entry<String, String> entry : proposed.getVariables().entrySet()) {
if (!existingVariableNames.contains(entry.getKey()) && !variablesToSkip.contains(entry.getKey())) {
updatedVariableMap.put(entry.getKey(), entry.getValue());
}
}
group.setVariables(updatedVariableMap);
final VersionedFlowCoordinates remoteCoordinates = proposed.getVersionedFlowCoordinates();
if (remoteCoordinates == null) {
group.disconnectVersionControl(false);
} else {
final String registryId = flowController.getFlowRegistryClient().getFlowRegistryId(remoteCoordinates.getRegistryUrl());
final String bucketId = remoteCoordinates.getBucketId();
final String flowId = remoteCoordinates.getFlowId();
final int version = remoteCoordinates.getVersion();
final FlowRegistry flowRegistry = flowController.getFlowRegistryClient().getFlowRegistry(registryId);
final String registryName = flowRegistry == null ? registryId : flowRegistry.getName();
final VersionedFlowState flowState = remoteCoordinates.getLatest() ? VersionedFlowState.UP_TO_DATE : VersionedFlowState.STALE;
final VersionControlInformation vci = new StandardVersionControlInformation.Builder().registryId(registryId).registryName(registryName).bucketId(bucketId).bucketName(bucketId).flowId(flowId).flowName(flowId).version(version).flowSnapshot(proposed).status(new StandardVersionedFlowStatus(flowState, flowState.getDescription())).build();
group.setVersionControlInformation(vci, Collections.emptyMap());
}
// Controller Services
// Controller Services have to be handled a bit differently than other components. This is because Processors and Controller
// Services may reference other Controller Services. Since we may be adding Service A, which depends on Service B, before adding
// Service B, we need to ensure that we create all Controller Services first and then call updateControllerService for each
// Controller Service. This way, we ensure that all services have been created before setting the properties. This allows us to
// properly obtain the correct mapping of Controller Service VersionedComponentID to Controller Service instance id.
final Map<String, ControllerServiceNode> servicesByVersionedId = group.getControllerServices(false).stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> controllerServicesRemoved = new HashSet<>(servicesByVersionedId.keySet());
final Map<ControllerServiceNode, VersionedControllerService> services = new HashMap<>();
// Add any Controller Service that does not yet exist.
for (final VersionedControllerService proposedService : proposed.getControllerServices()) {
ControllerServiceNode service = servicesByVersionedId.get(proposedService.getIdentifier());
if (service == null) {
service = addControllerService(group, proposedService, componentIdSeed);
LOG.info("Added {} to {}", service, this);
}
services.put(service, proposedService);
}
// Update all of the Controller Services to match the VersionedControllerService
for (final Map.Entry<ControllerServiceNode, VersionedControllerService> entry : services.entrySet()) {
final ControllerServiceNode service = entry.getKey();
final VersionedControllerService proposedService = entry.getValue();
if (updatedVersionedComponentIds.contains(proposedService.getIdentifier())) {
updateControllerService(service, proposedService);
LOG.info("Updated {}", service);
}
controllerServicesRemoved.remove(proposedService.getIdentifier());
}
// Child groups
final Map<String, ProcessGroup> childGroupsByVersionedId = group.getProcessGroups().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> childGroupsRemoved = new HashSet<>(childGroupsByVersionedId.keySet());
for (final VersionedProcessGroup proposedChildGroup : proposed.getProcessGroups()) {
final ProcessGroup childGroup = childGroupsByVersionedId.get(proposedChildGroup.getIdentifier());
final VersionedFlowCoordinates childCoordinates = proposedChildGroup.getVersionedFlowCoordinates();
if (childGroup == null) {
final ProcessGroup added = addProcessGroup(group, proposedChildGroup, componentIdSeed, variablesToSkip);
flowController.onProcessGroupAdded(added);
added.findAllRemoteProcessGroups().stream().forEach(RemoteProcessGroup::initialize);
LOG.info("Added {} to {}", added, this);
} else if (childCoordinates == null || updateDescendantVersionedGroups) {
updateProcessGroup(childGroup, proposedChildGroup, componentIdSeed, updatedVersionedComponentIds, true, true, updateDescendantVersionedGroups, variablesToSkip);
LOG.info("Updated {}", childGroup);
}
childGroupsRemoved.remove(proposedChildGroup.getIdentifier());
}
// Funnels
final Map<String, Funnel> funnelsByVersionedId = group.getFunnels().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> funnelsRemoved = new HashSet<>(funnelsByVersionedId.keySet());
for (final VersionedFunnel proposedFunnel : proposed.getFunnels()) {
final Funnel funnel = funnelsByVersionedId.get(proposedFunnel.getIdentifier());
if (funnel == null) {
final Funnel added = addFunnel(group, proposedFunnel, componentIdSeed);
flowController.onFunnelAdded(added);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedFunnel.getIdentifier())) {
updateFunnel(funnel, proposedFunnel);
LOG.info("Updated {}", funnel);
} else {
funnel.setPosition(new Position(proposedFunnel.getPosition().getX(), proposedFunnel.getPosition().getY()));
}
funnelsRemoved.remove(proposedFunnel.getIdentifier());
}
// Input Ports
final Map<String, Port> inputPortsByVersionedId = group.getInputPorts().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> inputPortsRemoved = new HashSet<>(inputPortsByVersionedId.keySet());
for (final VersionedPort proposedPort : proposed.getInputPorts()) {
final Port port = inputPortsByVersionedId.get(proposedPort.getIdentifier());
if (port == null) {
final Port added = addInputPort(group, proposedPort, componentIdSeed);
flowController.onInputPortAdded(added);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedPort.getIdentifier())) {
updatePort(port, proposedPort);
LOG.info("Updated {}", port);
} else {
port.setPosition(new Position(proposedPort.getPosition().getX(), proposedPort.getPosition().getY()));
}
inputPortsRemoved.remove(proposedPort.getIdentifier());
}
// Output Ports
final Map<String, Port> outputPortsByVersionedId = group.getOutputPorts().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> outputPortsRemoved = new HashSet<>(outputPortsByVersionedId.keySet());
for (final VersionedPort proposedPort : proposed.getOutputPorts()) {
final Port port = outputPortsByVersionedId.get(proposedPort.getIdentifier());
if (port == null) {
final Port added = addOutputPort(group, proposedPort, componentIdSeed);
flowController.onOutputPortAdded(added);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedPort.getIdentifier())) {
updatePort(port, proposedPort);
LOG.info("Updated {}", port);
} else {
port.setPosition(new Position(proposedPort.getPosition().getX(), proposedPort.getPosition().getY()));
}
outputPortsRemoved.remove(proposedPort.getIdentifier());
}
// Labels
final Map<String, Label> labelsByVersionedId = group.getLabels().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> labelsRemoved = new HashSet<>(labelsByVersionedId.keySet());
for (final VersionedLabel proposedLabel : proposed.getLabels()) {
final Label label = labelsByVersionedId.get(proposedLabel.getIdentifier());
if (label == null) {
final Label added = addLabel(group, proposedLabel, componentIdSeed);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedLabel.getIdentifier())) {
updateLabel(label, proposedLabel);
LOG.info("Updated {}", label);
} else {
label.setPosition(new Position(proposedLabel.getPosition().getX(), proposedLabel.getPosition().getY()));
}
labelsRemoved.remove(proposedLabel.getIdentifier());
}
// Processors
final Map<String, ProcessorNode> processorsByVersionedId = group.getProcessors().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> processorsRemoved = new HashSet<>(processorsByVersionedId.keySet());
final Map<ProcessorNode, Set<Relationship>> autoTerminatedRelationships = new HashMap<>();
for (final VersionedProcessor proposedProcessor : proposed.getProcessors()) {
final ProcessorNode processor = processorsByVersionedId.get(proposedProcessor.getIdentifier());
if (processor == null) {
final ProcessorNode added = addProcessor(group, proposedProcessor, componentIdSeed);
flowController.onProcessorAdded(added);
final Set<Relationship> proposedAutoTerminated = proposedProcessor.getAutoTerminatedRelationships() == null ? Collections.emptySet() : proposedProcessor.getAutoTerminatedRelationships().stream().map(relName -> added.getRelationship(relName)).collect(Collectors.toSet());
autoTerminatedRelationships.put(added, proposedAutoTerminated);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedProcessor.getIdentifier())) {
updateProcessor(processor, proposedProcessor);
final Set<Relationship> proposedAutoTerminated = proposedProcessor.getAutoTerminatedRelationships() == null ? Collections.emptySet() : proposedProcessor.getAutoTerminatedRelationships().stream().map(relName -> processor.getRelationship(relName)).collect(Collectors.toSet());
if (!processor.getAutoTerminatedRelationships().equals(proposedAutoTerminated)) {
autoTerminatedRelationships.put(processor, proposedAutoTerminated);
}
LOG.info("Updated {}", processor);
} else {
processor.setPosition(new Position(proposedProcessor.getPosition().getX(), proposedProcessor.getPosition().getY()));
}
processorsRemoved.remove(proposedProcessor.getIdentifier());
}
// Remote Groups
final Map<String, RemoteProcessGroup> rpgsByVersionedId = group.getRemoteProcessGroups().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> rpgsRemoved = new HashSet<>(rpgsByVersionedId.keySet());
for (final VersionedRemoteProcessGroup proposedRpg : proposed.getRemoteProcessGroups()) {
final RemoteProcessGroup rpg = rpgsByVersionedId.get(proposedRpg.getIdentifier());
if (rpg == null) {
final RemoteProcessGroup added = addRemoteProcessGroup(group, proposedRpg, componentIdSeed);
LOG.info("Added {} to {}", added, this);
} else if (updatedVersionedComponentIds.contains(proposedRpg.getIdentifier())) {
updateRemoteProcessGroup(rpg, proposedRpg, componentIdSeed);
LOG.info("Updated {}", rpg);
} else {
rpg.setPosition(new Position(proposedRpg.getPosition().getX(), proposedRpg.getPosition().getY()));
}
rpgsRemoved.remove(proposedRpg.getIdentifier());
}
// Connections
final Map<String, Connection> connectionsByVersionedId = group.getConnections().stream().collect(Collectors.toMap(component -> component.getVersionedComponentId().orElse(component.getIdentifier()), Function.identity()));
final Set<String> connectionsRemoved = new HashSet<>(connectionsByVersionedId.keySet());
for (final VersionedConnection proposedConnection : proposed.getConnections()) {
final Connection connection = connectionsByVersionedId.get(proposedConnection.getIdentifier());
if (connection == null) {
final Connection added = addConnection(group, proposedConnection, componentIdSeed);
flowController.onConnectionAdded(added);
LOG.info("Added {} to {}", added, this);
} else if (isUpdateable(connection)) {
// If the connection needs to be updated, then the source and destination will already have
// been stopped (else, the validation above would fail). So if the source or the destination is running,
// then we know that we don't need to update the connection.
updateConnection(connection, proposedConnection);
LOG.info("Updated {}", connection);
}
connectionsRemoved.remove(proposedConnection.getIdentifier());
}
// Remove components that exist in the current flow but not in the proposed flow. Connections must be removed first, because we will fail to remove a component if it still has a connection going to it!
for (final String removedVersionedId : connectionsRemoved) {
final Connection connection = connectionsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", connection, group);
group.removeConnection(connection);
flowController.onConnectionRemoved(connection);
}
// Once the appropriate connections have been removed, we may now update Processors' auto-terminated relationships.
// We cannot do this above, in the 'updateProcessor' call because if a connection is removed and changed to auto-terminated,
// then updating this in the updateProcessor call above would attempt to set the Relationship to being auto-terminated while a
// Connection for that relationship exists. This will throw an Exception.
autoTerminatedRelationships.forEach((proc, rels) -> proc.setAutoTerminatedRelationships(rels));
// Remove all controller services no longer in use
for (final String removedVersionedId : controllerServicesRemoved) {
final ControllerServiceNode service = servicesByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", service, group);
// Must remove Controller Service through Flow Controller in order to remove from cache
flowController.removeControllerService(service);
}
for (final String removedVersionedId : funnelsRemoved) {
final Funnel funnel = funnelsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", funnel, group);
group.removeFunnel(funnel);
}
for (final String removedVersionedId : inputPortsRemoved) {
final Port port = inputPortsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", port, group);
group.removeInputPort(port);
}
for (final String removedVersionedId : outputPortsRemoved) {
final Port port = outputPortsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", port, group);
group.removeOutputPort(port);
}
for (final String removedVersionedId : labelsRemoved) {
final Label label = labelsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", label, group);
group.removeLabel(label);
}
for (final String removedVersionedId : processorsRemoved) {
final ProcessorNode processor = processorsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", processor, group);
group.removeProcessor(processor);
}
for (final String removedVersionedId : rpgsRemoved) {
final RemoteProcessGroup rpg = rpgsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", rpg, group);
group.removeRemoteProcessGroup(rpg);
}
for (final String removedVersionedId : childGroupsRemoved) {
final ProcessGroup childGroup = childGroupsByVersionedId.get(removedVersionedId);
LOG.info("Removing {} from {}", childGroup, group);
group.removeProcessGroup(childGroup);
}
}
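Every component type above (controller services, child groups, funnels, ports, labels, processors, remote groups, connections) runs through the same three-phase reconciliation: index the existing components by versioned id, add or update whatever the proposed flow names, then remove every id the proposal never mentioned. A generic sketch of that loop, with illustrative type parameters rather than NiFi's concrete types:

    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.function.BiConsumer;
    import java.util.function.Consumer;
    import java.util.function.Function;

    final class Reconciler {

        static <ID, C, P> void reconcile(final Map<ID, C> existingById, final Iterable<P> proposed,
                final Function<P, ID> idOf, final Function<P, C> add,
                final BiConsumer<C, P> update, final Consumer<C> remove) {
            final Set<ID> staleIds = new HashSet<>(existingById.keySet());
            for (final P proposal : proposed) {
                final ID id = idOf.apply(proposal);
                final C existing = existingById.get(id);
                if (existing == null) {
                    add.apply(proposal);               // not present yet: create it
                } else {
                    update.accept(existing, proposal); // present: bring it in line with the proposal
                }
                staleIds.remove(id);                   // whatever survives this loop is stale
            }
            for (final ID staleId : staleIds) {
                remove.accept(existingById.get(staleId));
            }
        }
    }

The one ordering constraint the method layers on top of this pattern: connections are removed before the components they attach to, and auto-terminated relationships are applied only after those connections are gone.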
Use of org.apache.nifi.processor.Relationship in project nifi by apache.
The class NiFiRegistryFlowMapper, method mapConnection.
public VersionedConnection mapConnection(final Connection connection) {
final FlowFileQueue queue = connection.getFlowFileQueue();
final VersionedConnection versionedConnection = new InstantiatedVersionedConnection(connection.getIdentifier(), connection.getProcessGroup().getIdentifier());
versionedConnection.setIdentifier(getId(connection.getVersionedComponentId(), connection.getIdentifier()));
versionedConnection.setGroupIdentifier(getGroupId(connection.getProcessGroup().getIdentifier()));
versionedConnection.setName(connection.getName());
versionedConnection.setBackPressureDataSizeThreshold(queue.getBackPressureDataSizeThreshold());
versionedConnection.setBackPressureObjectThreshold(queue.getBackPressureObjectThreshold());
versionedConnection.setFlowFileExpiration(queue.getFlowFileExpiration());
versionedConnection.setLabelIndex(connection.getLabelIndex());
versionedConnection.setPrioritizers(queue.getPriorities().stream().map(p -> p.getClass().getName()).collect(Collectors.toList()));
versionedConnection.setSelectedRelationships(connection.getRelationships().stream().map(Relationship::getName).collect(Collectors.toSet()));
versionedConnection.setzIndex(connection.getZIndex());
versionedConnection.setBends(connection.getBendPoints().stream().map(this::mapPosition).collect(Collectors.toList()));
versionedConnection.setSource(mapConnectable(connection.getSource()));
versionedConnection.setDestination(mapConnectable(connection.getDestination()));
return versionedConnection;
}
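This mapper is where Relationship itself surfaces: only the relationship names are persisted, because Relationship instances are runtime objects that get rebuilt from the processor when the flow is imported again. The projection, isolated as a small sketch:

    import java.util.Set;
    import java.util.stream.Collectors;

    import org.apache.nifi.processor.Relationship;

    final class RelationshipNames {

        // Project runtime Relationship objects down to the plain names stored in a versioned flow.
        static Set<String> namesOf(final Set<Relationship> relationships) {
            return relationships.stream().map(Relationship::getName).collect(Collectors.toSet());
        }
    }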