Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class ConsumeKafka, method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) {
    final PropertyValue heartbeatIntervalMsConfig = context.getProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG);
    if (heartbeatIntervalMsConfig != null && heartbeatIntervalMsConfig.isSet()) {
        heartbeatCheckIntervalMillis = heartbeatIntervalMsConfig.asInteger();
    } else {
        // Default derived from org.apache.kafka.clients.consumer.ConsumerConfig.
        heartbeatCheckIntervalMillis = 3_000;
    }
    // Without this, lastTriggeredTimestamp remains -1 if downstream connections are full
    // when this processor is scheduled for its first run after a restart.
    lastTriggeredTimestamp = System.currentTimeMillis();
    // Stop the previous connectionRetainer, if any, just in case; this shouldn't happen though.
    final ComponentLog logger = getLogger();
    if (connectionRetainer != null) {
        logger.warn("Connection retainer {} is still running, indicating something had happened.", new Object[] { connectionRetainer });
        stopConnectionRetainer();
    }
    connectionRetainer = Executors.newSingleThreadScheduledExecutor();
    connectionRetainer.scheduleAtFixedRate(() -> {
        final long now = System.currentTimeMillis();
        if (lastTriggeredTimestamp < 0 || lastTriggeredTimestamp > now - heartbeatCheckIntervalMillis) {
            if (logger.isDebugEnabled()) {
                logger.debug("No need to retain connection. Triggered at {}, {} millis ago.",
                        new Object[] { lastTriggeredTimestamp, now - lastTriggeredTimestamp });
            }
            return;
        }
        try {
            final ConsumerPool pool = getConsumerPool(context);
            if (logger.isDebugEnabled()) {
                final ConsumerPool.PoolStats stats = pool.getPoolStats();
                logger.debug("Trying to retain connection. Obtained pool={},"
                        + " leaseObtainedCount={}, consumerCreatedCount={}, consumerClosedCount={}",
                        new Object[] { pool, stats.leasesObtainedCount, stats.consumerCreatedCount, stats.consumerClosedCount });
            }
            pool.retainConsumers();
        } catch (final Exception e) {
            logger.warn("Failed to retain connection due to {}", new Object[] { e }, e);
        }
    }, heartbeatCheckIntervalMillis, heartbeatCheckIntervalMillis, TimeUnit.MILLISECONDS);
}
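stopConnectionRetainer() is referenced above but not shown. A minimal sketch of what such a shutdown could look like, assuming connectionRetainer is a ScheduledExecutorService field; this is a hypothetical reconstruction for illustration, not the processor's actual code, and would typically be invoked from an @OnStopped or @OnUnscheduled lifecycle method.
// Hypothetical sketch: shut down the heartbeat executor created in onScheduled so it does not
// keep running (and retaining consumer connections) after the processor is stopped.
private void stopConnectionRetainer() {
    if (connectionRetainer != null) {
        connectionRetainer.shutdownNow(); // cancel the scheduled retain-connection task
        try {
            if (!connectionRetainer.awaitTermination(5, TimeUnit.SECONDS)) {
                getLogger().warn("Connection retainer did not terminate within 5 seconds.");
            }
        } catch (final InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        connectionRetainer = null;
    }
}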
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class PutSQL, method constructProcess.
@OnScheduled
public void constructProcess() {
    process = new PutGroup<>();
    process.setLogger(getLogger());
    process.fetchFlowFiles(fetchFlowFiles);
    process.initConnection(initConnection);
    process.groupFetchedFlowFiles(groupFlowFiles);
    process.putFlowFiles(putFlowFiles);
    process.adjustRoute(RollbackOnFailure.createAdjustRoute(REL_FAILURE, REL_RETRY));
    process.onCompleted((c, s, fc, conn) -> {
        try {
            conn.commit();
        } catch (SQLException e) {
            // Throw ProcessException to roll back the process session.
            throw new ProcessException("Failed to commit database connection due to " + e, e);
        }
    });
    process.onFailed((c, s, fc, conn, e) -> {
        try {
            conn.rollback();
        } catch (SQLException re) {
            // Just log the fact that the rollback failed.
            // The ProcessSession will be rolled back by the thrown exception, so nothing else needs to be done here.
            getLogger().warn("Failed to rollback database connection due to {}", new Object[] { re }, re);
        }
    });
    process.cleanup((c, s, fc, conn) -> {
        // Make sure that we try to set auto-commit back to whatever it was.
        if (fc.originalAutoCommit) {
            try {
                conn.setAutoCommit(true);
            } catch (final SQLException se) {
                getLogger().warn("Failed to reset autocommit due to {}", new Object[] { se });
            }
        }
    });
    exceptionHandler = new ExceptionHandler<>();
    exceptionHandler.mapException(e -> {
        if (e instanceof SQLNonTransientException) {
            return ErrorTypes.InvalidInput;
        } else if (e instanceof SQLException) {
            return ErrorTypes.TemporalFailure;
        } else {
            return ErrorTypes.UnknownFailure;
        }
    });
    adjustError = RollbackOnFailure.createAdjustError(getLogger());
    exceptionHandler.adjustError(adjustError);
}
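The lambda passed to mapException classifies SQL errors so failed FlowFiles can be routed differently. The standalone snippet below (hypothetical class and enum names, not the NiFi ErrorTypes class) illustrates why the instanceof checks must be ordered from most to least specific.
// Standalone illustration: SQLNonTransientException must be tested before SQLException because it
// is a subclass; reversing the checks would classify every SQL error as retryable.
import java.sql.SQLException;
import java.sql.SQLNonTransientException;
import java.util.function.Function;

public class SqlErrorClassifier {
    enum ErrorType { INVALID_INPUT, TEMPORAL_FAILURE, UNKNOWN_FAILURE }

    static final Function<Exception, ErrorType> CLASSIFIER = e -> {
        if (e instanceof SQLNonTransientException) {
            return ErrorType.INVALID_INPUT;     // retrying the same statement will not help
        } else if (e instanceof SQLException) {
            return ErrorType.TEMPORAL_FAILURE;  // e.g. a connection hiccup; a retry may succeed
        } else {
            return ErrorType.UNKNOWN_FAILURE;
        }
    };

    public static void main(String[] args) {
        System.out.println(CLASSIFIER.apply(new SQLNonTransientException("bad statement"))); // INVALID_INPUT
        System.out.println(CLASSIFIER.apply(new SQLException("connection reset")));          // TEMPORAL_FAILURE
        System.out.println(CLASSIFIER.apply(new IllegalStateException("boom")));             // UNKNOWN_FAILURE
    }
}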
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class RouteText, method onScheduled.
/**
 * When this processor is scheduled, update the dynamic properties into the map
 * for quick access during each onTrigger call
 *
 * @param context ProcessContext used to retrieve dynamic properties
 */
@OnScheduled
public void onScheduled(final ProcessContext context) {
    final String regex = context.getProperty(GROUPING_REGEX).getValue();
    if (regex != null) {
        groupingRegex = Pattern.compile(regex);
    }
    final Map<Relationship, PropertyValue> newPropertyMap = new HashMap<>();
    for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
        if (!descriptor.isDynamic()) {
            continue;
        }
        getLogger().debug("Adding new dynamic property: {}", new Object[] { descriptor });
        newPropertyMap.put(new Relationship.Builder().name(descriptor.getName()).build(), context.getProperty(descriptor));
    }
    this.propertyMap = newPropertyMap;
}
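Caching the dynamic properties in propertyMap avoids re-reading context.getProperties() for every line processed in onTrigger. The hypothetical helper below (findMatchingRelationships is a made-up name; the real RouteText supports several matching strategies) sketches how the cached map might be consulted for a single line, assuming each dynamic property value is literal text to search for.
// Hypothetical sketch: look up which relationships a line of text should be routed to,
// using the propertyMap built in onScheduled.
private Set<Relationship> findMatchingRelationships(final String line, final FlowFile flowFile) {
    final Set<Relationship> matched = new HashSet<>();
    for (final Map.Entry<Relationship, PropertyValue> entry : propertyMap.entrySet()) {
        // Evaluate Expression Language against the FlowFile before comparing.
        final String matchValue = entry.getValue().evaluateAttributeExpressions(flowFile).getValue();
        if (matchValue != null && line.contains(matchValue)) {
            matched.add(entry.getKey());
        }
    }
    return matched;
}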
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class ScanAttribute, method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    final String filterRegex = context.getProperty(DICTIONARY_FILTER).getValue();
    this.dictionaryFilterPattern = (filterRegex == null) ? null : Pattern.compile(filterRegex);
    final String attributeRegex = context.getProperty(ATTRIBUTE_PATTERN).getValue();
    this.attributePattern = (attributeRegex.equals(".*")) ? null : Pattern.compile(attributeRegex);
    this.dictionaryTerms = createDictionary(context);
    this.fileWatcher = new SynchronousFileWatcher(Paths.get(context.getProperty(DICTIONARY_FILE).getValue()), new LastModifiedMonitor(), 1000L);
}
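The SynchronousFileWatcher built here polls the dictionary file's last-modified time at most once per second, which lets onTrigger reload the dictionary only when the file actually changes. A minimal sketch of that check, assuming the watcher's checkAndReset() method reports whether the watched file changed since the last check:
// Sketch of how onTrigger might refresh the dictionary terms loaded in onScheduled.
if (fileWatcher.checkAndReset()) {
    // The dictionary file was modified; rebuild the in-memory term set.
    this.dictionaryTerms = createDictionary(context);
}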
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class GetJMSTopic, method handleSubscriptions.
@OnScheduled
public void handleSubscriptions(final ProcessContext context) throws IOException, JMSException {
    boolean usingDurableSubscription = context.getProperty(DURABLE_SUBSCRIPTION).asBoolean();
    final Properties persistedProps = getSubscriptionPropertiesFromFile();
    final Properties currentProps = getSubscriptionPropertiesFromContext(context);
    if (persistedProps == null) {
        if (usingDurableSubscription) {
            // properties have not yet been persisted.
            persistSubscriptionInfo(context);
        }
        return;
    }
    // decrypt the passwords so the persisted and current properties can be compared...
    // we can modify this properties instance since the unsubscribe method will reload
    // the properties from disk
    decryptPassword(persistedProps, context);
    decryptPassword(currentProps, context);
    // check if current values are the same as the persisted values.
    boolean same = true;
    for (final Map.Entry<Object, Object> entry : persistedProps.entrySet()) {
        final Object key = entry.getKey();
        final Object value = entry.getValue();
        final Object curVal = currentProps.get(key);
        if (value == null && curVal == null) {
            continue;
        }
        if (value == null || curVal == null) {
            same = false;
            break;
        }
        if (SUBSCRIPTION_NAME_PROPERTY.equals(key)) {
            // ignore the random UUID part of the subscription name
            if (!JmsFactory.clientIdPrefixEquals(value.toString(), curVal.toString())) {
                same = false;
                break;
            }
        } else if (!value.equals(curVal)) {
            same = false;
            break;
        }
    }
    if (same && usingDurableSubscription) {
        // properties are the same.
        return;
    }
    // unsubscribe from the old subscription.
    try {
        unsubscribe(context);
    } catch (final InvalidDestinationException e) {
        getLogger().warn("Failed to unsubscribe from subscription due to {}; subscription does not appear to be active, so ignoring it", new Object[] { e });
    }
    // we've now got a new subscription, so we must persist that new info before we create the subscription.
    if (usingDurableSubscription) {
        persistSubscriptionInfo(context);
    } else {
        // remove old subscription info if it was persisted
        try {
            Files.delete(getSubscriptionPath());
        } catch (Exception ignore) {
        }
    }
}
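getSubscriptionPropertiesFromFile() is not shown above. A hypothetical sketch (not the actual GetJMSTopic implementation) of how the persisted subscription properties might be loaded, returning null when nothing has been persisted yet, which matches the null check at the top of handleSubscriptions:
// Hypothetical sketch: load previously persisted subscription properties from disk, if any.
private Properties getSubscriptionPropertiesFromFile() throws IOException {
    final Path subscriptionPath = getSubscriptionPath();
    if (!Files.exists(subscriptionPath)) {
        return null; // nothing persisted yet
    }
    final Properties props = new Properties();
    try (final InputStream in = Files.newInputStream(subscriptionPath)) {
        props.load(in);
    }
    return props;
}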