Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class AbstractAWSCredentialsProviderProcessor, method onScheduled:
/**
 * This method checks whether {@link #AWS_CREDENTIALS_PROVIDER_SERVICE} is available and, if it
 * is, uses the credentials provider; otherwise it invokes {@link AbstractAWSProcessor#onScheduled(ProcessContext)},
 * which uses static AWSCredentials for the AWS processors.
 */
@OnScheduled
public void onScheduled(ProcessContext context) {
    ControllerService service = context.getProperty(AWS_CREDENTIALS_PROVIDER_SERVICE).asControllerService();
    if (service != null) {
        getLogger().debug("Using aws credentials provider service for creating client");
        onScheduledUsingControllerService(context);
    } else {
        getLogger().debug("Using aws credentials for creating client");
        super.onScheduled(context);
    }
}
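For context, a concrete subclass typically resolves the controller service to an AWSCredentialsProvider and builds its client from it. The sketch below is illustrative only: the getCredentialsProvider() accessor on AWSCredentialsProviderService, the client field, and the createClient(...)/createConfiguration(...) helpers are assumptions based on this pattern, not taken from the snippet above.

// Hypothetical sketch of the controller-service path; types come from the AWS SDK and nifi-aws-service-api.
protected void onScheduledUsingControllerService(final ProcessContext context) {
    final AWSCredentialsProviderService credentialsService = context
            .getProperty(AWS_CREDENTIALS_PROVIDER_SERVICE)
            .asControllerService(AWSCredentialsProviderService.class);
    final AWSCredentialsProvider credentialsProvider = credentialsService.getCredentialsProvider();
    // Build and cache the AWS client once per schedule rather than once per FlowFile.
    this.client = createClient(context, credentialsProvider, createConfiguration(context));
}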
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class PutElasticsearchHttpRecord, method setup:
@OnScheduled
public void setup(ProcessContext context) {
    super.setup(context);
    recordPathCache = new RecordPathCache(10);
}
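Caching compiled record paths pays off at onTrigger time, where each configured path can be compiled once and reused across records. A minimal usage sketch, assuming a parsed record variable and an illustrative "/id" path (neither is taken from the snippet above):

// Hypothetical use of the cache inside onTrigger(): compile each record path once, then evaluate it per record.
// RecordPath, RecordPathResult and FieldValue come from org.apache.nifi.record.path.
final RecordPath idPath = recordPathCache.getCompiled("/id");
final RecordPathResult result = idPath.evaluate(record);
final Optional<FieldValue> firstMatch = result.getSelectedFields().findFirst();
firstMatch.ifPresent(f -> getLogger().debug("Resolved /id to {}", new Object[] { f.getValue() }));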
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class AbstractEnrichIP, method onScheduled:
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    final String dbFileString = context.getProperty(GEO_DATABASE_FILE).getValue();
    final File dbFile = new File(dbFileString);
    final StopWatch stopWatch = new StopWatch(true);
    final DatabaseReader reader = new DatabaseReader.Builder(dbFile).build();
    stopWatch.stop();
    getLogger().info("Completed loading of Maxmind Database. Elapsed time was {} milliseconds.", new Object[] { stopWatch.getDuration(TimeUnit.MILLISECONDS) });
    databaseReaderRef.set(reader);
}
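Loading the database once at scheduling time means onTrigger can perform lookups against the cached reader without re-reading the file. A hedged sketch of such a lookup; the hard-coded example address and the trimmed error handling are illustrative, not part of the processor above:

// Hypothetical GeoIP lookup against the reader cached in databaseReaderRef.
final DatabaseReader reader = databaseReaderRef.get();
try {
    final CityResponse response = reader.city(InetAddress.getByName("93.184.216.34")); // example address
    getLogger().debug("Resolved to {} / {}", new Object[] {
            response.getCountry().getIsoCode(), response.getCity().getName() });
} catch (final GeoIp2Exception | IOException e) {
    getLogger().warn("GeoIP lookup failed", e);
}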
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class AbstractListProcessor, method updateState:
@OnScheduled
public final void updateState(final ProcessContext context) throws IOException {
    final String path = getPath(context);
    final DistributedMapCacheClient client = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);

    // Check if state already exists for this path. If so, we have already migrated the state.
    final StateMap stateMap = context.getStateManager().getState(getStateScope(context));
    if (stateMap.getVersion() == -1L) {
        try {
            // Migrate state from the old way of managing state (distributed cache service and local file)
            // to the new mechanism (State Manager).
            migrateState(path, client, context.getStateManager(), getStateScope(context));
        } catch (final IOException ioe) {
            throw new IOException("Failed to properly migrate state to State Manager", ioe);
        }
    }

    // When scheduled to run, check if the associated timestamp is null, signifying a clearing of state,
    // and reset the internal timestamp.
    if (lastListedLatestEntryTimestampMillis != null && stateMap.get(LATEST_LISTED_ENTRY_TIMESTAMP_KEY) == null) {
        getLogger().info("Detected that state was cleared for this component. Resetting internal values.");
        resetTimeStates();
    }

    if (resetState) {
        context.getStateManager().clear(getStateScope(context));
        resetState = false;
    }
}
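The counterpart of this migration is writing the listing position back through the State Manager once a run completes. A minimal sketch of that write, assuming a latestTimestampMillis value tracked by the subclass (the variable name is hypothetical) and a caller that handles the IOException:

// Hypothetical persistence of the most recent listed timestamp back to the State Manager.
final Map<String, String> updatedState = new HashMap<>();
updatedState.put(LATEST_LISTED_ENTRY_TIMESTAMP_KEY, String.valueOf(latestTimestampMillis));
context.getStateManager().setState(updatedState, getStateScope(context));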
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.
The class AbstractListenEventProcessor, method onScheduled:
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    charset = Charset.forName(context.getProperty(CHARSET).getValue());
    port = context.getProperty(PORT).asInteger();
    events = new LinkedBlockingQueue<>(context.getProperty(MAX_MESSAGE_QUEUE_SIZE).asInteger());

    final String nicIPAddressStr = context.getProperty(NETWORK_INTF_NAME).evaluateAttributeExpressions().getValue();
    final int maxChannelBufferSize = context.getProperty(MAX_SOCKET_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();

    InetAddress nicIPAddress = null;
    if (!StringUtils.isEmpty(nicIPAddressStr)) {
        NetworkInterface netIF = NetworkInterface.getByName(nicIPAddressStr);
        nicIPAddress = netIF.getInetAddresses().nextElement();
    }

    // create the dispatcher and call open() to bind to the given port
    dispatcher = createDispatcher(context, events);
    dispatcher.open(nicIPAddress, port, maxChannelBufferSize);

    // start a thread to run the dispatcher
    final Thread readerThread = new Thread(dispatcher);
    readerThread.setName(getClass().getName() + " [" + getIdentifier() + "]");
    readerThread.setDaemon(true);
    readerThread.start();
}
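A dispatcher bound in @OnScheduled should be released when the processor stops, otherwise the port stays occupied. A hedged sketch of the matching teardown; the null checks and queue clearing are illustrative and may differ from the actual base class:

// Hypothetical teardown matching the @OnScheduled setup above.
@OnUnscheduled
public void onUnscheduled() {
    if (dispatcher != null) {
        dispatcher.close();   // stops the dispatcher's run loop and unbinds the port
        dispatcher = null;
    }
    if (events != null) {
        events.clear();       // drop any undelivered events from the queue
    }
}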