Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
The class InvokeScriptedProcessor, method onTrigger.
/**
* Invokes the onTrigger() method of the scripted processor. If the script
* failed to reload, the processor yields until the script can be reloaded
* successfully. If the scripted processor's onTrigger() method throws an
* exception, a ProcessException will be thrown. If no processor is defined
* by the script, an error is logged with the system.
*
* @param context provides access to convenience methods for obtaining
* property values, delaying the scheduling of the processor, provides
* access to Controller Services, etc.
* @param sessionFactory provides access to a {@link ProcessSessionFactory},
* which can be used for accessing FlowFiles, etc.
* @throws ProcessException if the scripted processor's onTrigger() method
* throws an exception
*/
@Override
public void onTrigger(ProcessContext context, ProcessSessionFactory sessionFactory) throws ProcessException {
    // Initialize the rest of the processor resources if we have not already done so
    synchronized (scriptingComponentHelper.isInitialized) {
        if (!scriptingComponentHelper.isInitialized.get()) {
            scriptingComponentHelper.createResources();
        }
    }
    ComponentLog log = getLogger();
    // ensure the processor (if it exists) is loaded
    final Processor instance = processor.get();
    // ensure the processor did not fail to reload at some point
    final Collection<ValidationResult> results = validationResults.get();
    if (!results.isEmpty()) {
        log.error(String.format("Unable to run because the Processor is not valid: [%s]", StringUtils.join(results, ", ")));
        context.yield();
        return;
    }
    if (instance != null) {
        try {
            // run the processor
            instance.onTrigger(context, sessionFactory);
        } catch (final ProcessException e) {
            final String message = String.format("An error occurred executing the configured Processor [%s]: %s", context.getProperty(ScriptingComponentUtils.SCRIPT_FILE).getValue(), e);
            log.error(message);
            throw e;
        }
    } else {
        log.error("There is no processor defined by the script");
    }
}
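
For context, the instance obtained from processor.get() above is just an ordinary Processor implementation that the script engine has evaluated and bound. A minimal sketch of such a script-defined processor follows; the class name is hypothetical and not part of the NiFi codebase, and a real script would typically define it in Groovy or Jython rather than Java:

import java.util.Collections;
import java.util.Set;
import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical example of a processor a script could define
public class MyScriptedProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder()
            .name("success")
            .description("FlowFiles the script has handled")
            .build();

    @Override
    public Set<Relationship> getRelationships() {
        return Collections.singleton(REL_SUCCESS);
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }
        // A ProcessException thrown here is what the catch block in
        // InvokeScriptedProcessor.onTrigger() logs and rethrows.
        session.transfer(flowFile, REL_SUCCESS);
    }
}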
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
The class SiteToSiteBulletinReportingTask, method onTrigger.
@Override
public void onTrigger(final ReportingContext context) {
    final boolean isClustered = context.isClustered();
    final String nodeId = context.getClusterNodeIdentifier();
    if (nodeId == null && isClustered) {
        getLogger().debug("This instance of NiFi is configured for clustering, but the Cluster Node Identifier is not yet available. " + "Will wait for Node Identifier to be established.");
        return;
    }
    if (lastSentBulletinId < 0) {
        Map<String, String> state;
        try {
            state = context.getStateManager().getState(Scope.LOCAL).toMap();
        } catch (IOException e) {
            getLogger().error("Failed to get state at startup due to: " + e.getMessage(), e);
            return;
        }
        if (state.containsKey(LAST_EVENT_ID_KEY)) {
            lastSentBulletinId = Long.parseLong(state.get(LAST_EVENT_ID_KEY));
        }
    }
    final BulletinQuery bulletinQuery = new BulletinQuery.Builder().after(lastSentBulletinId).build();
    final List<Bulletin> bulletins = context.getBulletinRepository().findBulletins(bulletinQuery);
    if (bulletins == null || bulletins.isEmpty()) {
        getLogger().debug("No events to send because no events are stored in the repository.");
        return;
    }
    final OptionalLong opMaxId = bulletins.stream().mapToLong(t -> t.getId()).max();
    final Long currMaxId = opMaxId.isPresent() ? opMaxId.getAsLong() : -1;
    if (currMaxId < lastSentBulletinId) {
        getLogger().warn("Current bulletin max id is {} which is less than what was stored in state as the last queried event, which was {}. " + "This means the bulletins repository restarted its ids. Restarting querying from the beginning.", new Object[] { currMaxId, lastSentBulletinId });
        lastSentBulletinId = -1;
    }
    if (currMaxId == lastSentBulletinId) {
        getLogger().debug("No events to send due to the current max id being equal to the last id that was sent.");
        return;
    }
    final String platform = context.getProperty(PLATFORM).evaluateAttributeExpressions().getValue();
    final Map<String, ?> config = Collections.emptyMap();
    final JsonBuilderFactory factory = Json.createBuilderFactory(config);
    final JsonObjectBuilder builder = factory.createObjectBuilder();
    final DateFormat df = new SimpleDateFormat(TIMESTAMP_FORMAT);
    df.setTimeZone(TimeZone.getTimeZone("Z"));
    final long start = System.nanoTime();
    // Create a JSON array of all the events in the current batch
    final JsonArrayBuilder arrayBuilder = factory.createArrayBuilder();
    for (final Bulletin bulletin : bulletins) {
        if (bulletin.getId() > lastSentBulletinId) {
            arrayBuilder.add(serialize(factory, builder, bulletin, df, platform, nodeId));
        }
    }
    final JsonArray jsonArray = arrayBuilder.build();
    // Send the JSON document for the current batch
    try {
        final Transaction transaction = getClient().createTransaction(TransferDirection.SEND);
        if (transaction == null) {
            getLogger().info("All destination nodes are penalized; will attempt to send data later");
            return;
        }
        final Map<String, String> attributes = new HashMap<>();
        final String transactionId = UUID.randomUUID().toString();
        attributes.put("reporting.task.transaction.id", transactionId);
        attributes.put("reporting.task.name", getName());
        attributes.put("reporting.task.uuid", getIdentifier());
        attributes.put("reporting.task.type", this.getClass().getSimpleName());
        attributes.put("mime.type", "application/json");
        final byte[] data = jsonArray.toString().getBytes(StandardCharsets.UTF_8);
        transaction.send(data, attributes);
        transaction.confirm();
        transaction.complete();
        final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - start);
        getLogger().info("Successfully sent {} Bulletins to destination in {} ms; Transaction ID = {}; First Event ID = {}", new Object[] { bulletins.size(), transferMillis, transactionId, bulletins.get(0).getId() });
    } catch (final IOException e) {
        throw new ProcessException("Failed to send Bulletins to destination due to IOException: " + e.getMessage(), e);
    }
    // Store the id of the last event so we know where we left off
    try {
        context.getStateManager().setState(Collections.singletonMap(LAST_EVENT_ID_KEY, String.valueOf(currMaxId)), Scope.LOCAL);
    } catch (final IOException ioe) {
        getLogger().error("Failed to update state to {} due to {}; this could result in events being re-sent after a restart.", new Object[] { currMaxId, ioe.getMessage() }, ioe);
    }
    lastSentBulletinId = currMaxId;
}
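
The restore/checkpoint pattern around LAST_EVENT_ID_KEY can be isolated into two helpers. This is a minimal sketch with hypothetical method names; the StateManager calls mirror the ones used in onTrigger() above:

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;

// Hypothetical helpers extracted from the logic above
private long restoreLastId(final StateManager stateManager) throws IOException {
    final Map<String, String> state = stateManager.getState(Scope.LOCAL).toMap();
    final String stored = state.get(LAST_EVENT_ID_KEY);
    return stored == null ? -1L : Long.parseLong(stored);
}

private void checkpointLastId(final StateManager stateManager, final long lastId) throws IOException {
    // Overwrites local state so a restarted task resumes after the last sent bulletin
    stateManager.setState(Collections.singletonMap(LAST_EVENT_ID_KEY, String.valueOf(lastId)), Scope.LOCAL);
}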
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
The class SNMPGetter, method get.
/**
 * Constructs the PDU for the SNMP Get request and returns the
 * result so that the flow file can be created.
 * @return {@link ResponseEvent}
 */
public ResponseEvent get() {
    try {
        PDU pdu = null;
        if (this.target.getVersion() == SnmpConstants.version3) {
            pdu = new ScopedPDU();
        } else {
            pdu = new PDU();
        }
        pdu.add(new VariableBinding(this.oid));
        pdu.setType(PDU.GET);
        return this.snmp.get(pdu, this.target);
    } catch (IOException e) {
        logger.error("Failed to get information from SNMP agent; " + this, e);
        throw new ProcessException(e);
    }
}
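
For completeness, a hedged usage sketch of the SNMP4J session and target that SNMPGetter wraps; this is not NiFi code, and the agent address, community string, and OID are illustrative assumptions:

import java.io.IOException;
import org.snmp4j.CommunityTarget;
import org.snmp4j.PDU;
import org.snmp4j.Snmp;
import org.snmp4j.event.ResponseEvent;
import org.snmp4j.mp.SnmpConstants;
import org.snmp4j.smi.GenericAddress;
import org.snmp4j.smi.OID;
import org.snmp4j.smi.OctetString;
import org.snmp4j.smi.VariableBinding;
import org.snmp4j.transport.DefaultUdpTransportMapping;

public class SnmpGetExample {
    public static void main(String[] args) throws IOException {
        final Snmp snmp = new Snmp(new DefaultUdpTransportMapping());
        snmp.listen();

        final CommunityTarget target = new CommunityTarget();
        target.setCommunity(new OctetString("public"));                // assumed community
        target.setAddress(GenericAddress.parse("udp:127.0.0.1/161"));  // assumed agent address
        target.setVersion(SnmpConstants.version2c);
        target.setRetries(2);
        target.setTimeout(1000);

        final PDU pdu = new PDU();
        pdu.add(new VariableBinding(new OID("1.3.6.1.2.1.1.1.0")));    // sysDescr.0
        pdu.setType(PDU.GET);

        final ResponseEvent response = snmp.get(pdu, target);
        final PDU responsePdu = response.getResponse(); // null on timeout
        System.out.println(responsePdu == null ? "timed out" : responsePdu.getVariableBindings());
        snmp.close();
    }
}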
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
The class ValidateRecord, method getValidationSchema.
protected RecordSchema getValidationSchema(final ProcessContext context, final FlowFile flowFile, final RecordReader reader) throws MalformedRecordException, IOException, SchemaNotFoundException {
    final String schemaAccessStrategy = context.getProperty(SCHEMA_ACCESS_STRATEGY).getValue();
    if (schemaAccessStrategy.equals(READER_SCHEMA.getValue())) {
        return reader.getSchema();
    } else if (schemaAccessStrategy.equals(SCHEMA_NAME_PROPERTY.getValue())) {
        final SchemaRegistry schemaRegistry = context.getProperty(SCHEMA_REGISTRY).asControllerService(SchemaRegistry.class);
        final String schemaName = context.getProperty(SCHEMA_NAME).evaluateAttributeExpressions(flowFile).getValue();
        final SchemaIdentifier schemaIdentifier = SchemaIdentifier.builder().name(schemaName).build();
        return schemaRegistry.retrieveSchema(schemaIdentifier);
    } else if (schemaAccessStrategy.equals(SCHEMA_TEXT_PROPERTY.getValue())) {
        final String schemaText = context.getProperty(SCHEMA_TEXT).evaluateAttributeExpressions(flowFile).getValue();
        final Parser parser = new Schema.Parser();
        final Schema avroSchema = parser.parse(schemaText);
        return AvroTypeUtil.createSchema(avroSchema);
    } else {
        throw new ProcessException("Invalid Schema Access Strategy: " + schemaAccessStrategy);
    }
}
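
The Schema Text branch above boils down to parsing Avro schema JSON and converting it with AvroTypeUtil. A minimal sketch, with an inline schema standing in for the evaluated SCHEMA_TEXT property value:

import org.apache.avro.Schema;
import org.apache.nifi.avro.AvroTypeUtil;
import org.apache.nifi.serialization.record.RecordSchema;

public class SchemaTextExample {
    public static void main(String[] args) {
        // Inline Avro schema standing in for the SCHEMA_TEXT property value
        final String schemaText =
            "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
            + "{\"name\":\"id\",\"type\":\"long\"},"
            + "{\"name\":\"name\",\"type\":\"string\"}]}";
        final Schema avroSchema = new Schema.Parser().parse(schemaText);
        final RecordSchema recordSchema = AvroTypeUtil.createSchema(avroSchema);
        System.out.println(recordSchema.getFieldNames()); // [id, name]
    }
}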
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
The class FlowFileEnumerator, method moveNext.
@Override
public boolean moveNext() {
    currentRow = null;
    while (currentRow == null) {
        try {
            currentRow = filterColumns(recordParser.nextRecord());
            break;
        } catch (final Exception e) {
            throw new ProcessException("Failed to read next record in stream for " + flowFile, e);
        }
    }
    if (currentRow == null) {
        // If we are out of data, close the InputStream. We do this because
        // Calcite does not necessarily call our close() method.
        close();
        try {
            onFinish();
        } catch (final Exception e) {
            logger.error("Failed to perform tasks when enumerator was finished", e);
        }
        return false;
    }
    recordsRead++;
    return true;
}
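
A hedged sketch of how a caller such as Calcite drives this enumerator: moveNext() advances and reports availability, current() yields the row, and close() releases resources. The countRows helper is hypothetical:

import org.apache.calcite.linq4j.Enumerator;

public class EnumeratorExample {
    // Hypothetical helper; Calcite's generated code follows the same loop shape
    static long countRows(final Enumerator<Object> enumerator) {
        long rows = 0;
        try {
            while (enumerator.moveNext()) { // false once the record stream is drained
                final Object row = enumerator.current();
                rows++;
            }
        } finally {
            // FlowFileEnumerator closes its own InputStream when moveNext()
            // returns false, but callers should still close() defensively
            enumerator.close();
        }
        return rows;
    }
}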