Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
Class ReportLineageToAtlas, method createNiFiAtlasClient:
/**
* In order to avoid authentication expiration issues (i.e. Kerberos ticket and DelegationToken expiration),
* create Atlas client instance at every onTrigger execution.
*/
private NiFiAtlasClient createNiFiAtlasClient(ReportingContext context) {
    List<String> urls = new ArrayList<>();
    parseAtlasUrls(context.getProperty(ATLAS_URLS), urls::add);
    try {
        return new NiFiAtlasClient(atlasAuthN.createClient(urls.toArray(new String[] {})));
    } catch (final NullPointerException e) {
        throw new ProcessException(String.format("Failed to initialize Atlas client due to %s."
                + " Make sure 'atlas-application.properties' is in the directory specified with %s"
                + " or under root classpath if not specified.", e, ATLAS_CONF_DIR.getDisplayName()), e);
    }
}
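The javadoc above motivates building the client on every trigger rather than caching it. As a rough, hypothetical sketch (this onTrigger body is not taken from the NiFi repo), the reporting task would call the factory at the start of each execution:

@Override
public void onTrigger(ReportingContext context) {
    // Build a fresh client each run so an expired Kerberos ticket or delegation
    // token held by a cached client can never be reused (illustrative only).
    final NiFiAtlasClient atlasClient = createNiFiAtlasClient(context);
    // ... report lineage to Atlas using atlasClient ...
}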
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
Class BinFiles, method processBins:
private int processBins(final ProcessContext context) {
    final ComponentLog logger = getLogger();
    int processedBins = 0;
    Bin bin;
    while ((bin = readyBins.poll()) != null) {
        boolean binAlreadyCommitted;
        try {
            binAlreadyCommitted = this.processBin(bin, context);
        } catch (final ProcessException e) {
            logger.error("Failed to process bundle of {} files due to {}", new Object[] { bin.getContents().size(), e });
            final ProcessSession binSession = bin.getSession();
            for (final FlowFile flowFile : bin.getContents()) {
                binSession.transfer(flowFile, REL_FAILURE);
            }
            binSession.commit();
            continue;
        } catch (final Exception e) {
            logger.error("Failed to process bundle of {} files due to {}; rolling back sessions",
                    new Object[] { bin.getContents().size(), e });
            bin.getSession().rollback();
            continue;
        }
        // If this bin's session has been committed, move on.
        if (!binAlreadyCommitted) {
            final ProcessSession binSession = bin.getSession();
            binSession.transfer(bin.getContents(), REL_ORIGINAL);
            binSession.commit();
        }
        processedBins++;
    }
    return processedBins;
}
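The two catch blocks define a contract for subclasses: throwing ProcessException from processBin routes the entire bin to REL_FAILURE and commits, while any other exception rolls the bin's session back. A minimal, hypothetical subclass sketch, consistent with the boolean-returning processBin shown above but not taken from the repo:

@Override
protected boolean processBin(final Bin bin, final ProcessContext context) throws ProcessException {
    if (bin.getContents().isEmpty()) {
        // The caller above transfers every FlowFile in the bin to REL_FAILURE.
        throw new ProcessException("Bin contained no FlowFiles to process");
    }
    // ... merge or otherwise process bin.getContents() on bin.getSession() ...
    return false; // false: the caller transfers the contents to REL_ORIGINAL and commits
}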
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
Class ExtractImageMetadata, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowfile = session.get();
    if (flowfile == null) {
        return;
    }
    final ComponentLog logger = this.getLogger();
    final AtomicReference<Metadata> value = new AtomicReference<>(null);
    final Integer max = context.getProperty(MAX_NUMBER_OF_ATTRIBUTES).asInteger();
    try {
        session.read(flowfile, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                try {
                    Metadata imageMetadata = ImageMetadataReader.readMetadata(in);
                    value.set(imageMetadata);
                } catch (ImageProcessingException ex) {
                    throw new ProcessException(ex);
                }
            }
        });
        Metadata metadata = value.get();
        Map<String, String> results = getTags(max, metadata);
        // Write the results to an attribute
        if (!results.isEmpty()) {
            flowfile = session.putAllAttributes(flowfile, results);
        }
        session.transfer(flowfile, SUCCESS);
    } catch (ProcessException e) {
        logger.error("Failed to extract image metadata from {} due to {}", new Object[] { flowfile, e });
        session.transfer(flowfile, FAILURE);
    }
}
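Because ImageProcessingException is rethrown as a ProcessException inside the read callback, non-image content ends up on the FAILURE relationship. A hedged test sketch using the NiFi mock framework (nifi-mock's TestRunner/TestRunners; not from the repo) that exercises that path:

TestRunner runner = TestRunners.newTestRunner(new ExtractImageMetadata());
runner.enqueue("this is not an image".getBytes(StandardCharsets.UTF_8));
runner.run();
// The ImageProcessingException surfaces as a ProcessException and the FlowFile
// is routed to FAILURE by the catch block above.
runner.assertAllFlowFilesTransferred(ExtractImageMetadata.FAILURE, 1);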
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
Class ExtractMediaMetadata, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final ComponentLog logger = this.getLogger();
    final AtomicReference<Map<String, String>> value = new AtomicReference<>(null);
    final Integer maxAttribCount = context.getProperty(MAX_NUMBER_OF_ATTRIBUTES).asInteger();
    final Integer maxAttribLength = context.getProperty(MAX_ATTRIBUTE_LENGTH).asInteger();
    final String prefix = context.getProperty(METADATA_KEY_PREFIX).evaluateAttributeExpressions(flowFile).getValue();
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                try {
                    Map<String, String> results = tika_parse(in, prefix, maxAttribCount, maxAttribLength);
                    value.set(results);
                } catch (SAXException | TikaException e) {
                    throw new IOException(e);
                }
            }
        });
        // Write the results to attributes
        Map<String, String> results = value.get();
        if (results != null && !results.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, results);
        }
        session.transfer(flowFile, SUCCESS);
        session.getProvenanceReporter().modifyAttributes(flowFile, "media attributes extracted");
    } catch (ProcessException e) {
        logger.error("Failed to extract media metadata from {} due to {}", new Object[] { flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, FAILURE);
    }
}
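The same pattern applies here: SAXException and TikaException are wrapped inside the callback, and the outer ProcessException handler penalizes the FlowFile and routes it to FAILURE. A hypothetical happy-path test sketch (the test file path and prefix are placeholders, and the property descriptor constant is assumed to be accessible; none of this is from the repo):

TestRunner runner = TestRunners.newTestRunner(new ExtractMediaMetadata());
runner.setProperty(ExtractMediaMetadata.METADATA_KEY_PREFIX, "media.");
runner.enqueue(Paths.get("src/test/resources/sample.mp3")); // placeholder test file
runner.run();
// Tika-derived fields are added as FlowFile attributes under the configured prefix.
runner.assertAllFlowFilesTransferred(ExtractMediaMetadata.SUCCESS, 1);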
Use of org.apache.nifi.processor.exception.ProcessException in project nifi by apache.
Class DeleteMongo, method onTrigger:
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    final WriteConcern writeConcern = getWriteConcern(context);
    final MongoCollection<Document> collection = getCollection(context).withWriteConcern(writeConcern);
    final String deleteMode = context.getProperty(DELETE_MODE).getValue();
    final String deleteAttr = flowFile.getAttribute("mongodb.delete.mode");
    final Boolean failMode = context.getProperty(FAIL_ON_NO_DELETE).asBoolean();
    if (deleteMode.equals(DELETE_ATTR.getValue())
            && (StringUtils.isEmpty(deleteAttr) || !ALLOWED_DELETE_VALUES.contains(deleteAttr.toLowerCase()))) {
        getLogger().error(String.format("%s is not an allowed value for mongodb.delete.mode", deleteAttr));
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    try {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        session.exportTo(flowFile, bos);
        bos.close();
        String json = new String(bos.toByteArray());
        Document query = Document.parse(json);
        DeleteResult result;
        if (deleteMode.equals(DELETE_ONE.getValue())
                || (deleteMode.equals(DELETE_ATTR.getValue()) && deleteAttr.toLowerCase().equals("one"))) {
            result = collection.deleteOne(query);
        } else {
            result = collection.deleteMany(query);
        }
        if (failMode && result.getDeletedCount() == 0) {
            session.transfer(flowFile, REL_FAILURE);
        } else {
            session.transfer(flowFile, REL_SUCCESS);
        }
    } catch (Exception ex) {
        getLogger().error("Could not send a delete to MongoDB, failing...", ex);
        session.transfer(flowFile, REL_FAILURE);
    }
}
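The processor treats the FlowFile body as the MongoDB query itself: the content is exported to a byte stream, parsed with Document.parse, and handed to deleteOne or deleteMany depending on the Delete Mode property or the mongodb.delete.mode attribute. A minimal illustration of that parsing step (the query document here is a made-up example, not from the repo):

// The FlowFile content, e.g. {"status": "obsolete"}, becomes the delete filter.
Document query = Document.parse("{ \"status\": \"obsolete\" }");
// With mongodb.delete.mode = "one" (and Delete Mode set to use the attribute),
// only the first match is removed; otherwise deleteMany removes every match.
DeleteResult result = collection.deleteMany(query);
long removed = result.getDeletedCount();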