use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
the class ReflectionUtilsTest method validateNoFailureIfQuiatelyIfOneOfArgumentsWrongTypeAndProcessLog.
@Test
public void validateNoFailureIfQuiatelyIfOneOfArgumentsWrongTypeAndProcessLog() throws Exception {
    ComponentLog pl = mock(ComponentLog.class);
    ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnStopped.class, new E(), pl, 3, "hjk", "hjk");
    verify(pl, Mockito.atMost(1)).error(Mockito.anyString());
}
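For reference, a minimal sketch (hypothetical, not the actual test fixture) of what a class like E could look like in this scenario: an @OnStopped method where one declared parameter type does not match the supplied arguments (3, "hjk", "hjk"), so the quiet invocation reports the mismatch through the supplied ComponentLog instead of throwing.
// Hypothetical fixture (assumption, for illustration only): the third parameter is
// declared as Long, but the test supplies a String ("hjk"), so exactly one argument
// has the wrong type and ReflectionUtils logs the problem on the ComponentLog.
public static class E {

    @OnStopped
    public void onStopped(Integer first, String second, Long third) {
        // never reached in the test because the argument types do not line up
    }
}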
use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
the class ProcessorInitializer method teardown.
@Override
public void teardown(ConfigurableComponent component) {
    Processor processor = (Processor) component;
    try (NarCloseable narCloseable = NarCloseable.withComponentNarLoader(component.getClass(), component.getIdentifier())) {
        final ComponentLog logger = new MockComponentLogger();
        final MockProcessContext context = new MockProcessContext();
        ReflectionUtils.quietlyInvokeMethodsWithAnnotation(OnShutdown.class, processor, logger, context);
    } finally {
        ExtensionManager.removeInstanceClassLoader(component.getIdentifier());
    }
}
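As context, a minimal sketch (assumed, not taken from the NiFi codebase) of a processor whose @OnShutdown method would be matched by the reflective call above; the MockComponentLogger and MockProcessContext created in teardown are passed straight through as the logger and candidate argument.
// Hypothetical example processor (illustration only): its @OnShutdown method accepts a
// ProcessContext, so the reflective call in teardown() can invoke it with the
// MockProcessContext; any failure is reported on the ComponentLog rather than propagating.
public class ExampleProcessor extends AbstractProcessor {

    @OnShutdown
    public void onShutdown(final ProcessContext context) {
        getLogger().info("Shutting down; releasing resources");
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) {
        // no-op for this sketch
    }
}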
use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
the class PutCassandraQL method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final long startNanos = System.nanoTime();
    final long statementTimeout = context.getProperty(STATEMENT_TIMEOUT).evaluateAttributeExpressions(flowFile).asTimePeriod(TimeUnit.MILLISECONDS);
    final Charset charset = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions(flowFile).getValue());
    // The documentation for the driver recommends the session remain open the entire time the processor is running
    // and states that it is thread-safe. This is why connectionSession is not in a try-with-resources.
    final Session connectionSession = cassandraSession.get();
    String cql = getCQL(session, flowFile, charset);
    try {
        PreparedStatement statement = connectionSession.prepare(cql);
        BoundStatement boundStatement = statement.bind();
        Map<String, String> attributes = flowFile.getAttributes();
        for (final Map.Entry<String, String> entry : attributes.entrySet()) {
            final String key = entry.getKey();
            final Matcher matcher = CQL_TYPE_ATTRIBUTE_PATTERN.matcher(key);
            if (matcher.matches()) {
                final int parameterIndex = Integer.parseInt(matcher.group(1));
                String paramType = entry.getValue();
                if (StringUtils.isEmpty(paramType)) {
                    throw new ProcessException("Value of the " + key + " attribute is null or empty, it must contain a valid value");
                }
                paramType = paramType.trim();
                final String valueAttrName = "cql.args." + parameterIndex + ".value";
                final String parameterValue = attributes.get(valueAttrName);
                try {
                    setStatementObject(boundStatement, parameterIndex - 1, valueAttrName, parameterValue, paramType);
                } catch (final InvalidTypeException | IllegalArgumentException e) {
                    throw new ProcessException("The value of the " + valueAttrName + " is '" + parameterValue + "', which cannot be converted into the necessary data type: " + paramType, e);
                }
            }
        }
        try {
            ResultSetFuture future = connectionSession.executeAsync(boundStatement);
            if (statementTimeout > 0) {
                future.getUninterruptibly(statementTimeout, TimeUnit.MILLISECONDS);
            } else {
                future.getUninterruptibly();
            }
            // Emit a Provenance SEND event
            final long transmissionMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
            // This isn't a real URI but since Cassandra is distributed we just use the cluster name
            String transitUri = "cassandra://" + connectionSession.getCluster().getMetadata().getClusterName();
            session.getProvenanceReporter().send(flowFile, transitUri, transmissionMillis, true);
            session.transfer(flowFile, REL_SUCCESS);
        } catch (final TimeoutException e) {
            throw new ProcessException(e);
        }
    } catch (final NoHostAvailableException nhae) {
        getLogger().error("No host in the Cassandra cluster can be contacted successfully to execute this statement", nhae);
        // Log up to 10 error messages. Otherwise if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. However we would like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        getLogger().error(nhae.getCustomMessage(10, true, false));
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryExecutionException qee) {
        logger.error("Cannot execute the statement with the requested consistency level successfully", qee);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_RETRY);
    } catch (final QueryValidationException qve) {
        logger.error("The CQL statement {} is invalid due to syntax error, authorization issue, or another validation problem; routing {} to failure", new Object[] { cql, flowFile }, qve);
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final QueryValidationException qve) {
    } catch (final ProcessException e) {
        logger.error("Unable to execute CQL statement {} for {} due to {}; routing to failure", new Object[] { cql, flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
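The parameter-binding loop above is driven entirely by FlowFile attributes. A minimal sketch of preparing such a FlowFile with the NiFi mock framework, assuming the type attributes follow the cql.args.N.type naming implied by the valueAttrName construction (connection properties such as contact points and keyspace are omitted and would be required for a real run):
// Sketch only: exercises the cql.args.* attribute convention against PutCassandraQL.
// The attribute names and the "text"/"int" type tokens are assumptions based on the
// loop above; runner.run() would additionally need a reachable Cassandra contact point.
final TestRunner runner = TestRunners.newTestRunner(PutCassandraQL.class);

final Map<String, String> attributes = new HashMap<>();
attributes.put("cql.args.1.type", "text");
attributes.put("cql.args.1.value", "user-123");
attributes.put("cql.args.2.type", "int");
attributes.put("cql.args.2.value", "42");

// The FlowFile content carries the parameterized CQL statement that getCQL(...) reads
runner.enqueue("INSERT INTO users (id, age) VALUES (?, ?)".getBytes(StandardCharsets.UTF_8), attributes);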
use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
the class QueryCassandra method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) {
    ComponentLog log = getLogger();
    try {
        connectToCassandra(context);
        final int fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
        if (fetchSize > 0) {
            synchronized (cluster.get()) {
                cluster.get().getConfiguration().getQueryOptions().setFetchSize(fetchSize);
            }
        }
    } catch (final NoHostAvailableException nhae) {
        log.error("No host in the Cassandra cluster can be contacted successfully to execute this query", nhae);
        // Log up to 10 error messages. Otherwise if a 1000-node cluster was specified but there was no connectivity,
        // a thousand error messages would be logged. However we would like information from Cassandra itself, so
        // cap the error limit at 10, format the messages, and don't include the stack trace (it is displayed by the
        // logger message above).
        log.error(nhae.getCustomMessage(10, true, false));
        throw new ProcessException(nhae);
    } catch (final AuthenticationException ae) {
        log.error("Invalid username/password combination", ae);
        throw new ProcessException(ae);
    }
}
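onScheduled establishes the connection up front; a complementary lifecycle hook typically releases it when the processor is stopped. A minimal sketch of what such a hook could look like, assuming cluster and cassandraSession are AtomicReference fields populated by connectToCassandra(context) as the snippets above suggest (an illustration, not the processor's actual implementation):
// Hypothetical cleanup hook: close the session first, then the cluster, and log through
// the ComponentLog. Field names mirror the onScheduled/onTrigger snippets above.
@OnStopped
public void stop() {
    final ComponentLog log = getLogger();
    final Session session = cassandraSession.getAndSet(null);
    if (session != null) {
        session.close();
    }
    final Cluster c = cluster.getAndSet(null);
    if (c != null) {
        c.close();
    }
    log.debug("Closed Cassandra session and cluster connection");
}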
use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
the class PutCouchbaseKey method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final byte[] content = new byte[(int) flowFile.getSize()];
    session.read(flowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            StreamUtils.fillBuffer(in, content, true);
        }
    });
    String docId = flowFile.getAttribute(CoreAttributes.UUID.key());
    if (!StringUtils.isEmpty(context.getProperty(DOC_ID).getValue())) {
        docId = context.getProperty(DOC_ID).evaluateAttributeExpressions(flowFile).getValue();
    }
    try {
        Document<?> doc = null;
        final DocumentType documentType = DocumentType.valueOf(context.getProperty(DOCUMENT_TYPE).getValue());
        switch (documentType) {
            case Json: {
                doc = RawJsonDocument.create(docId, new String(content, StandardCharsets.UTF_8));
                break;
            }
            case Binary: {
                final ByteBuf buf = Unpooled.copiedBuffer(content);
                doc = BinaryDocument.create(docId, buf);
                break;
            }
        }
        final PersistTo persistTo = PersistTo.valueOf(context.getProperty(PERSIST_TO).getValue());
        final ReplicateTo replicateTo = ReplicateTo.valueOf(context.getProperty(REPLICATE_TO).getValue());
        doc = openBucket(context).upsert(doc, persistTo, replicateTo);
        final Map<String, String> updatedAttrs = new HashMap<>();
        updatedAttrs.put(CouchbaseAttributes.Cluster.key(), context.getProperty(COUCHBASE_CLUSTER_SERVICE).getValue());
        updatedAttrs.put(CouchbaseAttributes.Bucket.key(), context.getProperty(BUCKET_NAME).getValue());
        updatedAttrs.put(CouchbaseAttributes.DocId.key(), docId);
        updatedAttrs.put(CouchbaseAttributes.Cas.key(), String.valueOf(doc.cas()));
        updatedAttrs.put(CouchbaseAttributes.Expiry.key(), String.valueOf(doc.expiry()));
        flowFile = session.putAllAttributes(flowFile, updatedAttrs);
        session.getProvenanceReporter().send(flowFile, getTransitUrl(context, docId));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final CouchbaseException e) {
        String errMsg = String.format("Writing document %s to Couchbase Server using %s failed due to %s", docId, flowFile, e);
        handleCouchbaseException(context, session, logger, flowFile, e, errMsg);
    }
}
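As a usage illustration, a sketch of driving this processor from the NiFi mock framework, assuming DOC_ID and DOCUMENT_TYPE are accessible PropertyDescriptors (as referenced in onTrigger above) and that the Couchbase cluster controller service is configured and enabled separately (omitted here):
// Sketch only: property descriptors and values are assumptions based on the snippet above;
// without an enabled CouchbaseClusterControllerService a real run would fail validation.
final TestRunner runner = TestRunners.newTestRunner(PutCouchbaseKey.class);
runner.setProperty(PutCouchbaseKey.DOC_ID, "${uuid}");       // document id via attribute expression
runner.setProperty(PutCouchbaseKey.DOCUMENT_TYPE, "Json");   // selects the DocumentType.Json branch

// The FlowFile content becomes the document body written by upsert(...)
runner.enqueue("{\"name\":\"nifi\"}".getBytes(StandardCharsets.UTF_8));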