Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class PutJMS, method onTrigger. Here the ComponentLog reports connection, send, and commit outcomes while the processor pulls a batch of FlowFiles, borrows or creates a pooled JMS message producer, sends each FlowFile's content as a JMS message, and routes the batch to 'success' or 'failure'.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    final List<FlowFile> flowFiles = session.get(context.getProperty(BATCH_SIZE).asInteger().intValue());
    if (flowFiles.isEmpty()) {
        return;
    }
    WrappedMessageProducer wrappedProducer = producerQueue.poll();
    if (wrappedProducer == null) {
        try {
            wrappedProducer = JmsFactory.createMessageProducer(context, true);
            logger.info("Connected to JMS server {}", new Object[] { context.getProperty(URL).getValue() });
        } catch (final JMSException e) {
            logger.error("Failed to connect to JMS Server due to {}", new Object[] { e });
            session.transfer(flowFiles, REL_FAILURE);
            context.yield();
            return;
        }
    }
    final Session jmsSession = wrappedProducer.getSession();
    final MessageProducer producer = wrappedProducer.getProducer();
    final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    try {
        final Set<FlowFile> successfulFlowFiles = new HashSet<>();
        for (FlowFile flowFile : flowFiles) {
            if (flowFile.getSize() > maxBufferSize) {
                session.transfer(flowFile, REL_FAILURE);
                logger.warn("Routing {} to failure because its size exceeds the configured max", new Object[] { flowFile });
                continue;
            }
            // Read the contents of the FlowFile into a byte array
            final byte[] messageContent = new byte[(int) flowFile.getSize()];
            session.read(flowFile, new InputStreamCallback() {
                @Override
                public void process(final InputStream in) throws IOException {
                    StreamUtils.fillBuffer(in, messageContent, true);
                }
            });
            final Long ttl = context.getProperty(MESSAGE_TTL).asTimePeriod(TimeUnit.MILLISECONDS);
            final String replyToQueueName = context.getProperty(REPLY_TO_QUEUE).evaluateAttributeExpressions(flowFile).getValue();
            final Destination replyToQueue = replyToQueueName == null ? null : JmsFactory.createQueue(context, replyToQueueName);
            int priority = DEFAULT_MESSAGE_PRIORITY;
            try {
                final Integer priorityInt = context.getProperty(MESSAGE_PRIORITY).evaluateAttributeExpressions(flowFile).asInteger();
                priority = priorityInt == null ? priority : priorityInt;
            } catch (final NumberFormatException e) {
                logger.warn("Invalid value for JMS Message Priority: {}; defaulting to priority of {}",
                        new Object[] { context.getProperty(MESSAGE_PRIORITY).evaluateAttributeExpressions(flowFile).getValue(), DEFAULT_MESSAGE_PRIORITY });
            }
            try {
                final Message message = createMessage(jmsSession, context, messageContent, flowFile, replyToQueue, priority);
                if (ttl == null) {
                    producer.setTimeToLive(0L);
                } else {
                    producer.setTimeToLive(ttl);
                }
                producer.send(message);
            } catch (final JMSException e) {
                logger.error("Failed to send {} to JMS Server due to {}", new Object[] { flowFile, e });
                session.transfer(flowFiles, REL_FAILURE);
                context.yield();
                try {
                    jmsSession.rollback();
                } catch (final JMSException jmse) {
                    logger.warn("Unable to roll back JMS Session due to {}", new Object[] { jmse });
                }
                wrappedProducer.close(logger);
                return;
            }
            successfulFlowFiles.add(flowFile);
            session.getProvenanceReporter().send(flowFile, context.getProperty(URL).getValue());
        }
        try {
            jmsSession.commit();
            session.transfer(successfulFlowFiles, REL_SUCCESS);
            final String flowFileDescription = successfulFlowFiles.size() > 10 ? successfulFlowFiles.size() + " FlowFiles" : successfulFlowFiles.toString();
            logger.info("Sent {} to JMS Server and transferred to 'success'", new Object[] { flowFileDescription });
        } catch (JMSException e) {
            logger.error("Failed to commit JMS Session due to {} and transferred to 'failure'", new Object[] { e });
            session.transfer(flowFiles, REL_FAILURE);
            context.yield();
            wrappedProducer.close(logger);
        }
    } finally {
        if (!wrappedProducer.isClosed()) {
            producerQueue.offer(wrappedProducer);
        }
    }
}
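The logger.info, logger.warn, and logger.error calls above use ComponentLog's Object[]-argument overloads: each {} placeholder in the message is filled from the array, and a Throwable passed as a trailing argument is logged with its stack trace. The following is a minimal, hedged sketch of that pattern in a custom processor; the processor name, messages, and relationships are invented for illustration, and only the ComponentLog, AbstractProcessor, and Relationship APIs are real NiFi classes.

import java.util.HashSet;
import java.util.Set;

import org.apache.nifi.flowfile.FlowFile;
import org.apache.nifi.logging.ComponentLog;
import org.apache.nifi.processor.AbstractProcessor;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.ProcessSession;
import org.apache.nifi.processor.Relationship;
import org.apache.nifi.processor.exception.ProcessException;

// Hypothetical processor used only to illustrate the ComponentLog call pattern.
public class LoggingExampleProcessor extends AbstractProcessor {

    static final Relationship REL_SUCCESS = new Relationship.Builder().name("success").build();
    static final Relationship REL_FAILURE = new Relationship.Builder().name("failure").build();

    @Override
    public Set<Relationship> getRelationships() {
        final Set<Relationship> relationships = new HashSet<>();
        relationships.add(REL_SUCCESS);
        relationships.add(REL_FAILURE);
        return relationships;
    }

    @Override
    public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
        final ComponentLog logger = getLogger();
        FlowFile flowFile = session.get();
        if (flowFile == null) {
            return;
        }
        try {
            // Each {} placeholder is filled from the corresponding Object[] element.
            logger.info("Processing {} ({} bytes)", new Object[] { flowFile, flowFile.getSize() });
            // ... processor work that may throw ProcessException would go here ...
            session.transfer(flowFile, REL_SUCCESS);
        } catch (final ProcessException e) {
            // Passing the exception as a trailing argument also records its stack trace.
            logger.error("Failed to process {} due to {}", new Object[] { flowFile, e }, e);
            session.transfer(flowFile, REL_FAILURE);
        }
    }
}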
Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class QueryDatabaseTable, method onTrigger. Here the ComponentLog records query execution, per-FlowFile Avro record counts, and state-update failures while the processor pages a SQL result set into Avro FlowFiles and tracks observed maximum column values in cluster-scoped state.
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    // Fetch the column/table info once
    if (!setupComplete.get()) {
        super.setup(context);
    }
    ProcessSession session = sessionFactory.createSession();
    final List<FlowFile> resultSetFlowFiles = new ArrayList<>();
    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final DatabaseAdapter dbAdapter = dbAdapters.get(context.getProperty(DB_TYPE).getValue());
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions().getValue();
    final String columnNames = context.getProperty(COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String maxValueColumnNames = context.getProperty(MAX_VALUE_COLUMN_NAMES).evaluateAttributeExpressions().getValue();
    final String customWhereClause = context.getProperty(WHERE_CLAUSE).evaluateAttributeExpressions().getValue();
    final Integer fetchSize = context.getProperty(FETCH_SIZE).evaluateAttributeExpressions().asInteger();
    final Integer maxRowsPerFlowFile = context.getProperty(MAX_ROWS_PER_FLOW_FILE).evaluateAttributeExpressions().asInteger();
    final Integer outputBatchSizeField = context.getProperty(OUTPUT_BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    final int outputBatchSize = outputBatchSizeField == null ? 0 : outputBatchSizeField;
    final Integer maxFragments = context.getProperty(MAX_FRAGMENTS).isSet() ? context.getProperty(MAX_FRAGMENTS).evaluateAttributeExpressions().asInteger() : 0;
    final JdbcCommon.AvroConversionOptions options = JdbcCommon.AvroConversionOptions.builder()
            .recordName(tableName)
            .maxRows(maxRowsPerFlowFile)
            .convertNames(context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean())
            .useLogicalTypes(context.getProperty(USE_AVRO_LOGICAL_TYPES).asBoolean())
            .defaultPrecision(context.getProperty(DEFAULT_PRECISION).evaluateAttributeExpressions().asInteger())
            .defaultScale(context.getProperty(DEFAULT_SCALE).evaluateAttributeExpressions().asInteger())
            .build();
    final StateManager stateManager = context.getStateManager();
    final StateMap stateMap;
    try {
        stateMap = stateManager.getState(Scope.CLUSTER);
    } catch (final IOException ioe) {
        getLogger().error("Failed to retrieve observed maximum values from the State Manager. Will not perform "
                + "query until this is accomplished.", ioe);
        context.yield();
        return;
    }
    // Make a mutable copy of the current state property map. This will be updated by the result row callback, and eventually
    // set as the current state map (after the session has been committed)
    final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());
    // If an initial max value for column(s) has been specified using properties, and this column is not in the state manager, sync them to the state property map
    for (final Map.Entry<String, String> maxProp : maxValueProperties.entrySet()) {
        String maxPropKey = maxProp.getKey().toLowerCase();
        String fullyQualifiedMaxPropKey = getStateKey(tableName, maxPropKey);
        if (!statePropertyMap.containsKey(fullyQualifiedMaxPropKey)) {
            String newMaxPropValue;
            // If the value is not present under the fully-qualified key, it may have been stored (by an earlier
            // version) under just the column name. Fall back to that value if it exists,
            // but store the new initial max value under the fully-qualified key.
            if (statePropertyMap.containsKey(maxPropKey)) {
                newMaxPropValue = statePropertyMap.get(maxPropKey);
            } else {
                newMaxPropValue = maxProp.getValue();
            }
            statePropertyMap.put(fullyQualifiedMaxPropKey, newMaxPropValue);
        }
    }
    List<String> maxValueColumnNameList = StringUtils.isEmpty(maxValueColumnNames) ? null : Arrays.asList(maxValueColumnNames.split("\\s*,\\s*"));
    final String selectQuery = getQuery(dbAdapter, tableName, columnNames, maxValueColumnNameList, customWhereClause, statePropertyMap);
    final StopWatch stopWatch = new StopWatch(true);
    final String fragmentIdentifier = UUID.randomUUID().toString();
    try (final Connection con = dbcpService.getConnection();
         final Statement st = con.createStatement()) {
        if (fetchSize != null && fetchSize > 0) {
            try {
                st.setFetchSize(fetchSize);
            } catch (SQLException se) {
                // Not all drivers support this, just log the error (at debug level) and move on
                logger.debug("Cannot set fetch size to {} due to {}", new Object[] { fetchSize, se.getLocalizedMessage() }, se);
            }
        }
        String jdbcURL = "DBCPService";
        try {
            DatabaseMetaData databaseMetaData = con.getMetaData();
            if (databaseMetaData != null) {
                jdbcURL = databaseMetaData.getURL();
            }
        } catch (SQLException se) {
            // Ignore and use default JDBC URL. This shouldn't happen unless the driver doesn't implement getMetaData() properly
        }
        final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).evaluateAttributeExpressions().asTimePeriod(TimeUnit.SECONDS).intValue();
        // timeout in seconds
        st.setQueryTimeout(queryTimeout);
        try {
            logger.debug("Executing query {}", new Object[] { selectQuery });
            final ResultSet resultSet = st.executeQuery(selectQuery);
            int fragmentIndex = 0;
            while (true) {
                final AtomicLong nrOfRows = new AtomicLong(0L);
                FlowFile fileToProcess = session.create();
                try {
                    fileToProcess = session.write(fileToProcess, out -> {
                        // Max values will be updated in the state property map by the callback
                        final MaxValueResultSetRowCollector maxValCollector = new MaxValueResultSetRowCollector(tableName, statePropertyMap, dbAdapter);
                        try {
                            nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, options, maxValCollector));
                        } catch (SQLException | RuntimeException e) {
                            throw new ProcessException("Error during database query or conversion of records to Avro.", e);
                        }
                    });
                } catch (ProcessException e) {
                    // Add flowfile to results before rethrowing so it will be removed from session in outer catch
                    resultSetFlowFiles.add(fileToProcess);
                    throw e;
                }
                if (nrOfRows.get() > 0) {
                    // set attribute how many rows were selected
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));
                    fileToProcess = session.putAttribute(fileToProcess, RESULT_TABLENAME, tableName);
                    fileToProcess = session.putAttribute(fileToProcess, CoreAttributes.MIME_TYPE.key(), JdbcCommon.MIME_TYPE_AVRO_BINARY);
                    if (maxRowsPerFlowFile > 0) {
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.identifier", fragmentIdentifier);
                        fileToProcess = session.putAttribute(fileToProcess, "fragment.index", String.valueOf(fragmentIndex));
                    }
                    logger.info("{} contains {} Avro records; transferring to 'success'", new Object[] { fileToProcess, nrOfRows.get() });
                    session.getProvenanceReporter().receive(fileToProcess, jdbcURL, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
                    resultSetFlowFiles.add(fileToProcess);
                    // If we've reached the batch size, send out the flow files
                    if (outputBatchSize > 0 && resultSetFlowFiles.size() >= outputBatchSize) {
                        session.transfer(resultSetFlowFiles, REL_SUCCESS);
                        session.commit();
                        resultSetFlowFiles.clear();
                    }
                } else {
                    // If there were no rows returned, don't send the flowfile
                    session.remove(fileToProcess);
                    context.yield();
                    break;
                }
                fragmentIndex++;
                if (maxFragments > 0 && fragmentIndex >= maxFragments) {
                    break;
                }
            }
            // Even though the maximum value and total count are known at this point, to maintain consistent behavior if Output Batch Size is set, do not store the attributes
            if (outputBatchSize == 0) {
                for (int i = 0; i < resultSetFlowFiles.size(); i++) {
                    // Add maximum values as attributes
                    for (Map.Entry<String, String> entry : statePropertyMap.entrySet()) {
                        // Get just the column name from the key
                        String key = entry.getKey();
                        String colName = key.substring(key.lastIndexOf(NAMESPACE_DELIMITER) + NAMESPACE_DELIMITER.length());
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "maxvalue." + colName, entry.getValue()));
                    }
                    // set count on all FlowFiles
                    if (maxRowsPerFlowFile > 0) {
                        resultSetFlowFiles.set(i, session.putAttribute(resultSetFlowFiles.get(i), "fragment.count", Integer.toString(fragmentIndex)));
                    }
                }
            }
        } catch (final SQLException e) {
            throw e;
        }
        session.transfer(resultSetFlowFiles, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        logger.error("Unable to execute SQL select query {} due to {}", new Object[] { selectQuery, e });
        if (!resultSetFlowFiles.isEmpty()) {
            session.remove(resultSetFlowFiles);
        }
        context.yield();
    } finally {
        session.commit();
        try {
            // Update the state
            stateManager.setState(statePropertyMap, Scope.CLUSTER);
        } catch (IOException ioe) {
            getLogger().error("{} failed to update State Manager, maximum observed values will not be recorded", new Object[] { this, ioe });
        }
    }
}
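QueryDatabaseTable's maximum-value tracking follows the standard StateManager read-copy-update pattern: fetch the cluster-scoped StateMap, mutate a local copy, and write it back once the work has succeeded. The following is a stripped-down, hedged sketch of that pattern; the class name, method, and the "last.max.value" key are illustrative and not taken from the processor, while StateManager, StateMap, and Scope are the real NiFi state APIs.

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.nifi.components.state.Scope;
import org.apache.nifi.components.state.StateManager;
import org.apache.nifi.components.state.StateMap;

// Hypothetical helper showing the cluster-state update pattern used above.
public class StateUpdateSketch {

    public void updateMaxValue(final StateManager stateManager, final String newMaxValue) throws IOException {
        // Read the current cluster-scoped state.
        final StateMap stateMap = stateManager.getState(Scope.CLUSTER);

        // Work on a mutable copy; the map returned by StateMap.toMap() should be treated as read-only.
        final Map<String, String> statePropertyMap = new HashMap<>(stateMap.toMap());
        statePropertyMap.put("last.max.value", newMaxValue);

        // Persist the updated map once processing has succeeded.
        stateManager.setState(statePropertyMap, Scope.CLUSTER);
    }
}

In a processor, the StateManager comes from context.getStateManager(), as in the excerpt above, and the write-back is typically deferred until after the session has been committed.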
Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class ReplaceText, method onTrigger. The logger records each FlowFile transferred to 'success' after the selected replacement strategy rewrites its content.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final List<FlowFile> flowFiles = session.get(FlowFileFilters.newSizeBasedFilter(1, DataUnit.MB, 100));
    if (flowFiles.isEmpty()) {
        return;
    }
    final ComponentLog logger = getLogger();
    final String replacementStrategy = context.getProperty(REPLACEMENT_STRATEGY).getValue();
    final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).getValue());
    final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    final String evaluateMode = context.getProperty(EVALUATION_MODE).getValue();
    final byte[] buffer;
    if (replacementStrategy.equalsIgnoreCase(regexReplaceValue) || replacementStrategy.equalsIgnoreCase(literalReplaceValue)) {
        buffer = new byte[maxBufferSize];
    } else {
        buffer = null;
    }
    ReplacementStrategyExecutor replacementStrategyExecutor;
    switch (replacementStrategy) {
        case prependValue:
            replacementStrategyExecutor = new PrependReplace();
            break;
        case appendValue:
            replacementStrategyExecutor = new AppendReplace();
            break;
        case regexReplaceValue:
            // for backward compatibility - if replacement regex is ".*" then we will simply always replace the content.
            if (context.getProperty(SEARCH_VALUE).getValue().equals(".*")) {
                replacementStrategyExecutor = new AlwaysReplace();
            } else {
                replacementStrategyExecutor = new RegexReplace(buffer, context);
            }
            break;
        case literalReplaceValue:
            replacementStrategyExecutor = new LiteralReplace(buffer);
            break;
        case alwaysReplace:
            replacementStrategyExecutor = new AlwaysReplace();
            break;
        default:
            throw new AssertionError();
    }
    for (FlowFile flowFile : flowFiles) {
        if (evaluateMode.equalsIgnoreCase(ENTIRE_TEXT)) {
            if (flowFile.getSize() > maxBufferSize && replacementStrategyExecutor.isAllDataBufferedForEntireText()) {
                session.transfer(flowFile, REL_FAILURE);
                continue;
            }
        }
        final StopWatch stopWatch = new StopWatch(true);
        flowFile = replacementStrategyExecutor.replace(flowFile, session, context, evaluateMode, charset, maxBufferSize);
        logger.info("Transferred {} to 'success'", new Object[] { flowFile });
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    }
}
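Processors like this are typically exercised with the nifi-mock TestRunner, which drives onTrigger directly without a running NiFi instance. The following is a minimal sketch, assuming ReplaceText's SEARCH_VALUE and REPLACEMENT_VALUE property descriptors are accessible as shown and that the default Regex Replace strategy is in effect; the property values, content, and test class name are illustrative only.

import java.nio.charset.StandardCharsets;

import org.apache.nifi.util.MockFlowFile;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;
import org.junit.Test;

// Hypothetical unit test sketch for ReplaceText using the nifi-mock TestRunner.
public class ReplaceTextSketchTest {

    @Test
    public void testSimpleRegexReplace() {
        final TestRunner runner = TestRunners.newTestRunner(new ReplaceText());
        runner.setProperty(ReplaceText.SEARCH_VALUE, "hello");
        runner.setProperty(ReplaceText.REPLACEMENT_VALUE, "world");

        runner.enqueue("hello nifi".getBytes(StandardCharsets.UTF_8));
        runner.run();

        // With the default Regex Replace strategy, the content should be rewritten
        // and the FlowFile routed to 'success'.
        runner.assertAllFlowFilesTransferred(ReplaceText.REL_SUCCESS, 1);
        final MockFlowFile out = runner.getFlowFilesForRelationship(ReplaceText.REL_SUCCESS).get(0);
        out.assertContentEquals("world nifi");
    }
}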
Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class ReplaceTextWithMapping, method onTrigger. The logger records each FlowFile transferred to 'success' after its content is rewritten by the mapping-file callback.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    updateMapping(context);
    final List<FlowFile> flowFiles = session.get(5);
    if (flowFiles.isEmpty()) {
        return;
    }
    final ComponentLog logger = getLogger();
    final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    for (FlowFile flowFile : flowFiles) {
        if (flowFile.getSize() > maxBufferSize) {
            session.transfer(flowFile, REL_FAILURE);
            continue;
        }
        final StopWatch stopWatch = new StopWatch(true);
        flowFile = session.write(flowFile, new ReplaceTextCallback(context, flowFile, maxBufferSize));
        logger.info("Transferred {} to 'success'", new Object[] { flowFile });
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    }
}
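The ReplaceTextCallback passed to session.write above is a StreamCallback: it receives the FlowFile's current content as an InputStream and writes the replacement content to an OutputStream. The following is a generic, hypothetical sketch of that content-rewrite pattern, not the actual ReplaceTextCallback implementation; it assumes commons-io is available for IOUtils.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.IOUtils;
import org.apache.nifi.processor.io.StreamCallback;

// Hypothetical StreamCallback showing the content-rewrite pattern used by ReplaceTextWithMapping.
public class UppercaseCallback implements StreamCallback {

    @Override
    public void process(final InputStream in, final OutputStream out) throws IOException {
        // Read the existing content, transform it, and write the new content.
        final String original = IOUtils.toString(in, StandardCharsets.UTF_8);
        out.write(original.toUpperCase().getBytes(StandardCharsets.UTF_8));
    }
}

Inside onTrigger this would be applied as flowFile = session.write(flowFile, new UppercaseCallback()); only the FlowFile reference returned by the session should be used afterwards, as the excerpt above does.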
Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.
The class RouteOnAttribute, method onTrigger. The logger records how each FlowFile, and any clones, is routed based on the Expression Language conditions configured as dynamic properties.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final ComponentLog logger = getLogger();
    final Map<Relationship, PropertyValue> propMap = this.propertyMap;
    final Set<Relationship> matchingRelationships = new HashSet<>();
    for (final Map.Entry<Relationship, PropertyValue> entry : propMap.entrySet()) {
        final PropertyValue value = entry.getValue();
        final boolean matches = value.evaluateAttributeExpressions(flowFile).asBoolean();
        if (matches) {
            matchingRelationships.add(entry.getKey());
        }
    }
    final Set<Relationship> destinationRelationships = new HashSet<>();
    switch (context.getProperty(ROUTE_STRATEGY).getValue()) {
        case routeAllMatchValue:
            if (matchingRelationships.size() == propMap.size()) {
                destinationRelationships.add(REL_MATCH);
            } else {
                destinationRelationships.add(REL_NO_MATCH);
            }
            break;
        case routeAnyMatches:
            if (matchingRelationships.isEmpty()) {
                destinationRelationships.add(REL_NO_MATCH);
            } else {
                destinationRelationships.add(REL_MATCH);
            }
            break;
        case routePropertyNameValue:
        default:
            destinationRelationships.addAll(matchingRelationships);
            break;
    }
    if (destinationRelationships.isEmpty()) {
        logger.info("Routing {} to unmatched", new Object[] { flowFile });
        flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, REL_NO_MATCH.getName());
        session.getProvenanceReporter().route(flowFile, REL_NO_MATCH);
        session.transfer(flowFile, REL_NO_MATCH);
    } else {
        final Iterator<Relationship> relationshipNameIterator = destinationRelationships.iterator();
        final Relationship firstRelationship = relationshipNameIterator.next();
        final Map<Relationship, FlowFile> transferMap = new HashMap<>();
        final Set<FlowFile> clones = new HashSet<>();
        // make all the clones for any remaining relationships
        while (relationshipNameIterator.hasNext()) {
            final Relationship relationship = relationshipNameIterator.next();
            final FlowFile cloneFlowFile = session.clone(flowFile);
            clones.add(cloneFlowFile);
            transferMap.put(relationship, cloneFlowFile);
        }
        // now transfer any clones generated
        for (final Map.Entry<Relationship, FlowFile> entry : transferMap.entrySet()) {
            logger.info("Cloned {} into {} and routing clone to relationship {}", new Object[] { flowFile, entry.getValue(), entry.getKey() });
            FlowFile updatedFlowFile = session.putAttribute(entry.getValue(), ROUTE_ATTRIBUTE_KEY, entry.getKey().getName());
            session.getProvenanceReporter().route(updatedFlowFile, entry.getKey());
            session.transfer(updatedFlowFile, entry.getKey());
        }
        // now transfer the original flow file
        logger.info("Routing {} to {}", new Object[] { flowFile, firstRelationship });
        session.getProvenanceReporter().route(flowFile, firstRelationship);
        flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, firstRelationship.getName());
        session.transfer(flowFile, firstRelationship);
    }
}
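The propertyMap consulted above maps each user-defined relationship to the Expression Language PropertyValue that decides whether a FlowFile matches; RouteOnAttribute builds it from dynamic properties when the processor is scheduled. The following is a simplified, hypothetical sketch of that setup, in the spirit of the processor's scheduled-time initialization; the class and method names are invented, and in a real processor this method would live on the processor class itself so the @OnScheduled annotation takes effect.

import java.util.HashMap;
import java.util.Map;

import org.apache.nifi.annotation.lifecycle.OnScheduled;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.components.PropertyValue;
import org.apache.nifi.processor.ProcessContext;
import org.apache.nifi.processor.Relationship;

// Hypothetical sketch of building a Relationship -> PropertyValue map from dynamic properties.
public class DynamicRoutingSupport {

    private volatile Map<Relationship, PropertyValue> propertyMap = new HashMap<>();

    @OnScheduled
    public void buildPropertyMap(final ProcessContext context) {
        final Map<Relationship, PropertyValue> newPropertyMap = new HashMap<>();
        for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
            // Only user-added (dynamic) properties define routes: each property name becomes
            // a relationship and its value is an Expression Language condition.
            if (descriptor.isDynamic()) {
                final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
                newPropertyMap.put(relationship, context.getProperty(descriptor));
            }
        }
        propertyMap = newPropertyMap;
    }
}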