Use of org.apache.nifi.util.StopWatch in project nifi by apache.
Class ExtractCCDAAttributes, method onScheduled().
@OnScheduled
public void onScheduled(final ProcessContext context) throws IOException {
    getLogger().debug("Loading packages");
    final StopWatch stopWatch = new StopWatch(true);
    // Load required MDHT packages
    System.setProperty("org.eclipse.emf.ecore.EPackage.Registry.INSTANCE", "org.eclipse.emf.ecore.impl.EPackageRegistryImpl");
    CDAPackage.eINSTANCE.eClass();
    HITSPPackage.eINSTANCE.eClass();
    CCDPackage.eINSTANCE.eClass();
    ConsolPackage.eINSTANCE.eClass();
    IHEPackage.eINSTANCE.eClass();
    stopWatch.stop();
    getLogger().debug("Loaded packages in {}", new Object[] { stopWatch.getDuration(TimeUnit.MILLISECONDS) });
    // Initialize JEXL
    jexl = new JexlBuilder().cache(1024).debug(false).silent(true).strict(false).create();
    jexlCtx = new MapContext();
    getLogger().debug("Loading mappings");
    // Load CDA mappings for parser
    loadMappings();
}
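The pattern above (construct a started StopWatch, do the work, stop, log the elapsed milliseconds) is all of the StopWatch API this snippet relies on. A minimal standalone sketch of just that pattern; the Thread.sleep stands in for the timed work and is not part of the NiFi source:

import java.util.concurrent.TimeUnit;
import org.apache.nifi.util.StopWatch;

public class TimingExample {
    public static void main(String[] args) throws InterruptedException {
        final StopWatch stopWatch = new StopWatch(true); // true = start immediately
        Thread.sleep(250);                               // stand-in for the work being timed
        stopWatch.stop();
        System.out.println("Took " + stopWatch.getDuration(TimeUnit.MILLISECONDS) + " ms");
    }
}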
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
Class ExtractCCDAAttributes, method processElement().
/**
 * Processes an element's children based on the parser mapping.
 * String values are added to the attributes map directly.
 * For a List, processList is called to iterate over and process each entry.
 * For any other Object, this method is called recursively.
 * When adding to the attributes map, the key is prefixed with the parent key.
 *
 * @param parent parent key for this element, used as a prefix for the attribute key
 * @param element element to be processed
 * @param attributes map of attributes to populate
 * @return map of processed data; values may be Strings or Maps of Strings
 */
protected Map<String, Object> processElement(String parent, Object element, Map<String, String> attributes) {
    final StopWatch stopWatch = new StopWatch(true);
    Map<String, Object> map = new LinkedHashMap<String, Object>();
    String name = element.getClass().getName();
    // get JEXL mappings for this element
    Map<String, String> jexlMap = processMap.get(name);
    if (jexlMap == null) {
        getLogger().warn("Missing mapping for element " + name);
        return null;
    }
    for (Entry<String, String> entry : jexlMap.entrySet()) {
        // evaluate JEXL for each child element
        jexlCtx.set("element", element);
        JexlExpression jexlExpr = jexl.createExpression(entry.getValue());
        Object value = jexlExpr.evaluate(jexlCtx);
        String key = entry.getKey();
        String prefix = parent != null ? parent + "." + key : key;
        addElement(map, prefix, key, value, attributes);
    }
    stopWatch.stop();
    getLogger().debug("Processed {} in {}", new Object[] { name, stopWatch.getDuration(TimeUnit.MILLISECONDS) });
    return map;
}
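The per-entry loop is plain Apache Commons JEXL 3: each mapping value is an expression evaluated against a context holding the current element. A minimal sketch of that evaluation step; the Element bean and the "element.id" expression are illustrative, not taken from the NiFi mapping files:

import org.apache.commons.jexl3.JexlBuilder;
import org.apache.commons.jexl3.JexlContext;
import org.apache.commons.jexl3.JexlEngine;
import org.apache.commons.jexl3.JexlExpression;
import org.apache.commons.jexl3.MapContext;

public class JexlExample {
    // illustrative stand-in for an MDHT model element
    public static class Element {
        public String getId() { return "doc-1"; }
    }

    public static void main(String[] args) {
        final JexlEngine jexl = new JexlBuilder().cache(1024).silent(true).strict(false).create();
        final JexlContext ctx = new MapContext();
        ctx.set("element", new Element());
        // "element.id" resolves to Element.getId() via JEXL property access
        final JexlExpression expr = jexl.createExpression("element.id");
        System.out.println(expr.evaluate(ctx)); // prints doc-1
    }
}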
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
Class AbstractHeartbeatMonitor, method monitorHeartbeats().
/**
 * Fetches all of the latest heartbeats and updates the Cluster Coordinator
 * as appropriate, based on the heartbeats received.
 *
 * Visible for testing.
 */
protected synchronized void monitorHeartbeats() {
    final NodeIdentifier activeCoordinator = clusterCoordinator.getElectedActiveCoordinatorNode();
    if (activeCoordinator != null && !activeCoordinator.equals(clusterCoordinator.getLocalNodeIdentifier())) {
        // Occasionally Curator appears to not notify us that we have lost the elected leader role, or does so
        // on a very large delay. So before we kick the node out of the cluster, we want to first check what the
        // ZNode in ZooKeeper says, and ensure that this is the node that is being advertised as the appropriate
        // destination for heartbeats.
        logger.debug("It appears that this node is no longer the actively elected cluster coordinator. Will not request that node disconnect.");
        return;
    }
    final Map<NodeIdentifier, NodeHeartbeat> latestHeartbeats = getLatestHeartbeats();
    if (latestHeartbeats == null || latestHeartbeats.isEmpty()) {
        logger.debug("Received no new heartbeats. Will not disconnect any nodes due to lack of heartbeat");
        return;
    }
    final StopWatch procStopWatch = new StopWatch(true);
    for (final NodeHeartbeat heartbeat : latestHeartbeats.values()) {
        try {
            processHeartbeat(heartbeat);
        } catch (final Exception e) {
            clusterCoordinator.reportEvent(null, Severity.ERROR,
                "Received heartbeat from " + heartbeat.getNodeIdentifier() + " but failed to process heartbeat due to " + e);
            logger.error("Failed to process heartbeat from {} due to {}", heartbeat.getNodeIdentifier(), e.toString());
            logger.error("", e);
        }
    }
    procStopWatch.stop();
    logger.info("Finished processing {} heartbeats in {}", latestHeartbeats.size(), procStopWatch.getDuration());
    // Disconnect any node that hasn't sent a heartbeat in a long time (8 times the heartbeat interval)
    final long maxMillis = heartbeatIntervalMillis * 8;
    final long currentTimestamp = System.currentTimeMillis();
    final long threshold = currentTimestamp - maxMillis;
    // consider all connected nodes
    for (final NodeIdentifier nodeIdentifier : clusterCoordinator.getNodeIdentifiers(NodeConnectionState.CONNECTED)) {
        // consider the most recent heartbeat for this node
        final NodeHeartbeat heartbeat = latestHeartbeats.get(nodeIdentifier);
        if (heartbeat == null) {
            // if there is no heartbeat for this node, see if we purged the heartbeats beyond the allowed heartbeat threshold
            final long purgeTimestamp = getPurgeTimestamp();
            if (purgeTimestamp < threshold) {
                final long secondsSinceLastPurge = TimeUnit.MILLISECONDS.toSeconds(currentTimestamp - purgeTimestamp);
                clusterCoordinator.disconnectionRequestedByNode(nodeIdentifier, DisconnectionCode.LACK_OF_HEARTBEAT,
                    "Have not received a heartbeat from node in " + secondsSinceLastPurge + " seconds");
            }
        } else if (heartbeat.getTimestamp() < threshold) {
            // the most recent heartbeat is older than the allowed heartbeat threshold
            final long secondsSinceLastHeartbeat = TimeUnit.MILLISECONDS.toSeconds(currentTimestamp - heartbeat.getTimestamp());
            clusterCoordinator.disconnectionRequestedByNode(nodeIdentifier, DisconnectionCode.LACK_OF_HEARTBEAT,
                "Have not received a heartbeat from node in " + secondsSinceLastHeartbeat + " seconds");
            try {
                removeHeartbeat(nodeIdentifier);
            } catch (final Exception e) {
                logger.warn("Failed to remove heartbeat for {} due to {}", nodeIdentifier, e.toString());
                logger.warn("", e);
            }
        }
    }
}
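The disconnect decision reduces to timestamp arithmetic: a node is stale once its last heartbeat (or the last purge time, if no heartbeat is held for it) is older than eight heartbeat intervals. A minimal sketch of that check with made-up values:

import java.util.concurrent.TimeUnit;

public class StalenessExample {
    public static void main(String[] args) {
        final long heartbeatIntervalMillis = 5_000L; // example interval, not a NiFi default
        final long currentTimestamp = System.currentTimeMillis();
        final long threshold = currentTimestamp - heartbeatIntervalMillis * 8;

        final long lastHeartbeat = currentTimestamp - 60_000L; // 60 seconds ago: stale
        if (lastHeartbeat < threshold) {
            final long secs = TimeUnit.MILLISECONDS.toSeconds(currentTimestamp - lastHeartbeat);
            System.out.println("Have not received a heartbeat from node in " + secs + " seconds");
        }
    }
}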
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
Class SmtpConsumer, method data().
@Override
public void data(final InputStream data) throws RejectException, TooMuchDataException, IOException {
    final ProcessSession processSession = sessionFactory.createSession();
    final StopWatch watch = new StopWatch();
    watch.start();
    try {
        FlowFile flowFile = processSession.create();
        final AtomicBoolean limitExceeded = new AtomicBoolean(false);
        flowFile = processSession.write(flowFile, (OutputStream out) -> {
            final LimitingInputStream lis = new LimitingInputStream(data, maxMessageSize);
            IOUtils.copy(lis, out);
            if (lis.hasReachedLimit()) {
                limitExceeded.set(true);
            }
        });
        if (limitExceeded.get()) {
            throw new TooMuchDataException("Maximum message size limit reached - client must send smaller messages");
        }
        flowFile = processSession.putAllAttributes(flowFile, extractMessageAttributes());
        watch.stop();
        processSession.getProvenanceReporter().receive(flowFile, "smtp://" + host + ":" + port + "/", watch.getDuration(TimeUnit.MILLISECONDS));
        processSession.transfer(flowFile, ListenSMTP.REL_SUCCESS);
        processSession.commit();
    } catch (FlowFileAccessException | IllegalStateException | RejectException | IOException ex) {
        log.error("Unable to fully process input due to " + ex.getMessage(), ex);
        throw ex;
    } finally {
        // always roll back; this is safe because rollback is a no-op on a session that has already committed
        processSession.rollback();
    }
}
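The size cap works by wrapping the SMTP data stream in NiFi's LimitingInputStream, which stops returning bytes at the configured limit; the caller then checks hasReachedLimit() to tell a truncated copy from a complete one. A plain-Java sketch of the same idea, using nothing beyond java.io; this illustrates the pattern and is not the NiFi class itself:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Caps how many bytes can be read; reports EOF once the limit is hit.
public class SizeCappedInputStream extends FilterInputStream {
    private final long limit;
    private long bytesRead;
    private boolean reachedLimit;

    public SizeCappedInputStream(final InputStream in, final long limit) {
        super(in);
        this.limit = limit;
    }

    @Override
    public int read() throws IOException {
        if (bytesRead >= limit) {
            reachedLimit = true;
            return -1;
        }
        final int b = super.read();
        if (b >= 0) {
            bytesRead++;
        }
        return b;
    }

    @Override
    public int read(final byte[] buf, final int off, final int len) throws IOException {
        if (bytesRead >= limit) {
            reachedLimit = true;
            return -1;
        }
        final int allowed = (int) Math.min(len, limit - bytesRead);
        final int n = super.read(buf, off, allowed);
        if (n > 0) {
            bytesRead += n;
        }
        return n;
    }

    public boolean hasReachedLimit() {
        return reachedLimit;
    }
}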
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
Class JmsConsumer, method consume().
public void consume(final ProcessContext context, final ProcessSession session, final WrappedMessageConsumer wrappedConsumer) throws ProcessException {
    final ComponentLog logger = getLogger();
    final MessageConsumer consumer = wrappedConsumer.getConsumer();
    final boolean clientAcknowledge = context.getProperty(ACKNOWLEDGEMENT_MODE).getValue().equalsIgnoreCase(ACK_MODE_CLIENT);
    final long timeout = context.getProperty(TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS);
    final boolean addAttributes = context.getProperty(JMS_PROPS_TO_ATTRIBUTES).asBoolean();
    final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
    final JmsProcessingSummary processingSummary = new JmsProcessingSummary();
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < batchSize; i++) {
        final Message message;
        try {
            // Wait up to the configured timeout for the first message; after that, accept only
            // messages that are immediately available.
            if (processingSummary.getMessagesReceived() == 0) {
                message = consumer.receive(timeout);
            } else {
                message = consumer.receiveNoWait();
            }
        } catch (final JMSException e) {
            logger.error("Failed to receive JMS Message due to {}", e);
            wrappedConsumer.close(logger);
            break;
        }
        if (message == null) {
            // if no messages, we're done
            break;
        }
        try {
            processingSummary.add(map2FlowFile(context, session, message, addAttributes, logger));
        } catch (Exception e) {
            logger.error("Failed to process JMS Message due to {}", e);
            wrappedConsumer.close(logger);
            break;
        }
    }
    if (processingSummary.getFlowFilesCreated() == 0) {
        context.yield();
        return;
    }
    session.commit();
    stopWatch.stop();
    if (processingSummary.getFlowFilesCreated() > 0) {
        final float secs = stopWatch.getDuration(TimeUnit.MILLISECONDS) / 1000F;
        final float messagesPerSec = processingSummary.getMessagesReceived() / secs;
        final String dataRate = stopWatch.calculateDataRate(processingSummary.getBytesReceived());
        logger.info("Received {} messages in {} milliseconds, at a rate of {} messages/sec or {}",
            new Object[] { processingSummary.getMessagesReceived(), stopWatch.getDuration(TimeUnit.MILLISECONDS), messagesPerSec, dataRate });
    }
    // if we need to acknowledge the messages, do so now.
    final Message lastMessage = processingSummary.getLastMessageReceived();
    if (clientAcknowledge && lastMessage != null) {
        try {
            // acknowledge all received messages by acknowledging only the last.
            lastMessage.acknowledge();
        } catch (final JMSException e) {
            logger.error("Failed to acknowledge {} JMS Message(s). This may result in duplicate messages. Reason for failure: {}",
                new Object[] { processingSummary.getMessagesReceived(), e });
        }
    }
}
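Acknowledging only the last message works because, in JMS CLIENT_ACKNOWLEDGE mode, Message.acknowledge() acknowledges every message the session has delivered so far, not just the message it is called on. A minimal sketch of that mode using the plain javax.jms API; the connection factory, queue name, and batch size here are placeholders:

import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.Session;

public class ClientAckExample {
    public static void consume(final ConnectionFactory factory) throws JMSException {
        final Connection connection = factory.createConnection();
        try {
            connection.start();
            // non-transacted session in client-acknowledge mode
            final Session session = connection.createSession(false, Session.CLIENT_ACKNOWLEDGE);
            final MessageConsumer consumer = session.createConsumer(session.createQueue("example.queue"));

            Message last = null;
            for (int i = 0; i < 10; i++) {
                // block briefly for the first message, then drain what is immediately available
                final Message m = (last == null) ? consumer.receive(5_000L) : consumer.receiveNoWait();
                if (m == null) {
                    break;
                }
                last = m;
            }
            if (last != null) {
                last.acknowledge(); // acknowledges all messages received on this session
            }
        } finally {
            connection.close();
        }
    }
}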