Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The onTrigger method of the ResizeImage class:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final int width, height;
    try {
        width = context.getProperty(IMAGE_WIDTH).evaluateAttributeExpressions(flowFile).asInteger();
        height = context.getProperty(IMAGE_HEIGHT).evaluateAttributeExpressions(flowFile).asInteger();
    } catch (final NumberFormatException nfe) {
        getLogger().error("Failed to resize {} due to {}", new Object[] { flowFile, nfe });
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    // map the configured scaling algorithm to the corresponding java.awt.Image scaling hint
    final String algorithm = context.getProperty(SCALING_ALGORITHM).getValue();
    final int hints;
    if (algorithm.equalsIgnoreCase(RESIZE_DEFAULT.getValue())) {
        hints = Image.SCALE_DEFAULT;
    } else if (algorithm.equalsIgnoreCase(RESIZE_FAST.getValue())) {
        hints = Image.SCALE_FAST;
    } else if (algorithm.equalsIgnoreCase(RESIZE_SMOOTH.getValue())) {
        hints = Image.SCALE_SMOOTH;
    } else if (algorithm.equalsIgnoreCase(RESIZE_REPLICATE.getValue())) {
        hints = Image.SCALE_REPLICATE;
    } else if (algorithm.equalsIgnoreCase(RESIZE_AREA_AVERAGING.getValue())) {
        hints = Image.SCALE_AREA_AVERAGING;
    } else {
        throw new AssertionError("Invalid Scaling Algorithm: " + algorithm);
    }
    // start timing so the provenance event can report how long the resize took
    final StopWatch stopWatch = new StopWatch(true);
    try {
        flowFile = session.write(flowFile, new StreamCallback() {
            @Override
            public void process(final InputStream rawIn, final OutputStream out) throws IOException {
                try (final BufferedInputStream in = new BufferedInputStream(rawIn)) {
                    final ImageInputStream iis = ImageIO.createImageInputStream(in);
                    if (iis == null) {
                        throw new ProcessException("FlowFile is not in a valid format");
                    }
                    final Iterator<ImageReader> readers = ImageIO.getImageReaders(iis);
                    if (!readers.hasNext()) {
                        throw new ProcessException("FlowFile is not in a valid format");
                    }
                    final ImageReader reader = readers.next();
                    final String formatName = reader.getFormatName();
                    reader.setInput(iis, true);
                    final BufferedImage image = reader.read(0);
                    final Image scaledImage = image.getScaledInstance(width, height, hints);
                    final BufferedImage scaledBufferedImg;
                    if (scaledImage instanceof BufferedImage) {
                        scaledBufferedImg = (BufferedImage) scaledImage;
                    } else {
                        scaledBufferedImg = new BufferedImage(scaledImage.getWidth(null), scaledImage.getHeight(null), image.getType());
                        final Graphics2D graphics = scaledBufferedImg.createGraphics();
                        try {
                            graphics.drawImage(scaledImage, 0, 0, null);
                        } finally {
                            graphics.dispose();
                        }
                    }
                    ImageIO.write(scaledBufferedImg, formatName, out);
                }
            }
        });
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final ProcessException pe) {
        getLogger().error("Failed to resize {} due to {}", new Object[] { flowFile, pe });
        session.transfer(flowFile, REL_FAILURE);
    }
}
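In the snippet above the StopWatch is started by its constructor (new StopWatch(true)) and read with getElapsed while still running, so the provenance event records how long the content rewrite took. Below is a minimal, self-contained sketch of the StopWatch calls that recur throughout these examples; the class name and the Thread.sleep placeholder are illustrative only, while the constructor, getElapsed, stop, and getDuration calls are exactly those used in the snippets.

import java.util.concurrent.TimeUnit;
import org.apache.nifi.util.StopWatch;

public class StopWatchSketch {
    public static void main(String[] args) throws InterruptedException {
        // passing true starts the watch immediately on construction
        final StopWatch stopWatch = new StopWatch(true);
        Thread.sleep(50); // placeholder for the work being timed
        // getElapsed can be queried while the watch is still running
        System.out.println("elapsed so far: " + stopWatch.getElapsed(TimeUnit.MILLISECONDS) + " ms");
        // stop() ends the measurement; getDuration then reports the final value
        stopWatch.stop();
        System.out.println("total duration: " + stopWatch.getDuration(TimeUnit.MILLISECONDS) + " ms");
    }
}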
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The onTrigger method of the PutSyslog class:
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final String protocol = context.getProperty(PROTOCOL).getValue();
    final int batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    final List<FlowFile> flowFiles = session.get(batchSize);
    if (flowFiles == null || flowFiles.isEmpty()) {
        final PruneResult result = pruneIdleSenders(context.getProperty(IDLE_EXPIRATION).evaluateAttributeExpressions().asTimePeriod(TimeUnit.MILLISECONDS).longValue());
        // yield if we closed an idle connection, or if there were no connections in the first place
        if (result.getNumClosed() > 0 || (result.getNumClosed() == 0 && result.getNumConsidered() == 0)) {
            context.yield();
        }
        return;
    }
    // get a sender from the pool, or create a new one if the pool is empty
    // if we can't create a new connection then route flow files to failure and yield
    ChannelSender sender = senderPool.poll();
    if (sender == null) {
        try {
            getLogger().debug("No available connections, creating a new one...");
            sender = createSender(context);
        } catch (IOException e) {
            for (final FlowFile flowFile : flowFiles) {
                getLogger().error("No available connections, and unable to create a new one, transferring {} to failure", new Object[] { flowFile }, e);
                session.transfer(flowFile, REL_FAILURE);
            }
            context.yield();
            return;
        }
    }
    final String port = context.getProperty(PORT).evaluateAttributeExpressions().getValue();
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions().getValue();
    final String transitUri = new StringBuilder().append(protocol).append("://").append(host).append(":").append(port).toString();
    final AtomicReference<IOException> exceptionHolder = new AtomicReference<>(null);
    final Charset charSet = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions().getValue());
    try {
        for (FlowFile flowFile : flowFiles) {
            final StopWatch timer = new StopWatch(true);
            final String priority = context.getProperty(MSG_PRIORITY).evaluateAttributeExpressions(flowFile).getValue();
            final String version = context.getProperty(MSG_VERSION).evaluateAttributeExpressions(flowFile).getValue();
            final String timestamp = context.getProperty(MSG_TIMESTAMP).evaluateAttributeExpressions(flowFile).getValue();
            final String hostname = context.getProperty(MSG_HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
            final String body = context.getProperty(MSG_BODY).evaluateAttributeExpressions(flowFile).getValue();
            final StringBuilder messageBuilder = new StringBuilder();
            messageBuilder.append("<").append(priority).append(">");
            if (version != null) {
                messageBuilder.append(version).append(" ");
            }
            messageBuilder.append(timestamp).append(" ").append(hostname).append(" ").append(body);
            final String fullMessage = messageBuilder.toString();
            getLogger().debug(fullMessage);
            if (isValid(fullMessage)) {
                try {
                    // now that we validated, add a new line if doing TCP
                    if (protocol.equals(TCP_VALUE.getValue())) {
                        messageBuilder.append('\n');
                    }
                    sender.send(messageBuilder.toString(), charSet);
                    timer.stop();
                    final long duration = timer.getDuration(TimeUnit.MILLISECONDS);
                    session.getProvenanceReporter().send(flowFile, transitUri, duration, true);
                    getLogger().info("Transferring {} to success", new Object[] { flowFile });
                    session.transfer(flowFile, REL_SUCCESS);
                } catch (IOException e) {
                    getLogger().error("Transferring {} to failure", new Object[] { flowFile }, e);
                    session.transfer(flowFile, REL_FAILURE);
                    exceptionHolder.set(e);
                }
            } else {
                getLogger().info("Transferring {} to invalid", new Object[] { flowFile });
                session.transfer(flowFile, REL_INVALID);
            }
        }
    } finally {
        // if the connection is still open and no IO errors happened then try to return, if pool is full then close
        if (sender.isConnected() && exceptionHolder.get() == null) {
            boolean returned = senderPool.offer(sender);
            if (!returned) {
                sender.close();
            }
        } else {
            // probably already closed here, but quietly close anyway to be safe
            sender.close();
        }
    }
}
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The onTrigger method of the PutUDP class:
/**
 * Event handler method invoked when the framework forwards a FlowFile to this Processor. The FlowFile contents are sent out as a UDP datagram using an acquired ChannelSender object. If the
 * contents are sent successfully, the FlowFile is forwarded to the success relationship; if an error occurs, the FlowFile is forwarded to the failure relationship.
 *
 * @param context
 *            - the current process context.
 *
 * @param sessionFactory
 *            - a factory object to obtain a process session.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        final PruneResult result = pruneIdleSenders(context.getProperty(IDLE_EXPIRATION).asTimePeriod(TimeUnit.MILLISECONDS).longValue());
        // yield if we closed an idle connection, or if there were no connections in the first place
        if (result.getNumClosed() > 0 || (result.getNumClosed() == 0 && result.getNumConsidered() == 0)) {
            context.yield();
        }
        return;
    }
    ChannelSender sender = acquireSender(context, session, flowFile);
    if (sender == null) {
        return;
    }
    try {
        byte[] content = readContent(session, flowFile);
        StopWatch stopWatch = new StopWatch(true);
        sender.send(content);
        session.getProvenanceReporter().send(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
        session.commit();
    } catch (Exception e) {
        getLogger().error("Exception while handling a process session, transferring {} to failure.", new Object[] { flowFile }, e);
        onFailure(context, session, flowFile);
    } finally {
        relinquishSender(sender);
    }
}
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The onTrigger method of the QueryRecord class:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
    final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
    final Map<FlowFile, Relationship> transformedFlowFiles = new HashMap<>();
    final Set<FlowFile> createdFlowFiles = new HashSet<>();
    // Determine the Record Reader's schema
    final RecordSchema readerSchema;
    try (final InputStream rawIn = session.read(original)) {
        final Map<String, String> originalAttributes = original.getAttributes();
        final RecordReader reader = recordReaderFactory.createRecordReader(originalAttributes, rawIn, getLogger());
        final RecordSchema inputSchema = reader.getSchema();
        readerSchema = recordSetWriterFactory.getSchema(originalAttributes, inputSchema);
    } catch (final Exception e) {
        getLogger().error("Failed to determine Record Schema from {}; routing to failure", new Object[] { original, e });
        session.transfer(original, REL_FAILURE);
        return;
    }
    // Determine the schema for writing the data
    final Map<String, String> originalAttributes = original.getAttributes();
    int recordsRead = 0;
    try {
        for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
            if (!descriptor.isDynamic()) {
                continue;
            }
            final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
            // We have to fork a child because we may need to read the input FlowFile more than once,
            // and we cannot call session.read() on the original FlowFile while we are within a write
            // callback for the original FlowFile.
            FlowFile transformed = session.create(original);
            boolean flowFileRemoved = false;
            try {
                final String sql = context.getProperty(descriptor).evaluateAttributeExpressions(original).getValue();
                final AtomicReference<WriteResult> writeResultRef = new AtomicReference<>();
                final QueryResult queryResult;
                if (context.getProperty(CACHE_SCHEMA).asBoolean()) {
                    queryResult = queryWithCache(session, original, sql, context, recordReaderFactory);
                } else {
                    queryResult = query(session, original, sql, context, recordReaderFactory);
                }
                final AtomicReference<String> mimeTypeRef = new AtomicReference<>();
                try {
                    final ResultSet rs = queryResult.getResultSet();
                    transformed = session.write(transformed, new OutputStreamCallback() {
                        @Override
                        public void process(final OutputStream out) throws IOException {
                            final ResultSetRecordSet recordSet;
                            final RecordSchema writeSchema;
                            try {
                                recordSet = new ResultSetRecordSet(rs, readerSchema);
                                final RecordSchema resultSetSchema = recordSet.getSchema();
                                writeSchema = recordSetWriterFactory.getSchema(originalAttributes, resultSetSchema);
                            } catch (final SQLException | SchemaNotFoundException e) {
                                throw new ProcessException(e);
                            }
                            try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(getLogger(), writeSchema, out)) {
                                writeResultRef.set(resultSetWriter.write(recordSet));
                                mimeTypeRef.set(resultSetWriter.getMimeType());
                            } catch (final Exception e) {
                                throw new IOException(e);
                            }
                        }
                    });
                } finally {
                    closeQuietly(queryResult);
                }
                recordsRead = Math.max(recordsRead, queryResult.getRecordsRead());
                final WriteResult result = writeResultRef.get();
                if (result.getRecordCount() == 0 && !context.getProperty(INCLUDE_ZERO_RECORD_FLOWFILES).asBoolean()) {
                    session.remove(transformed);
                    flowFileRemoved = true;
                    transformedFlowFiles.remove(transformed);
                    getLogger().info("Transformed {} but the result contained no data so will not pass on a FlowFile", new Object[] { original });
                } else {
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    if (result.getAttributes() != null) {
                        attributesToAdd.putAll(result.getAttributes());
                    }
                    attributesToAdd.put(CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
                    attributesToAdd.put("record.count", String.valueOf(result.getRecordCount()));
                    transformed = session.putAllAttributes(transformed, attributesToAdd);
                    transformedFlowFiles.put(transformed, relationship);
                    session.adjustCounter("Records Written", result.getRecordCount(), false);
                }
            } finally {
                // Ensure that we have the FlowFile in the set in case we throw any Exception
                if (!flowFileRemoved) {
                    createdFlowFiles.add(transformed);
                }
            }
        }
        final long elapsedMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);
        if (transformedFlowFiles.size() > 0) {
            session.getProvenanceReporter().fork(original, transformedFlowFiles.keySet(), elapsedMillis);
            for (final Map.Entry<FlowFile, Relationship> entry : transformedFlowFiles.entrySet()) {
                final FlowFile transformed = entry.getKey();
                final Relationship relationship = entry.getValue();
                session.getProvenanceReporter().route(transformed, relationship);
                session.transfer(transformed, relationship);
            }
        }
        getLogger().info("Successfully queried {} in {} millis", new Object[] { original, elapsedMillis });
        session.transfer(original, REL_ORIGINAL);
    } catch (final SQLException e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e.getCause() == null ? e : e.getCause() });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    } catch (final Exception e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    }
    session.adjustCounter("Records Read", recordsRead, false);
}
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The onTrigger method of the PutAzureEventHub class:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final byte[] buffer = new byte[(int) flowFile.getSize()];
    session.read(flowFile, in -> StreamUtils.fillBuffer(in, buffer));
    try {
        sendMessage(buffer);
    } catch (final ProcessException processException) {
        getLogger().error("Failed to send {} to EventHub due to {}; routing to failure", new Object[] { flowFile, processException }, processException);
        session.transfer(session.penalize(flowFile), REL_FAILURE);
        return;
    }
    final String namespace = context.getProperty(NAMESPACE).getValue();
    final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
    session.getProvenanceReporter().send(flowFile, "amqps://" + namespace + ".servicebus.windows.net" + "/" + eventHubName, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);
}
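All of the onTrigger methods above share the same timing pattern: construct a StopWatch immediately before the I/O operation, then pass stopWatch.getElapsed(TimeUnit.MILLISECONDS) (or stop() followed by getDuration) to the ProvenanceReporter when reporting the event. The fragment below is a hedged sketch of that recurring shape, written as if it lived inside a processor like those shown here; sendTimed, sendToRemote, and TRANSIT_URI are hypothetical placeholders and not part of any of the processors above.

// Recurring pattern distilled from the snippets above; sendToRemote(...) and
// TRANSIT_URI are illustrative stand-ins for the real send call and transit URI.
private void sendTimed(final ProcessSession session, FlowFile flowFile) {
    // start timing just before the transfer
    final StopWatch stopWatch = new StopWatch(true);
    try {
        sendToRemote(flowFile); // hypothetical I/O call (sender.send(...), sendMessage(...), etc.)
        // record the transfer and its duration in provenance, then route to success
        session.getProvenanceReporter().send(flowFile, TRANSIT_URI, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        getLogger().error("Failed to send {}; routing to failure", new Object[] { flowFile }, e);
        session.transfer(flowFile, REL_FAILURE);
    }
}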