Example 61 with StopWatch

Use of org.apache.nifi.util.StopWatch in project nifi by apache.

From the class ResizeImage, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final int width, height;
    try {
        width = context.getProperty(IMAGE_WIDTH).evaluateAttributeExpressions(flowFile).asInteger();
        height = context.getProperty(IMAGE_HEIGHT).evaluateAttributeExpressions(flowFile).asInteger();
    } catch (final NumberFormatException nfe) {
        getLogger().error("Failed to resize {} due to {}", new Object[] { flowFile, nfe });
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    final String algorithm = context.getProperty(SCALING_ALGORITHM).getValue();
    final int hints;
    if (algorithm.equalsIgnoreCase(RESIZE_DEFAULT.getValue())) {
        hints = Image.SCALE_DEFAULT;
    } else if (algorithm.equalsIgnoreCase(RESIZE_FAST.getValue())) {
        hints = Image.SCALE_FAST;
    } else if (algorithm.equalsIgnoreCase(RESIZE_SMOOTH.getValue())) {
        hints = Image.SCALE_SMOOTH;
    } else if (algorithm.equalsIgnoreCase(RESIZE_REPLICATE.getValue())) {
        hints = Image.SCALE_REPLICATE;
    } else if (algorithm.equalsIgnoreCase(RESIZE_AREA_AVERAGING.getValue())) {
        hints = Image.SCALE_AREA_AVERAGING;
    } else {
        throw new AssertionError("Invalid Scaling Algorithm: " + algorithm);
    }
    final StopWatch stopWatch = new StopWatch(true);
    try {
        flowFile = session.write(flowFile, new StreamCallback() {

            @Override
            public void process(final InputStream rawIn, final OutputStream out) throws IOException {
                try (final BufferedInputStream in = new BufferedInputStream(rawIn)) {
                    final ImageInputStream iis = ImageIO.createImageInputStream(in);
                    if (iis == null) {
                        throw new ProcessException("FlowFile is not in a valid format");
                    }
                    final Iterator<ImageReader> readers = ImageIO.getImageReaders(iis);
                    if (!readers.hasNext()) {
                        throw new ProcessException("FlowFile is not in a valid format");
                    }
                    final ImageReader reader = readers.next();
                    final String formatName = reader.getFormatName();
                    reader.setInput(iis, true);
                    final BufferedImage image = reader.read(0);
                    final Image scaledImage = image.getScaledInstance(width, height, hints);
                    final BufferedImage scaledBufferedImg;
                    if (scaledImage instanceof BufferedImage) {
                        scaledBufferedImg = (BufferedImage) scaledImage;
                    } else {
                        scaledBufferedImg = new BufferedImage(scaledImage.getWidth(null), scaledImage.getHeight(null), image.getType());
                        final Graphics2D graphics = scaledBufferedImg.createGraphics();
                        try {
                            graphics.drawImage(scaledImage, 0, 0, null);
                        } finally {
                            graphics.dispose();
                        }
                    }
                    ImageIO.write(scaledBufferedImg, formatName, out);
                }
            }
        });
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final ProcessException pe) {
        getLogger().error("Failed to resize {} due to {}", new Object[] { flowFile, pe });
        session.transfer(flowFile, REL_FAILURE);
    }
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), BufferedInputStream (java.io.BufferedInputStream), ImageInputStream (javax.imageio.stream.ImageInputStream), InputStream (java.io.InputStream), OutputStream (java.io.OutputStream), Image (java.awt.Image), BufferedImage (java.awt.image.BufferedImage), StreamCallback (org.apache.nifi.processor.io.StreamCallback), StopWatch (org.apache.nifi.util.StopWatch), Graphics2D (java.awt.Graphics2D), ProcessException (org.apache.nifi.processor.exception.ProcessException), ImageReader (javax.imageio.ImageReader)
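
The pattern worth noting in this and most of the following examples: a StopWatch is constructed with true (which starts it immediately) as soon as the processor has work to do, the content is transformed or sent, and the elapsed time is handed to the provenance reporter. Below is a minimal sketch of that pattern, not taken from the NiFi sources: the relationships REL_SUCCESS and REL_FAILURE and the empty callback body are placeholders, the StreamCallback is written as a Java 8 lambda instead of the anonymous class used above, and the StopWatch and ProcessSession calls are the same NiFi APIs shown in the example.

// Minimal sketch of the shared timing pattern (assumes the imports listed above:
// org.apache.nifi.util.StopWatch, org.apache.nifi.processor.*, org.apache.nifi.flowfile.FlowFile,
// java.util.concurrent.TimeUnit).
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true); // true = start the watch on construction
    try {
        flowFile = session.write(flowFile, (in, out) -> {
            // transform the content here, reading from in and writing to out
        });
        // elapsed time since construction, read without stopping the watch
        session.getProvenanceReporter().modifyContent(flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final ProcessException pe) {
        session.transfer(flowFile, REL_FAILURE);
    }
}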

Example 62 with StopWatch

Use of org.apache.nifi.util.StopWatch in project nifi by apache.

From the class PutSyslog, method onTrigger.

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final String protocol = context.getProperty(PROTOCOL).getValue();
    final int batchSize = context.getProperty(BATCH_SIZE).evaluateAttributeExpressions().asInteger();
    final List<FlowFile> flowFiles = session.get(batchSize);
    if (flowFiles == null || flowFiles.isEmpty()) {
        final PruneResult result = pruneIdleSenders(context.getProperty(IDLE_EXPIRATION).evaluateAttributeExpressions().asTimePeriod(TimeUnit.MILLISECONDS).longValue());
        // yield if we closed an idle connection, or if there were no connections in the first place
        if (result.getNumClosed() > 0 || (result.getNumClosed() == 0 && result.getNumConsidered() == 0)) {
            context.yield();
        }
        return;
    }
    // get a sender from the pool, or create a new one if the pool is empty
    // if we can't create a new connection then route flow files to failure and yield
    ChannelSender sender = senderPool.poll();
    if (sender == null) {
        try {
            getLogger().debug("No available connections, creating a new one...");
            sender = createSender(context);
        } catch (IOException e) {
            for (final FlowFile flowFile : flowFiles) {
                getLogger().error("No available connections, and unable to create a new one, transferring {} to failure", new Object[] { flowFile }, e);
                session.transfer(flowFile, REL_FAILURE);
            }
            context.yield();
            return;
        }
    }
    final String port = context.getProperty(PORT).evaluateAttributeExpressions().getValue();
    final String host = context.getProperty(HOSTNAME).evaluateAttributeExpressions().getValue();
    final String transitUri = new StringBuilder().append(protocol).append("://").append(host).append(":").append(port).toString();
    final AtomicReference<IOException> exceptionHolder = new AtomicReference<>(null);
    final Charset charSet = Charset.forName(context.getProperty(CHARSET).evaluateAttributeExpressions().getValue());
    try {
        for (FlowFile flowFile : flowFiles) {
            final StopWatch timer = new StopWatch(true);
            final String priority = context.getProperty(MSG_PRIORITY).evaluateAttributeExpressions(flowFile).getValue();
            final String version = context.getProperty(MSG_VERSION).evaluateAttributeExpressions(flowFile).getValue();
            final String timestamp = context.getProperty(MSG_TIMESTAMP).evaluateAttributeExpressions(flowFile).getValue();
            final String hostname = context.getProperty(MSG_HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
            final String body = context.getProperty(MSG_BODY).evaluateAttributeExpressions(flowFile).getValue();
            final StringBuilder messageBuilder = new StringBuilder();
            messageBuilder.append("<").append(priority).append(">");
            if (version != null) {
                messageBuilder.append(version).append(" ");
            }
            messageBuilder.append(timestamp).append(" ").append(hostname).append(" ").append(body);
            final String fullMessage = messageBuilder.toString();
            getLogger().debug(fullMessage);
            if (isValid(fullMessage)) {
                try {
                    // now that we validated, add a new line if doing TCP
                    if (protocol.equals(TCP_VALUE.getValue())) {
                        messageBuilder.append('\n');
                    }
                    sender.send(messageBuilder.toString(), charSet);
                    timer.stop();
                    final long duration = timer.getDuration(TimeUnit.MILLISECONDS);
                    session.getProvenanceReporter().send(flowFile, transitUri, duration, true);
                    getLogger().info("Transferring {} to success", new Object[] { flowFile });
                    session.transfer(flowFile, REL_SUCCESS);
                } catch (IOException e) {
                    getLogger().error("Transferring {} to failure", new Object[] { flowFile }, e);
                    session.transfer(flowFile, REL_FAILURE);
                    exceptionHolder.set(e);
                }
            } else {
                getLogger().info("Transferring {} to invalid", new Object[] { flowFile });
                session.transfer(flowFile, REL_INVALID);
            }
        }
    } finally {
        // if the connection is still open and no IO errors happened then try to return, if pool is full then close
        if (sender.isConnected() && exceptionHolder.get() == null) {
            boolean returned = senderPool.offer(sender);
            if (!returned) {
                sender.close();
            }
        } else {
            // probably already closed here, but quietly close anyway to be safe
            sender.close();
        }
    }
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), DatagramChannelSender (org.apache.nifi.processor.util.put.sender.DatagramChannelSender), SSLSocketChannelSender (org.apache.nifi.processor.util.put.sender.SSLSocketChannelSender), ChannelSender (org.apache.nifi.processor.util.put.sender.ChannelSender), SocketChannelSender (org.apache.nifi.processor.util.put.sender.SocketChannelSender), Charset (java.nio.charset.Charset), AtomicReference (java.util.concurrent.atomic.AtomicReference), IOException (java.io.IOException), StopWatch (org.apache.nifi.util.StopWatch)
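
PutSyslog uses the StopWatch slightly differently from the other examples on this page: each FlowFile gets its own watch, which is explicitly stopped with stop() before the frozen value is read with getDuration(...), whereas the other processors read getElapsed(...) from a still-running watch. A minimal sketch contrasting the two calls; sender, message, charset and doWork() are placeholders, while the StopWatch methods are the ones used in these examples.

// Per-FlowFile timing as in PutSyslog: stop the watch, then read its duration.
final StopWatch timer = new StopWatch(true);
sender.send(message, charset);                         // the timed operation
timer.stop();                                          // freeze the watch
final long durationMillis = timer.getDuration(TimeUnit.MILLISECONDS);

// Timing as in the other examples: read the elapsed time without stopping.
final StopWatch stopWatch = new StopWatch(true);
doWork();                                              // placeholder for the timed operation
final long elapsedMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);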

Example 63 with StopWatch

Use of org.apache.nifi.util.StopWatch in project nifi by apache.

From the class PutUDP, method onTrigger.

/**
 * Event handler method invoked when a FlowFile is forwarded to the Processor by the framework. The FlowFile contents are sent out as a UDP datagram using an acquired ChannelSender object. If the
 * contents are sent successfully, the FlowFile is forwarded to the success relationship; if an error occurs, it is forwarded to the failure relationship.
 *
 * @param context
 *            - the current process context.
 *
 * @param sessionFactory
 *            - a factory object to obtain a process session.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSessionFactory sessionFactory) throws ProcessException {
    final ProcessSession session = sessionFactory.createSession();
    final FlowFile flowFile = session.get();
    if (flowFile == null) {
        final PruneResult result = pruneIdleSenders(context.getProperty(IDLE_EXPIRATION).asTimePeriod(TimeUnit.MILLISECONDS).longValue());
        // yield if we closed an idle connection, or if there were no connections in the first place
        if (result.getNumClosed() > 0 || (result.getNumClosed() == 0 && result.getNumConsidered() == 0)) {
            context.yield();
        }
        return;
    }
    ChannelSender sender = acquireSender(context, session, flowFile);
    if (sender == null) {
        return;
    }
    try {
        byte[] content = readContent(session, flowFile);
        StopWatch stopWatch = new StopWatch(true);
        sender.send(content);
        session.getProvenanceReporter().send(flowFile, transitUri, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
        session.commit();
    } catch (Exception e) {
        getLogger().error("Exception while handling a process session, transferring {} to failure.", new Object[] { flowFile }, e);
        onFailure(context, session, flowFile);
    } finally {
        relinquishSender(sender);
    }
}
Also used: ProcessSession (org.apache.nifi.processor.ProcessSession), FlowFile (org.apache.nifi.flowfile.FlowFile), ChannelSender (org.apache.nifi.processor.util.put.sender.ChannelSender), IOException (java.io.IOException), ProcessException (org.apache.nifi.processor.exception.ProcessException), StopWatch (org.apache.nifi.util.StopWatch)

Example 64 with StopWatch

Use of org.apache.nifi.util.StopWatch in project nifi by apache.

From the class QueryRecord, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final RecordSetWriterFactory recordSetWriterFactory = context.getProperty(RECORD_WRITER_FACTORY).asControllerService(RecordSetWriterFactory.class);
    final RecordReaderFactory recordReaderFactory = context.getProperty(RECORD_READER_FACTORY).asControllerService(RecordReaderFactory.class);
    final Map<FlowFile, Relationship> transformedFlowFiles = new HashMap<>();
    final Set<FlowFile> createdFlowFiles = new HashSet<>();
    // Determine the Record Reader's schema
    final RecordSchema readerSchema;
    try (final InputStream rawIn = session.read(original)) {
        final Map<String, String> originalAttributes = original.getAttributes();
        final RecordReader reader = recordReaderFactory.createRecordReader(originalAttributes, rawIn, getLogger());
        final RecordSchema inputSchema = reader.getSchema();
        readerSchema = recordSetWriterFactory.getSchema(originalAttributes, inputSchema);
    } catch (final Exception e) {
        getLogger().error("Failed to determine Record Schema from {}; routing to failure", new Object[] { original, e });
        session.transfer(original, REL_FAILURE);
        return;
    }
    // Determine the schema for writing the data
    final Map<String, String> originalAttributes = original.getAttributes();
    int recordsRead = 0;
    try {
        for (final PropertyDescriptor descriptor : context.getProperties().keySet()) {
            if (!descriptor.isDynamic()) {
                continue;
            }
            final Relationship relationship = new Relationship.Builder().name(descriptor.getName()).build();
            // We have to fork a child because we may need to read the input FlowFile more than once,
            // and we cannot call session.read() on the original FlowFile while we are within a write
            // callback for the original FlowFile.
            FlowFile transformed = session.create(original);
            boolean flowFileRemoved = false;
            try {
                final String sql = context.getProperty(descriptor).evaluateAttributeExpressions(original).getValue();
                final AtomicReference<WriteResult> writeResultRef = new AtomicReference<>();
                final QueryResult queryResult;
                if (context.getProperty(CACHE_SCHEMA).asBoolean()) {
                    queryResult = queryWithCache(session, original, sql, context, recordReaderFactory);
                } else {
                    queryResult = query(session, original, sql, context, recordReaderFactory);
                }
                final AtomicReference<String> mimeTypeRef = new AtomicReference<>();
                try {
                    final ResultSet rs = queryResult.getResultSet();
                    transformed = session.write(transformed, new OutputStreamCallback() {

                        @Override
                        public void process(final OutputStream out) throws IOException {
                            final ResultSetRecordSet recordSet;
                            final RecordSchema writeSchema;
                            try {
                                recordSet = new ResultSetRecordSet(rs, readerSchema);
                                final RecordSchema resultSetSchema = recordSet.getSchema();
                                writeSchema = recordSetWriterFactory.getSchema(originalAttributes, resultSetSchema);
                            } catch (final SQLException | SchemaNotFoundException e) {
                                throw new ProcessException(e);
                            }
                            try (final RecordSetWriter resultSetWriter = recordSetWriterFactory.createWriter(getLogger(), writeSchema, out)) {
                                writeResultRef.set(resultSetWriter.write(recordSet));
                                mimeTypeRef.set(resultSetWriter.getMimeType());
                            } catch (final Exception e) {
                                throw new IOException(e);
                            }
                        }
                    });
                } finally {
                    closeQuietly(queryResult);
                }
                recordsRead = Math.max(recordsRead, queryResult.getRecordsRead());
                final WriteResult result = writeResultRef.get();
                if (result.getRecordCount() == 0 && !context.getProperty(INCLUDE_ZERO_RECORD_FLOWFILES).asBoolean()) {
                    session.remove(transformed);
                    flowFileRemoved = true;
                    transformedFlowFiles.remove(transformed);
                    getLogger().info("Transformed {} but the result contained no data so will not pass on a FlowFile", new Object[] { original });
                } else {
                    final Map<String, String> attributesToAdd = new HashMap<>();
                    if (result.getAttributes() != null) {
                        attributesToAdd.putAll(result.getAttributes());
                    }
                    attributesToAdd.put(CoreAttributes.MIME_TYPE.key(), mimeTypeRef.get());
                    attributesToAdd.put("record.count", String.valueOf(result.getRecordCount()));
                    transformed = session.putAllAttributes(transformed, attributesToAdd);
                    transformedFlowFiles.put(transformed, relationship);
                    session.adjustCounter("Records Written", result.getRecordCount(), false);
                }
            } finally {
                // Ensure that we have the FlowFile in the set in case we throw any Exception
                if (!flowFileRemoved) {
                    createdFlowFiles.add(transformed);
                }
            }
        }
        final long elapsedMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);
        if (transformedFlowFiles.size() > 0) {
            session.getProvenanceReporter().fork(original, transformedFlowFiles.keySet(), elapsedMillis);
            for (final Map.Entry<FlowFile, Relationship> entry : transformedFlowFiles.entrySet()) {
                final FlowFile transformed = entry.getKey();
                final Relationship relationship = entry.getValue();
                session.getProvenanceReporter().route(transformed, relationship);
                session.transfer(transformed, relationship);
            }
        }
        getLogger().info("Successfully queried {} in {} millis", new Object[] { original, elapsedMillis });
        session.transfer(original, REL_ORIGINAL);
    } catch (final SQLException e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e.getCause() == null ? e : e.getCause() });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    } catch (final Exception e) {
        getLogger().error("Unable to query {} due to {}", new Object[] { original, e });
        session.remove(createdFlowFiles);
        session.transfer(original, REL_FAILURE);
    }
    session.adjustCounter("Records Read", recordsRead, false);
}
Also used: HashMap (java.util.HashMap), SQLException (java.sql.SQLException), RecordReader (org.apache.nifi.serialization.RecordReader), OutputStream (java.io.OutputStream), RecordSetWriter (org.apache.nifi.serialization.RecordSetWriter), RecordSetWriterFactory (org.apache.nifi.serialization.RecordSetWriterFactory), ResultSet (java.sql.ResultSet), OutputStreamCallback (org.apache.nifi.processor.io.OutputStreamCallback), RecordSchema (org.apache.nifi.serialization.record.RecordSchema), HashSet (java.util.HashSet), FlowFile (org.apache.nifi.flowfile.FlowFile), PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor), InputStream (java.io.InputStream), AtomicReference (java.util.concurrent.atomic.AtomicReference), IOException (java.io.IOException), ResultSetRecordSet (org.apache.nifi.serialization.record.ResultSetRecordSet), SchemaNotFoundException (org.apache.nifi.schema.access.SchemaNotFoundException), ProcessException (org.apache.nifi.processor.exception.ProcessException), StopWatch (org.apache.nifi.util.StopWatch), RecordReaderFactory (org.apache.nifi.serialization.RecordReaderFactory), WriteResult (org.apache.nifi.serialization.WriteResult), Relationship (org.apache.nifi.processor.Relationship), DynamicRelationship (org.apache.nifi.annotation.behavior.DynamicRelationship), Map (java.util.Map)

Example 65 with StopWatch

Use of org.apache.nifi.util.StopWatch in project nifi by apache.

From the class PutAzureEventHub, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final byte[] buffer = new byte[(int) flowFile.getSize()];
    session.read(flowFile, in -> StreamUtils.fillBuffer(in, buffer));
    try {
        sendMessage(buffer);
    } catch (final ProcessException processException) {
        getLogger().error("Failed to send {} to EventHub due to {}; routing to failure", new Object[] { flowFile, processException }, processException);
        session.transfer(session.penalize(flowFile), REL_FAILURE);
        return;
    }
    final String namespace = context.getProperty(NAMESPACE).getValue();
    final String eventHubName = context.getProperty(EVENT_HUB_NAME).getValue();
    session.getProvenanceReporter().send(flowFile, "amqps://" + namespace + ".servicebus.windows.net" + "/" + eventHubName, stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    session.transfer(flowFile, REL_SUCCESS);
}
Also used: FlowFile (org.apache.nifi.flowfile.FlowFile), ProcessException (org.apache.nifi.processor.exception.ProcessException), StopWatch (org.apache.nifi.util.StopWatch)

Aggregations

StopWatch (org.apache.nifi.util.StopWatch): 72
FlowFile (org.apache.nifi.flowfile.FlowFile): 59
IOException (java.io.IOException): 41
ProcessException (org.apache.nifi.processor.exception.ProcessException): 37
InputStream (java.io.InputStream): 27
ComponentLog (org.apache.nifi.logging.ComponentLog): 27
OutputStream (java.io.OutputStream): 21
HashMap (java.util.HashMap): 16
ArrayList (java.util.ArrayList): 13
Map (java.util.Map): 11
ProcessSession (org.apache.nifi.processor.ProcessSession): 11
AtomicLong (java.util.concurrent.atomic.AtomicLong): 10
InputStreamCallback (org.apache.nifi.processor.io.InputStreamCallback): 10
StreamCallback (org.apache.nifi.processor.io.StreamCallback): 10
HashSet (java.util.HashSet): 9
Path (org.apache.hadoop.fs.Path): 9
Charset (java.nio.charset.Charset): 8
AtomicReference (java.util.concurrent.atomic.AtomicReference): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 8
PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor): 8