Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
The class GetSQS, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final String queueUrl = context.getProperty(DYNAMIC_QUEUE_URL).evaluateAttributeExpressions().getValue();
    final AmazonSQSClient client = getClient();

    final ReceiveMessageRequest request = new ReceiveMessageRequest();
    request.setAttributeNames(Collections.singleton("All"));
    request.setMessageAttributeNames(Collections.singleton("All"));
    request.setMaxNumberOfMessages(context.getProperty(BATCH_SIZE).asInteger());
    request.setVisibilityTimeout(context.getProperty(VISIBILITY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue());
    request.setQueueUrl(queueUrl);
    request.setWaitTimeSeconds(context.getProperty(RECEIVE_MSG_WAIT_TIME).asTimePeriod(TimeUnit.SECONDS).intValue());

    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    final ReceiveMessageResult result;
    try {
        result = client.receiveMessage(request);
    } catch (final Exception e) {
        getLogger().error("Failed to receive messages from Amazon SQS due to {}", new Object[] { e });
        context.yield();
        return;
    }

    final List<Message> messages = result.getMessages();
    if (messages.isEmpty()) {
        context.yield();
        return;
    }

    final boolean autoDelete = context.getProperty(AUTO_DELETE).asBoolean();

    for (final Message message : messages) {
        FlowFile flowFile = session.create();

        final Map<String, String> attributes = new HashMap<>();
        for (final Map.Entry<String, String> entry : message.getAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue());
        }
        for (final Map.Entry<String, MessageAttributeValue> entry : message.getMessageAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue().getStringValue());
        }
        attributes.put("hash.value", message.getMD5OfBody());
        attributes.put("hash.algorithm", "md5");
        attributes.put("sqs.message.id", message.getMessageId());
        attributes.put("sqs.receipt.handle", message.getReceiptHandle());

        flowFile = session.putAllAttributes(flowFile, attributes);
        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(message.getBody().getBytes(charset));
            }
        });

        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().receive(flowFile, queueUrl);
        getLogger().info("Successfully received {} from Amazon SQS", new Object[] { flowFile });
    }

    if (autoDelete) {
        // If we want to auto-delete messages, we must first commit the session to ensure that the data
        // is persisted in NiFi's repositories.
        session.commit();

        final DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest();
        deleteRequest.setQueueUrl(queueUrl);
        final List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new ArrayList<>();
        for (final Message message : messages) {
            final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
            entry.setId(message.getMessageId());
            entry.setReceiptHandle(message.getReceiptHandle());
            deleteRequestEntries.add(entry);
        }
        deleteRequest.setEntries(deleteRequestEntries);

        try {
            client.deleteMessageBatch(deleteRequest);
        } catch (final Exception e) {
            getLogger().error("Received {} messages from Amazon SQS but failed to delete the messages; these messages"
                    + " may be duplicated. Reason for deletion failure: {}", new Object[] { messages.size(), e });
        }
    }
}
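OutputStreamCallback declares a single method, process(OutputStream), so on Java 8+ the anonymous class in the write call above collapses to a lambda. An equivalent sketch, with message and charset in scope as in the snippet:

    // Same write as above, expressed as a lambda (Java 8+).
    flowFile = session.write(flowFile, out -> out.write(message.getBody().getBytes(charset)));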
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
The class ConsumeMQTT, method transferQueue:
private void transferQueue(ProcessSession session) {
    while (!mqttQueue.isEmpty()) {
        FlowFile messageFlowfile = session.create();
        final MQTTQueueMessage mqttMessage = mqttQueue.peek();

        Map<String, String> attrs = new HashMap<>();
        attrs.put(BROKER_ATTRIBUTE_KEY, broker);
        attrs.put(TOPIC_ATTRIBUTE_KEY, mqttMessage.getTopic());
        attrs.put(QOS_ATTRIBUTE_KEY, String.valueOf(mqttMessage.getQos()));
        attrs.put(IS_DUPLICATE_ATTRIBUTE_KEY, String.valueOf(mqttMessage.isDuplicate()));
        attrs.put(IS_RETAINED_ATTRIBUTE_KEY, String.valueOf(mqttMessage.isRetained()));
        messageFlowfile = session.putAllAttributes(messageFlowfile, attrs);

        messageFlowfile = session.write(messageFlowfile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(mqttMessage.getPayload());
            }
        });

        String transitUri = new StringBuilder(broker).append(mqttMessage.getTopic()).toString();
        session.getProvenanceReporter().receive(messageFlowfile, transitUri);
        session.transfer(messageFlowfile, REL_MESSAGE);
        session.commit();

        if (!mqttQueue.remove(mqttMessage) && logger.isWarnEnabled()) {
            logger.warn(new StringBuilder("FlowFile ").append(messageFlowfile.getAttribute(CoreAttributes.UUID.key()))
                    .append(" for Mqtt message ").append(mqttMessage)
                    .append(" had already been removed from queue, possible duplication of flow files").toString());
        }
    }
}
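Note the peek-then-remove sequence: the message stays in the shared queue until session.commit() returns, so a failure before the commit cannot silently drop it; if remove(...) then finds the message already gone, the worst case is a duplicate FlowFile, which the warning records. A self-contained sketch of the same at-least-once drain pattern against a plain BlockingQueue, with hypothetical names (process(...) stands in for the write/transfer/commit sequence):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class AtLeastOnceDrain {
        private final BlockingQueue<String> queue = new LinkedBlockingQueue<>();

        void drain() {
            while (!queue.isEmpty()) {
                final String msg = queue.peek();   // inspect, but do not remove yet
                if (msg == null) {
                    break;                         // another thread emptied the queue
                }
                process(msg);                      // durable side effect, analogous to session.commit()
                if (!queue.remove(msg)) {
                    // Already removed elsewhere: the side effect may have run twice.
                    System.err.println("possible duplicate for " + msg);
                }
            }
        }

        private void process(String msg) {
            System.out.println("handled " + msg);
        }
    }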
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
The class InferAvroSchema, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    try {
        final AtomicReference<String> avroSchema = new AtomicReference<>();
        switch (context.getProperty(INPUT_CONTENT_TYPE).getValue()) {
            case USE_MIME_TYPE:
                avroSchema.set(inferAvroSchemaFromMimeType(original, context, session));
                break;
            case JSON_CONTENT:
                avroSchema.set(inferAvroSchemaFromJSON(original, context, session));
                break;
            case CSV_CONTENT:
                avroSchema.set(inferAvroSchemaFromCSV(original, context, session));
                break;
            default:
                // Shouldn't be possible, but just in case
                session.transfer(original, REL_UNSUPPORTED_CONTENT);
                break;
        }

        if (StringUtils.isNotEmpty(avroSchema.get())) {
            String destination = context.getProperty(SCHEMA_DESTINATION).getValue();
            FlowFile avroSchemaFF = null;

            switch (destination) {
                case DESTINATION_ATTRIBUTE:
                    avroSchemaFF = session.putAttribute(session.clone(original), AVRO_SCHEMA_ATTRIBUTE_NAME, avroSchema.get());
                    // Leaves the original CoreAttributes.MIME_TYPE in place.
                    break;
                case DESTINATION_CONTENT:
                    avroSchemaFF = session.write(session.create(), new OutputStreamCallback() {
                        @Override
                        public void process(OutputStream out) throws IOException {
                            out.write(avroSchema.get().getBytes());
                        }
                    });
                    avroSchemaFF = session.putAttribute(avroSchemaFF, CoreAttributes.MIME_TYPE.key(), AVRO_MIME_TYPE);
                    break;
                default:
                    break;
            }

            // Transfer the FlowFiles.
            avroSchemaFF = session.putAttribute(avroSchemaFF, CoreAttributes.FILENAME.key(),
                    (original.getAttribute(CoreAttributes.FILENAME.key()) + AVRO_FILE_EXTENSION));
            session.transfer(avroSchemaFF, REL_SUCCESS);
            session.transfer(original, REL_ORIGINAL);
        } else {
            // If the avroSchema is null then the content type is unknown and therefore unsupported
            session.transfer(original, REL_UNSUPPORTED_CONTENT);
        }
    } catch (Exception ex) {
        getLogger().error("Failed to infer Avro schema for {} due to {}", new Object[] { original, ex });
        session.transfer(original, REL_FAILURE);
    }
}
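The attribute/content destination switch above is straightforward to exercise with NiFi's mock framework (nifi-mock). A hedged sketch: it assumes the constants referenced in the snippet (INPUT_CONTENT_TYPE, JSON_CONTENT, SCHEMA_DESTINATION, DESTINATION_ATTRIBUTE, AVRO_SCHEMA_ATTRIBUTE_NAME, REL_SUCCESS) are public members of InferAvroSchema, as is conventional for NiFi processors, and that no other required property lacks a default:

    import java.nio.charset.StandardCharsets;
    import org.apache.nifi.processors.kite.InferAvroSchema; // bundle package assumed
    import org.apache.nifi.util.MockFlowFile;
    import org.apache.nifi.util.TestRunner;
    import org.apache.nifi.util.TestRunners;

    public class InferAvroSchemaSketch {
        public static void main(String[] args) {
            TestRunner runner = TestRunners.newTestRunner(InferAvroSchema.class);
            // Route the inferred schema into an attribute on a clone of the original.
            runner.setProperty(InferAvroSchema.INPUT_CONTENT_TYPE, InferAvroSchema.JSON_CONTENT);
            runner.setProperty(InferAvroSchema.SCHEMA_DESTINATION, InferAvroSchema.DESTINATION_ATTRIBUTE);
            // (Any further required properties of the processor would need values here.)

            runner.enqueue("{\"id\": 1, \"name\": \"example\"}".getBytes(StandardCharsets.UTF_8));
            runner.run();

            MockFlowFile out = runner.getFlowFilesForRelationship(InferAvroSchema.REL_SUCCESS).get(0);
            System.out.println(out.getAttribute(InferAvroSchema.AVRO_SCHEMA_ATTRIBUTE_NAME));
        }
    }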
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
The class JmsConsumer, method map2FlowFile:
public static JmsProcessingSummary map2FlowFile(final ProcessContext context, final ProcessSession session,
        final Message message, final boolean addAttributes, ComponentLog logger) throws Exception {

    // Currently not very useful, because always one Message == one FlowFile
    final AtomicInteger msgsThisFlowFile = new AtomicInteger(1);

    FlowFile flowFile = session.create();
    try {
        // MapMessage is the exception: add only its name-value pairs to the FlowFile attributes
        if (message instanceof MapMessage) {
            MapMessage mapMessage = (MapMessage) message;
            flowFile = session.putAllAttributes(flowFile, createMapMessageValues(mapMessage));
        } else {
            // for all other message types, write the Message body to the FlowFile content
            flowFile = session.write(flowFile, new OutputStreamCallback() {
                @Override
                public void process(final OutputStream rawOut) throws IOException {
                    try (final OutputStream out = new BufferedOutputStream(rawOut, 65536)) {
                        final byte[] messageBody = JmsFactory.createByteArray(message);
                        out.write(messageBody);
                    } catch (final JMSException e) {
                        throw new ProcessException("Failed to receive JMS Message due to " + e.getMessage(), e);
                    }
                }
            });
        }

        if (addAttributes) {
            flowFile = session.putAllAttributes(flowFile, JmsFactory.createAttributeMap(message));
        }

        session.getProvenanceReporter().receive(flowFile, context.getProperty(URL).getValue());
        session.transfer(flowFile, REL_SUCCESS);
        logger.info("Created {} from {} messages received from JMS Server and transferred to 'success'",
                new Object[] { flowFile, msgsThisFlowFile.get() });
        return new JmsProcessingSummary(flowFile.getSize(), message, flowFile);
    } catch (Exception e) {
        session.remove(flowFile);
        throw e;
    }
}
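OutputStreamCallback, as used here, replaces the FlowFile content outright. When the existing content must be read while the replacement is produced, the sibling interface org.apache.nifi.processor.io.StreamCallback, whose single method is process(InputStream, OutputStream), is used instead. A minimal sketch of such a read-and-rewrite, with the uppercasing purely illustrative:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.charset.StandardCharsets;
    import org.apache.nifi.processor.io.StreamCallback;

    flowFile = session.write(flowFile, new StreamCallback() {
        @Override
        public void process(final InputStream in, final OutputStream out) throws IOException {
            final byte[] content = in.readAllBytes(); // Java 9+; use a copy loop on Java 8
            out.write(new String(content, StandardCharsets.UTF_8)
                    .toUpperCase().getBytes(StandardCharsets.UTF_8));
        }
    });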
Use of org.apache.nifi.processor.io.OutputStreamCallback in project nifi by apache.
The class JoltTransformJSON, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, ProcessSession session) throws ProcessException {
    final FlowFile original = session.get();
    if (original == null) {
        return;
    }

    final ComponentLog logger = getLogger();
    final StopWatch stopWatch = new StopWatch(true);

    final Object inputJson;
    try (final InputStream in = session.read(original)) {
        inputJson = JsonUtils.jsonToObject(in);
    } catch (final Exception e) {
        logger.error("Failed to transform {}; routing to failure", new Object[] { original, e });
        session.transfer(original, REL_FAILURE);
        return;
    }

    final String jsonString;
    final ClassLoader originalContextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        final JoltTransform transform = getTransform(context, original);
        if (customClassLoader != null) {
            Thread.currentThread().setContextClassLoader(customClassLoader);
        }
        final Object transformedJson = TransformUtils.transform(transform, inputJson);
        jsonString = JsonUtils.toJsonString(transformedJson);
    } catch (final Exception ex) {
        logger.error("Unable to transform {} due to {}", new Object[] { original, ex.toString(), ex });
        session.transfer(original, REL_FAILURE);
        return;
    } finally {
        if (customClassLoader != null && originalContextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(originalContextClassLoader);
        }
    }

    FlowFile transformed = session.write(original, new OutputStreamCallback() {
        @Override
        public void process(OutputStream out) throws IOException {
            out.write(jsonString.getBytes(DEFAULT_CHARSET));
        }
    });

    final String transformType = context.getProperty(JOLT_TRANSFORM).getValue();
    transformed = session.putAttribute(transformed, CoreAttributes.MIME_TYPE.key(), "application/json");
    session.transfer(transformed, REL_SUCCESS);
    session.getProvenanceReporter().modifyContent(transformed, "Modified With " + transformType,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));
    logger.info("Transformed {}", new Object[] { original });
}
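The StopWatch here is org.apache.nifi.util.StopWatch: constructed with true it starts immediately, and getElapsed(TimeUnit) reads the elapsed time without stopping the watch, which is why the snippet never calls stop(). A minimal sketch of the same timing pattern, reusing names from the snippet:

    import java.util.concurrent.TimeUnit;
    import org.apache.nifi.util.StopWatch;

    final StopWatch stopWatch = new StopWatch(true);          // true: start timing immediately
    // ... read, transform, and rewrite the FlowFile ...
    session.getProvenanceReporter().modifyContent(transformed,
            "Modified With " + transformType,
            stopWatch.getElapsed(TimeUnit.MILLISECONDS));     // read while still running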