Use of com.amazonaws.services.sqs.model.DeleteMessageBatchRequestEntry in project nifi by apache.
The class GetSQS, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final String queueUrl = context.getProperty(DYNAMIC_QUEUE_URL).evaluateAttributeExpressions().getValue();
    final AmazonSQSClient client = getClient();

    final ReceiveMessageRequest request = new ReceiveMessageRequest();
    request.setAttributeNames(Collections.singleton("All"));
    request.setMessageAttributeNames(Collections.singleton("All"));
    request.setMaxNumberOfMessages(context.getProperty(BATCH_SIZE).asInteger());
    request.setVisibilityTimeout(context.getProperty(VISIBILITY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue());
    request.setQueueUrl(queueUrl);
    request.setWaitTimeSeconds(context.getProperty(RECEIVE_MSG_WAIT_TIME).asTimePeriod(TimeUnit.SECONDS).intValue());

    final Charset charset = Charset.forName(context.getProperty(CHARSET).getValue());

    final ReceiveMessageResult result;
    try {
        result = client.receiveMessage(request);
    } catch (final Exception e) {
        getLogger().error("Failed to receive messages from Amazon SQS due to {}", new Object[] { e });
        context.yield();
        return;
    }

    final List<Message> messages = result.getMessages();
    if (messages.isEmpty()) {
        context.yield();
        return;
    }

    final boolean autoDelete = context.getProperty(AUTO_DELETE).asBoolean();

    for (final Message message : messages) {
        FlowFile flowFile = session.create();

        final Map<String, String> attributes = new HashMap<>();
        for (final Map.Entry<String, String> entry : message.getAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue());
        }
        for (final Map.Entry<String, MessageAttributeValue> entry : message.getMessageAttributes().entrySet()) {
            attributes.put("sqs." + entry.getKey(), entry.getValue().getStringValue());
        }
        attributes.put("hash.value", message.getMD5OfBody());
        attributes.put("hash.algorithm", "md5");
        attributes.put("sqs.message.id", message.getMessageId());
        attributes.put("sqs.receipt.handle", message.getReceiptHandle());

        flowFile = session.putAllAttributes(flowFile, attributes);
        flowFile = session.write(flowFile, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                out.write(message.getBody().getBytes(charset));
            }
        });

        session.transfer(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().receive(flowFile, queueUrl);
        getLogger().info("Successfully received {} from Amazon SQS", new Object[] { flowFile });
    }
    if (autoDelete) {
        // If we want to auto-delete messages, we must first commit the session to ensure that the data
        // is persisted in NiFi's repositories.
        session.commit();

        final DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest();
        deleteRequest.setQueueUrl(queueUrl);
        final List<DeleteMessageBatchRequestEntry> deleteRequestEntries = new ArrayList<>();
        for (final Message message : messages) {
            final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
            entry.setId(message.getMessageId());
            entry.setReceiptHandle(message.getReceiptHandle());
            deleteRequestEntries.add(entry);
        }
        deleteRequest.setEntries(deleteRequestEntries);

        try {
            client.deleteMessageBatch(deleteRequest);
        } catch (final Exception e) {
            getLogger().error("Received {} messages from Amazon SQS but failed to delete the messages; these messages may be duplicated. Reason for deletion failure: {}", new Object[] { messages.size(), e });
        }
    }
}
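For context outside NiFi, here is a minimal standalone sketch of the same receive pattern against the AWS SDK v1. The queue URL, region, and client construction are assumptions for illustration; the processor above obtains its configuration from the ProcessContext instead.

import com.amazonaws.services.sqs.AmazonSQS;
import com.amazonaws.services.sqs.AmazonSQSClientBuilder;
import com.amazonaws.services.sqs.model.Message;
import com.amazonaws.services.sqs.model.ReceiveMessageRequest;

public class ReceiveExample {
    public static void main(String[] args) {
        // Hypothetical queue URL; substitute your own.
        final String queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue";
        final AmazonSQS client = AmazonSQSClientBuilder.defaultClient();

        final ReceiveMessageRequest request = new ReceiveMessageRequest(queueUrl)
                .withMaxNumberOfMessages(10)   // SQS caps a single receive at 10 messages
                .withWaitTimeSeconds(20)       // long polling avoids tight empty-response loops
                .withVisibilityTimeout(30);    // seconds before unacknowledged messages reappear

        for (final Message message : client.receiveMessage(request).getMessages()) {
            System.out.println(message.getMessageId() + ": " + message.getBody());
        }
    }
}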
Use of com.amazonaws.services.sqs.model.DeleteMessageBatchRequestEntry in project krypton-android by kryptco.
The class SQSTransport, method receiveMessages:
public static List<byte[]> receiveMessages(final Pairing pairing) throws TransportException {
    final AmazonSQSClient client = getClient();

    ReceiveMessageRequest request = new ReceiveMessageRequest(sendQueueURL(pairing));
    request.setWaitTimeSeconds(10);
    request.setMaxNumberOfMessages(10);
    ReceiveMessageResult result = client.receiveMessage(request);

    final List<DeleteMessageBatchRequestEntry> deleteEntries = new ArrayList<>();
    ArrayList<byte[]> messages = new ArrayList<byte[]>();
    for (Message m : result.getMessages()) {
        deleteEntries.add(new DeleteMessageBatchRequestEntry(m.getMessageId(), m.getReceiptHandle()));
        try {
            messages.add(Base64.decode(m.getBody()));
        } catch (Exception e) {
            Log.e(TAG, "failed to decode message: " + e.getMessage());
        }
    }

    if (!deleteEntries.isEmpty()) {
        deleteThreadPool.submit(() -> {
            try {
                DeleteMessageBatchRequest deleteRequest = new DeleteMessageBatchRequest(sendQueueURL(pairing)).withEntries(deleteEntries);
                client.deleteMessageBatch(deleteRequest);
            } catch (Exception e) {
                Log.e(TAG, "failed to delete messages: " + e.getMessage());
            }
        });
    }
    return messages;
}
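Note that this snippet hands the batch delete off to a background executor so the caller is never blocked by the delete round trip. deleteThreadPool is a field defined elsewhere in SQSTransport; a plausible single-threaded daemon equivalent, purely as an assumption, might look like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical stand-in for SQSTransport's deleteThreadPool field: one daemon
// worker drains delete requests in the background without blocking receives.
private static final ExecutorService deleteThreadPool =
        Executors.newSingleThreadExecutor(runnable -> {
            Thread t = new Thread(runnable, "sqs-delete");
            t.setDaemon(true); // pending deletes should not keep the process alive
            return t;
        });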
Use of com.amazonaws.services.sqs.model.DeleteMessageBatchRequestEntry in project nifi by apache.
The class DeleteSQS, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    List<FlowFile> flowFiles = session.get(1);
    if (flowFiles.isEmpty()) {
        return;
    }

    final FlowFile firstFlowFile = flowFiles.get(0);
    final String queueUrl = context.getProperty(QUEUE_URL).evaluateAttributeExpressions(firstFlowFile).getValue();
    final AmazonSQSClient client = getClient();

    final DeleteMessageBatchRequest request = new DeleteMessageBatchRequest();
    request.setQueueUrl(queueUrl);

    final List<DeleteMessageBatchRequestEntry> entries = new ArrayList<>(flowFiles.size());
    for (final FlowFile flowFile : flowFiles) {
        final DeleteMessageBatchRequestEntry entry = new DeleteMessageBatchRequestEntry();
        // Every entry in a batch delete must carry an id that is unique within the
        // request; without one, SQS rejects the whole batch.
        entry.setId(String.valueOf(entries.size()));
        entry.setReceiptHandle(context.getProperty(RECEIPT_HANDLE).evaluateAttributeExpressions(flowFile).getValue());
        entries.add(entry);
    }
    request.setEntries(entries);

    try {
        client.deleteMessageBatch(request);
        getLogger().info("Successfully deleted {} objects from SQS", new Object[] { flowFiles.size() });
        session.transfer(flowFiles, REL_SUCCESS);
    } catch (final Exception e) {
        getLogger().error("Failed to delete {} objects from SQS due to {}", new Object[] { flowFiles.size(), e });
        final List<FlowFile> penalizedFlowFiles = new ArrayList<>();
        for (final FlowFile flowFile : flowFiles) {
            penalizedFlowFiles.add(session.penalize(flowFile));
        }
        session.transfer(penalizedFlowFiles, REL_FAILURE);
    }
}
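A caveat both NiFi snippets share: deleteMessageBatch reports per-entry failures in its result object rather than by throwing, so a call that returns normally can still leave some messages undeleted. A sketch of checking for that, using only SDK v1 calls (the logging style mirrors the processor above):

import com.amazonaws.services.sqs.model.BatchResultErrorEntry;
import com.amazonaws.services.sqs.model.DeleteMessageBatchResult;

// A normal return does not guarantee every entry succeeded; inspect getFailed().
final DeleteMessageBatchResult deleteResult = client.deleteMessageBatch(request);
for (final BatchResultErrorEntry failed : deleteResult.getFailed()) {
    getLogger().warn("Failed to delete message {} from SQS: {} ({})",
            new Object[] { failed.getId(), failed.getMessage(), failed.getCode() });
}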