Use of com.amazonaws.services.kinesis.model.PutRecordsRequestEntry in project apex-malhar by apache.
The class AbstractKinesisOutputOperator, method addRecord.
private void addRecord(T tuple) {
  try {
    Pair<String, V> keyValue = tupleToKeyValue(tuple);
    PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
    putRecordsEntry.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
    putRecordsEntry.setPartitionKey(keyValue.first);
    putRecordsRequestEntryList.add(putRecordsEntry);
  } catch (AmazonClientException e) {
    throw new RuntimeException(e);
  }
}
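This method only buffers an entry into putRecordsRequestEntryList. A minimal sketch of how such a buffer is typically flushed with the v1 SDK follows; the kinesisClient and streamName names are assumptions for illustration and are not part of the operator excerpt above.

// Sketch only: flush the buffered entries in one PutRecords call.
// kinesisClient and streamName are assumed to be configured elsewhere.
PutRecordsRequest flushRequest = new PutRecordsRequest();
flushRequest.setStreamName(streamName);
flushRequest.setRecords(putRecordsRequestEntryList);
PutRecordsResult flushResult = kinesisClient.putRecords(flushRequest);
if (flushResult.getFailedRecordCount() > 0) {
  // Entries can fail individually even when the call itself succeeds;
  // inspect flushResult.getRecords() for per-entry error codes before clearing.
}
putRecordsRequestEntryList.clear();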
Use of com.amazonaws.services.kinesis.model.PutRecordsRequestEntry in project apex-malhar by apache.
The class KinesisTestProducer, method generateRecords.
private void generateRecords() {
  // Create dummy message
  int recordNo = 1;
  while (recordNo <= sendCount) {
    String dataStr = "Record_" + recordNo;
    PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
    putRecordsEntry.setData(ByteBuffer.wrap(dataStr.getBytes()));
    putRecordsEntry.setPartitionKey(dataStr);
    putRecordsRequestEntryList.add(putRecordsEntry);
    if ((putRecordsRequestEntryList.size() == batchSize) || (recordNo == sendCount)) {
      PutRecordsRequest putRecordsRequest = new PutRecordsRequest();
      putRecordsRequest.setStreamName(streamName);
      putRecordsRequest.setRecords(putRecordsRequestEntryList);
      client.putRecords(putRecordsRequest);
      putRecordsRequestEntryList.clear();
    }
    recordNo++;
  }
}
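The flush condition sends a batch either when putRecordsRequestEntryList reaches batchSize or when the final record has been added: with sendCount = 10 and batchSize = 4, for example, PutRecords is called after records 4, 8, and 10. Note that the Kinesis PutRecords API accepts at most 500 records per request, so batchSize must not exceed that limit, and this test producer does not inspect the returned PutRecordsResult for partial failures.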
Use of com.amazonaws.services.kinesis.model.PutRecordsRequestEntry in project hazelcast by hazelcast.
The class KinesisTestHelper, method putRecords.
public PutRecordsResult putRecords(List<Map.Entry<String, String>> messages) {
  PutRecordsRequest request = new PutRecordsRequest();
  request.setStreamName(stream);
  request.setRecords(messages.stream().map(entry -> {
    PutRecordsRequestEntry putEntry = new PutRecordsRequestEntry();
    putEntry.setPartitionKey(entry.getKey());
    putEntry.setData(ByteBuffer.wrap(entry.getValue().getBytes(StandardCharsets.UTF_8)));
    return putEntry;
  }).collect(Collectors.toList()));
  return callSafely(() -> putRecords(request), "put records");
}
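A brief usage sketch, assuming an already constructed KinesisTestHelper instance named helper; the keys, payloads, and failure handling shown here are illustrative only.

// Sketch only: build partition-key/payload pairs and check the per-record outcome.
List<Map.Entry<String, String>> messages = Arrays.asList(
    new AbstractMap.SimpleEntry<>("key-0", "payload-0"),
    new AbstractMap.SimpleEntry<>("key-1", "payload-1"));
PutRecordsResult result = helper.putRecords(messages);
for (PutRecordsResultEntry entry : result.getRecords()) {
  if (entry.getErrorCode() != null) {
    // The record at this position was rejected; getErrorMessage() explains why.
  }
}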
Use of com.amazonaws.services.kinesis.model.PutRecordsRequestEntry in project beam by apache.
The class KinesisUploader, method uploadAll.
public static void uploadAll(List<String> data, KinesisTestOptions options) {
  AmazonKinesisClient client = new AmazonKinesisClient(
      new StaticCredentialsProvider(
          new BasicAWSCredentials(options.getAwsAccessKey(), options.getAwsSecretKey())))
      .withRegion(Regions.fromName(options.getAwsKinesisRegion()));
  List<List<String>> partitions = Lists.partition(data, MAX_NUMBER_OF_RECORDS_IN_BATCH);
  for (List<String> partition : partitions) {
    List<PutRecordsRequestEntry> allRecords = newArrayList();
    for (String row : partition) {
      allRecords.add(new PutRecordsRequestEntry()
          .withData(ByteBuffer.wrap(row.getBytes(Charsets.UTF_8)))
          .withPartitionKey(Integer.toString(row.hashCode())));
    }
    PutRecordsResult result;
    do {
      result = client.putRecords(new PutRecordsRequest()
          .withStreamName(options.getAwsKinesisStream())
          .withRecords(allRecords));
      List<PutRecordsRequestEntry> failedRecords = newArrayList();
      int i = 0;
      for (PutRecordsResultEntry row : result.getRecords()) {
        if (row.getErrorCode() != null) {
          failedRecords.add(allRecords.get(i));
        }
        ++i;
      }
      allRecords = failedRecords;
    } while (result.getFailedRecordCount() > 0);
  }
}
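The do/while loop implements the usual PutRecords retry pattern: the call can partially succeed, so only the entries whose PutRecordsResultEntry reports an error code are collected and resubmitted until getFailedRecordCount() drops to zero. A hedged invocation sketch follows, assuming KinesisTestOptions is a Beam PipelineOptions interface (as its getters suggest) and that this runs inside a main(String[] args).

// Sketch only: obtain the test options from command-line arguments and upload.
KinesisTestOptions options = PipelineOptionsFactory.fromArgs(args)
    .withValidation()
    .as(KinesisTestOptions.class);
KinesisUploader.uploadAll(Arrays.asList("row-1", "row-2"), options);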
Use of com.amazonaws.services.kinesis.model.PutRecordsRequestEntry in project nifi by apache.
The class PutKinesisStream, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
  final int batchSize = context.getProperty(BATCH_SIZE).asInteger();
  final long maxBufferSizeBytes = context.getProperty(MAX_MESSAGE_BUFFER_SIZE_MB).asDataSize(DataUnit.B).longValue();
  List<FlowFile> flowFiles = filterMessagesByMaxSize(session, batchSize, maxBufferSizeBytes, AWS_KINESIS_ERROR_MESSAGE);
  HashMap<String, List<FlowFile>> hashFlowFiles = new HashMap<>();
  HashMap<String, List<PutRecordsRequestEntry>> recordHash = new HashMap<String, List<PutRecordsRequestEntry>>();
  final AmazonKinesisClient client = getClient();
  try {
    List<FlowFile> failedFlowFiles = new ArrayList<>();
    List<FlowFile> successfulFlowFiles = new ArrayList<>();
    // Prepare batch of records
    for (int i = 0; i < flowFiles.size(); i++) {
      FlowFile flowFile = flowFiles.get(i);
      String streamName = context.getProperty(KINESIS_STREAM_NAME).evaluateAttributeExpressions(flowFile).getValue();
      final ByteArrayOutputStream baos = new ByteArrayOutputStream();
      session.exportTo(flowFile, baos);
      PutRecordsRequestEntry record = new PutRecordsRequestEntry().withData(ByteBuffer.wrap(baos.toByteArray()));
      String partitionKey = context.getProperty(PutKinesisStream.KINESIS_PARTITION_KEY).evaluateAttributeExpressions(flowFiles.get(i)).getValue();
      if (StringUtils.isBlank(partitionKey) == false) {
        record.setPartitionKey(partitionKey);
      } else {
        record.setPartitionKey(Integer.toString(randomParitionKeyGenerator.nextInt()));
      }
      if (recordHash.containsKey(streamName) == false) {
        recordHash.put(streamName, new ArrayList<>());
      }
      if (hashFlowFiles.containsKey(streamName) == false) {
        hashFlowFiles.put(streamName, new ArrayList<>());
      }
      hashFlowFiles.get(streamName).add(flowFile);
      recordHash.get(streamName).add(record);
    }
    for (Map.Entry<String, List<PutRecordsRequestEntry>> entryRecord : recordHash.entrySet()) {
      String streamName = entryRecord.getKey();
      List<PutRecordsRequestEntry> records = entryRecord.getValue();
      if (records.size() > 0) {
        PutRecordsRequest putRecordRequest = new PutRecordsRequest();
        putRecordRequest.setStreamName(streamName);
        putRecordRequest.setRecords(records);
        PutRecordsResult results = client.putRecords(putRecordRequest);
        List<PutRecordsResultEntry> responseEntries = results.getRecords();
        for (int i = 0; i < responseEntries.size(); i++) {
          PutRecordsResultEntry entry = responseEntries.get(i);
          FlowFile flowFile = hashFlowFiles.get(streamName).get(i);
          Map<String, String> attributes = new HashMap<>();
          attributes.put(AWS_KINESIS_SHARD_ID, entry.getShardId());
          attributes.put(AWS_KINESIS_SEQUENCE_NUMBER, entry.getSequenceNumber());
          if (StringUtils.isBlank(entry.getErrorCode()) == false) {
            attributes.put(AWS_KINESIS_ERROR_CODE, entry.getErrorCode());
            attributes.put(AWS_KINESIS_ERROR_MESSAGE, entry.getErrorMessage());
            flowFile = session.putAllAttributes(flowFile, attributes);
            failedFlowFiles.add(flowFile);
          } else {
            flowFile = session.putAllAttributes(flowFile, attributes);
            successfulFlowFiles.add(flowFile);
          }
        }
      }
      recordHash.get(streamName).clear();
      records.clear();
    }
    if (failedFlowFiles.size() > 0) {
      session.transfer(failedFlowFiles, REL_FAILURE);
      getLogger().error("Failed to publish to kinesis records {}", new Object[] { failedFlowFiles });
    }
    if (successfulFlowFiles.size() > 0) {
      session.transfer(successfulFlowFiles, REL_SUCCESS);
      getLogger().debug("Successfully published to kinesis records {}", new Object[] { successfulFlowFiles });
    }
  } catch (final Exception exception) {
    getLogger().error("Failed to publish due to exception {} flowfiles {} ", new Object[] { exception, flowFiles });
    session.transfer(flowFiles, REL_FAILURE);
    context.yield();
  }
}
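The two containsKey checks in the batching loop group flow files and records per stream name. An equivalent, more compact form (purely an illustrative alternative, not the processor's code) uses Map.computeIfAbsent:

// Sketch only: per-stream grouping with computeIfAbsent instead of containsKey checks.
recordHash.computeIfAbsent(streamName, k -> new ArrayList<>()).add(record);
hashFlowFiles.computeIfAbsent(streamName, k -> new ArrayList<>()).add(flowFile);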