Use of software.amazon.awssdk.services.kinesis.model.Record in project beam by apache.
The class KinesisIOReadTest, method testRecords.
private List<List<Record>> testRecords(int shards, int events) {
  final Instant now = DateTime.now().toInstant();
  // For each shard, build `events` records that all share the same arrival time.
  Function<Integer, List<Record>> dataStream =
      shard -> range(0, events).mapToObj(off -> record(now, shard, off)).collect(toList());
  return range(0, shards).boxed().map(dataStream).collect(toList());
}
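The record(now, shard, off) factory is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming the payload simply encodes the shard and offset as UTF-8 text; the name, encoding, and field choices here are illustrative, not Beam's actual implementation:

private static Record record(Instant arrivalTime, int shard, int offset) {
  return Record.builder()
      // The test's Joda Instant converted to java.time.Instant for the SDK v2 builder.
      .approximateArrivalTimestamp(java.time.Instant.ofEpochMilli(arrivalTime.getMillis()))
      .partitionKey(Integer.toString(shard))
      .sequenceNumber(Integer.toString(offset))
      .data(SdkBytes.fromUtf8String(shard + ":" + offset))
      .build();
}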
Use of software.amazon.awssdk.services.kinesis.model.Record in project beam by apache.
The class KinesisIOReadTest, method readFromShards.
private void readFromShards(Function<Read, Read> fn, Iterable<Record> expected) {
  Read read =
      KinesisIO.read()
          .withStreamName("stream")
          .withInitialPositionInStream(TRIM_HORIZON)
          .withArrivalTimeWatermarkPolicy()
          .withMaxNumRecords(SHARDS * SHARD_EVENTS);
  PCollection<Record> result = p.apply(fn.apply(read)).apply(ParDo.of(new ToRecord()));
  PAssert.that(result).containsInAnyOrder(expected);
  p.run();
}
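The ToRecord DoFn applied above is also not part of this excerpt. A hedged sketch, assuming KinesisIO emits Beam KinesisRecord elements and the DoFn simply rebuilds an SDK v2 Record from each (illustrative only, not the original test code):

static class ToRecord extends DoFn<KinesisRecord, Record> {
  @ProcessElement
  public void processElement(@Element KinesisRecord in, OutputReceiver<Record> out) {
    // Rebuild an SDK v2 Record so PAssert can compare against the expected Records.
    out.output(
        Record.builder()
            .partitionKey(in.getPartitionKey())
            .sequenceNumber(in.getSequenceNumber())
            .data(SdkBytes.fromByteBuffer(in.getData()))
            .build());
  }
}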
Use of software.amazon.awssdk.services.kinesis.model.Record in project beam by apache.
The class SimplifiedKinesisClientTest, method generateRecords.
private List<Record> generateRecords(int num) {
  List<Record> records = new ArrayList<>();
  for (int i = 0; i < num; i++) {
    // Each record carries a 1 KB payload filled with the record's index byte.
    byte[] value = new byte[1024];
    Arrays.fill(value, (byte) i);
    records.add(
        Record.builder()
            .sequenceNumber(String.valueOf(i))
            .partitionKey("key")
            .data(SdkBytes.fromByteBuffer(ByteBuffer.wrap(value)))
            .build());
  }
  return records;
}
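A brief usage sketch, not taken from the test: a list like this can back a stubbed GetRecords response when the SDK v2 client is mocked with Mockito. The kinesis mock variable and the iterator string below are illustrative assumptions:

GetRecordsResponse response =
    GetRecordsResponse.builder()
        .records(generateRecords(10))
        .nextShardIterator("next-iterator")
        .millisBehindLatest(0L)
        .build();
when(kinesis.getRecords(any(GetRecordsRequest.class))).thenReturn(response);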
Use of software.amazon.awssdk.services.kinesis.model.Record in project flink by apache.
The class FanOutRecordPublisherTest, method testToSdkV1Records.
@Test
public void testToSdkV1Records() throws Exception {
  Date now = new Date();
  byte[] data = new byte[] {0, 1, 2, 3};
  Record record =
      Record.builder()
          .approximateArrivalTimestamp(now.toInstant())
          .partitionKey("pk")
          .sequenceNumber("sn")
          .data(SdkBytes.fromByteArray(data))
          .build();
  KinesisProxyV2Interface kinesis = singletonShard(createSubscribeToShardEvent(record));
  RecordPublisher publisher = createRecordPublisher(kinesis, latest());
  TestConsumer consumer = new TestConsumer();
  publisher.run(consumer);
  UserRecord actual = consumer.getRecordBatches().get(0).getDeaggregatedRecords().get(0);
  assertFalse(actual.isAggregated());
  assertEquals(now, actual.getApproximateArrivalTimestamp());
  assertEquals("sn", actual.getSequenceNumber());
  assertEquals("pk", actual.getPartitionKey());
  assertThat(toByteArray(actual.getData()), Matchers.equalTo(data));
}
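The toSdkV1Records conversion exercised by this test is not shown here. A hypothetical sketch of such a mapping, assuming each SDK v2 Record is copied onto the AWS SDK v1 record model so the existing deaggregation path can run unchanged; this is not necessarily the Flink connector's exact code:

private static List<com.amazonaws.services.kinesis.model.Record> toSdkV1Records(List<Record> records) {
  List<com.amazonaws.services.kinesis.model.Record> converted = new ArrayList<>();
  for (Record r : records) {
    // Copy payload, keys, and arrival time from the v2 model onto a v1 Record.
    converted.add(
        new com.amazonaws.services.kinesis.model.Record()
            .withData(r.data().asByteBuffer())
            .withPartitionKey(r.partitionKey())
            .withSequenceNumber(r.sequenceNumber())
            .withApproximateArrivalTimestamp(new Date(r.approximateArrivalTimestamp().toEpochMilli())));
  }
  return converted;
}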
Use of software.amazon.awssdk.services.kinesis.model.Record in project flink by apache.
The class FanOutRecordPublisher, method run.
@Override
public RecordPublisherRunResult run(final RecordBatchConsumer recordConsumer) throws InterruptedException {
  LOG.info(
      "Running fan out record publisher on {}::{} from {} - {}",
      subscribedShard.getStreamName(),
      subscribedShard.getShard().getShardId(),
      nextStartingPosition.getShardIteratorType(),
      nextStartingPosition.getStartingMarker());
  // For each SubscribeToShardEvent: convert the SDK v2 records to the SDK v1 model,
  // hand the batch to the consumer, and resume from the sequence number it returns.
  Consumer<SubscribeToShardEvent> eventConsumer = event -> {
    RecordBatch recordBatch = new RecordBatch(toSdkV1Records(event.records()), subscribedShard, event.millisBehindLatest());
    SequenceNumber sequenceNumber = recordConsumer.accept(recordBatch);
    nextStartingPosition = StartingPosition.continueFromSequenceNumber(sequenceNumber);
  };
  RecordPublisherRunResult result = runWithBackoff(eventConsumer);
  LOG.info("Subscription expired {}::{}, with status {}", subscribedShard.getStreamName(), subscribedShard.getShard().getShardId(), result);
  return result;
}
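For context, a minimal hypothetical RecordBatchConsumer (not Flink's implementation), assuming the interface's single accept method returns the SequenceNumber the publisher should resume from:

RecordBatchConsumer consumer = batch -> {
  List<UserRecord> records = batch.getDeaggregatedRecords();
  // Resume from the last deaggregated record in the batch; handling of an empty
  // batch would need a fallback sequence number, which this sketch omits.
  UserRecord last = records.get(records.size() - 1);
  return new SequenceNumber(last.getSequenceNumber());
};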