Use of com.google.cloud.pubsublite.SequencedMessage in project java-pubsublite by googleapis.
From the class SubscriberImplTest, the method reinitialize_handlesIgnoredReset:
@Test
public void reinitialize_handlesIgnoredReset() throws Exception {
  subscriber.allowFlow(
      FlowControlRequest.newBuilder().setAllowedBytes(100).setAllowedMessages(100).build());
  ImmutableList<SequencedMessage> messages =
      ImmutableList.of(
          SequencedMessage.of(Message.builder().build(), Timestamps.EPOCH, Offset.of(0), 10),
          SequencedMessage.of(Message.builder().build(), Timestamps.EPOCH, Offset.of(1), 10));
  CountDownLatch messagesReceived = countdownMessageBatches(1);
  leakedResponseObserver.onResponse(messages);
  assertThat(messagesReceived.await(10, SECONDS)).isTrue();
  verify(mockMessageConsumer).accept(messages);
  final SubscribeRequest nextOffsetRequest =
      SubscribeRequest.newBuilder()
          .setInitial(
              BASE_INITIAL_SUBSCRIBE_REQUEST.toBuilder()
                  .setInitialLocation(
                      SeekRequest.newBuilder().setCursor(Cursor.newBuilder().setOffset(2))))
          .build();
  doAnswer(
          args -> {
            leakedResponseObserver = args.getArgument(1);
            return mockConnectedSubscriber2;
          })
      .when(mockSubscriberFactory)
      .New(any(), any(), eq(nextOffsetRequest));
  // If the RESET signal is received and the subscriber reset is ignored, the subscriber
  // should read from the next offset upon reconnect.
  when(mockResetHandler.handleReset()).thenReturn(false);
  subscriber.triggerReinitialize(TestResetSignal.newCheckedException());
  verify(mockSubscriberFactory, times(1)).New(any(), any(), eq(initialRequest()));
  verify(mockSubscriberFactory, times(1)).New(any(), any(), eq(nextOffsetRequest));
  verify(mockConnectedSubscriber2)
      .allowFlow(
          FlowControlRequest.newBuilder().setAllowedBytes(80).setAllowedMessages(98).build());
}
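The final verification encodes the flow-control bookkeeping: of the 100 bytes and 100 messages granted via allowFlow, two 10-byte messages were delivered before the reset, so the remainder is re-sent to the new connection. A minimal sketch of that accounting (an illustration of the arithmetic the test asserts, not the library's internal code):

  long allowedBytes = 100;     // granted via allowFlow above
  long allowedMessages = 100;
  for (SequencedMessage m : messages) { // two messages, 10 bytes each
    allowedBytes -= m.byteSize();       // 100 - 2 * 10 = 80
    allowedMessages -= 1;               // 100 - 2 = 98
  }
  FlowControlRequest remainder =
      FlowControlRequest.newBuilder()
          .setAllowedBytes(allowedBytes)        // 80
          .setAllowedMessages(allowedMessages)  // 98
          .build();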
Use of com.google.cloud.pubsublite.SequencedMessage in project java-pubsublite by googleapis.
From the class SubscriberImplTest, the method reinitialize_reconnectsToNextOffset:
@Test
public void reinitialize_reconnectsToNextOffset() throws Exception {
  subscriber.allowFlow(
      FlowControlRequest.newBuilder().setAllowedBytes(100).setAllowedMessages(100).build());
  ImmutableList<SequencedMessage> messages =
      ImmutableList.of(
          SequencedMessage.of(Message.builder().build(), Timestamps.EPOCH, Offset.of(0), 10),
          SequencedMessage.of(Message.builder().build(), Timestamps.EPOCH, Offset.of(1), 10));
  CountDownLatch messagesReceived = countdownMessageBatches(1);
  leakedResponseObserver.onResponse(messages);
  assertThat(messagesReceived.await(10, SECONDS)).isTrue();
  verify(mockMessageConsumer).accept(messages);
  final SubscribeRequest nextOffsetRequest =
      SubscribeRequest.newBuilder()
          .setInitial(
              BASE_INITIAL_SUBSCRIBE_REQUEST.toBuilder()
                  .setInitialLocation(
                      SeekRequest.newBuilder().setCursor(Cursor.newBuilder().setOffset(2))))
          .build();
  doAnswer(
          args -> {
            leakedResponseObserver = args.getArgument(1);
            return mockConnectedSubscriber2;
          })
      .when(mockSubscriberFactory)
      .New(any(), any(), eq(nextOffsetRequest));
  subscriber.triggerReinitialize(new CheckedApiException(Code.ABORTED));
  verify(mockSubscriberFactory, times(1)).New(any(), any(), eq(initialRequest()));
  verify(mockSubscriberFactory, times(1)).New(any(), any(), eq(nextOffsetRequest));
  verify(mockConnectedSubscriber2)
      .allowFlow(
          FlowControlRequest.newBuilder().setAllowedBytes(80).setAllowedMessages(98).build());
}
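Both reinitialize tests expect the reconnect cursor to be one past the last delivered offset. A sketch of that derivation (illustrative, not the library's internals; offsets 0 and 1 were delivered above, so the reconnect seeks to offset 2):

  long lastDeliveredOffset = 1L; // the second message above
  SeekRequest reconnectLocation =
      SeekRequest.newBuilder()
          .setCursor(Cursor.newBuilder().setOffset(lastDeliveredOffset + 1)) // offset 2
          .build();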
Use of com.google.cloud.pubsublite.SequencedMessage in project java-pubsublite-kafka by googleapis.
From the class SingleSubscriptionConsumerImpl, the method doPoll:
private Map<Partition, Queue<SequencedMessage>> doPoll(Duration duration) {
  try {
    // Collect the futures that should cut the wait short: a wakeup, an assignment
    // change, or data arriving on any partition's subscriber.
    ImmutableList.Builder<ApiFuture<Void>> stopSleepingSignals = ImmutableList.builder();
    try (CloseableMonitor.Hold h = monitor.enter()) {
      stopSleepingSignals.add(wakeupTriggered);
      stopSleepingSignals.add(assignmentChanged);
      partitions.values().forEach(subscriber -> stopSleepingSignals.add(subscriber.onData()));
    }
    try {
      ApiFuturesExtensions.whenFirstDone(stopSleepingSignals.build())
          .get(duration.toMillis(), MILLISECONDS);
    } catch (TimeoutException e) {
      // The poll deadline elapsed with nothing to report.
      return ImmutableMap.of();
    }
    try (CloseableMonitor.Hold h = monitor.enter()) {
      if (wakeupTriggered.isDone()) throw new WakeupException();
      // Drain the currently buffered messages from every assigned partition.
      Map<Partition, Queue<SequencedMessage>> partitionQueues = new HashMap<>();
      partitions.forEach(
          ExtractStatus.rethrowAsRuntime(
              (partition, subscriber) -> partitionQueues.put(partition, subscriber.getMessages())));
      return partitionQueues;
    }
  } catch (Throwable t) {
    throw toKafka(t);
  }
}
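doPoll blocks until the first of several signals completes or the poll timeout elapses. ApiFuturesExtensions.whenFirstDone is a project-internal helper; a minimal sketch of the semantics the code above relies on (an assumption about its behavior, not the actual implementation):

  // Completes as soon as any of the input futures completes.
  static ApiFuture<Void> whenFirstDone(Iterable<ApiFuture<Void>> futures) {
    SettableApiFuture<Void> firstDone = SettableApiFuture.create();
    for (ApiFuture<Void> future : futures) {
      future.addListener(() -> firstDone.set(null), MoreExecutors.directExecutor());
    }
    return firstDone;
  }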
Use of com.google.cloud.pubsublite.SequencedMessage in project java-pubsublite-kafka by googleapis.
From the class SingleSubscriptionConsumerImpl, the method poll:
@Override
public ConsumerRecords<byte[], byte[]> poll(Duration duration) {
  if (autocommit) {
    // Kick off an asynchronous commit of previously delivered offsets; failures are
    // logged rather than surfaced to the caller.
    ApiFuture<?> future = commitAll();
    ApiFutures.addCallback(
        future,
        new ApiFutureCallback<Object>() {
          @Override
          public void onFailure(Throwable throwable) {
            logger.atWarning().withCause(throwable).log("Failed to commit offsets.");
          }

          @Override
          public void onSuccess(Object result) {}
        },
        MoreExecutors.directExecutor());
  }
  Map<Partition, Queue<SequencedMessage>> partitionQueues = doPoll(duration);
  // Transform each partition's queue of Pub/Sub Lite messages into Kafka ConsumerRecords.
  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
  partitionQueues.forEach(
      (partition, queue) -> {
        if (queue.isEmpty()) return;
        List<ConsumerRecord<byte[], byte[]>> partitionRecords =
            queue.stream()
                .map(message -> RecordTransforms.fromMessage(message, topic, partition))
                .collect(Collectors.toList());
        records.put(new TopicPartition(topic.toString(), (int) partition.value()), partitionRecords);
      });
  return new ConsumerRecords<>(records);
}
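Because poll returns standard Kafka ConsumerRecords, application code can drive this consumer like any other Kafka Consumer. A hypothetical usage sketch (the pollLoop helper and its Consumer parameter are placeholders for illustration, not part of the project):

  // Poll in a loop, as with a standard Kafka consumer; `consumer` stands in for an
  // instance backed by the class above.
  void pollLoop(Consumer<byte[], byte[]> consumer) {
    while (true) {
      ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
      for (ConsumerRecord<byte[], byte[]> record : records) {
        System.out.printf("partition=%d offset=%d%n", record.partition(), record.offset());
      }
    }
  }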
Use of com.google.cloud.pubsublite.SequencedMessage in project java-pubsublite-spark by googleapis.
From the class PslMicroBatchInputPartitionReaderTest, the method testPartitionReader:
@Test
public void testPartitionReader() throws Exception {
  long endOffset = 14L;
  createReader(endOffset);
  SequencedMessage message1 = newMessage(10L);
  SequencedMessage message2 = newMessage(endOffset);
  // Multiple get() calls without an intervening next() return the same message.
  when(subscriber.onData()).thenReturn(ApiFutures.immediateFuture(null));
  when(subscriber.messageIfAvailable()).thenReturn(Optional.of(message1));
  assertThat(reader.next()).isTrue();
  verifyInternalRow(reader.get(), 10L);
  verifyInternalRow(reader.get(), 10L);
  // next() advances to the following message, which is also the last message in the batch.
  when(subscriber.onData()).thenReturn(ApiFutures.immediateFuture(null));
  when(subscriber.messageIfAvailable()).thenReturn(Optional.of(message2));
  assertThat(reader.next()).isTrue();
  verifyInternalRow(reader.get(), 14L);
  // The reader has now reached the end of the batch.
  assertThat(reader.next()).isFalse();
}
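newMessage is a test helper not shown in this snippet. Given how the reader is verified against offsets 10 and 14, it plausibly mirrors the SequencedMessage.of calls in the earlier snippets (a hypothetical reconstruction, not the actual helper):

  private static SequencedMessage newMessage(long offset) {
    return SequencedMessage.of(
        Message.builder().build(), Timestamps.EPOCH, Offset.of(offset), /* byteSize= */ 10L);
  }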