Example usage of com.hedera.mirror.grpc.domain.TopicMessage in the project hedera-mirror-node by hashgraph: class ConsensusControllerTest, method fragmentedMessagesGroupAcrossHistoricAndIncoming.
// Verifies that chunked (fragmented) topic messages are reported with correct chunk numbers when a
// fragment set spans the boundary between the historic (repository) query and the live incoming stream.
@Test
void fragmentedMessagesGroupAcrossHistoricAndIncoming() {
Instant now = Instant.now();
// Historic messages persisted before subscribing:
// seq 1: standalone message (no chunk info)
domainBuilder.topicMessage(t -> t.sequenceNumber(1)).block();
// seq 2-3: a complete 2-chunk fragment set, consensus timestamps now+1ns and now+2ns
domainBuilder.topicMessage(t -> t.sequenceNumber(2).chunkNum(1).chunkTotal(2).validStartTimestamp(now).payerAccountId(1L).consensusTimestamp(now.plusNanos(1))).block();
domainBuilder.topicMessage(t -> t.sequenceNumber(3).chunkNum(2).chunkTotal(2).validStartTimestamp(now.plusNanos(1)).payerAccountId(1L).consensusTimestamp(now.plusNanos(2))).block();
// seq 4: another standalone message
domainBuilder.topicMessage(t -> t.sequenceNumber(4).consensusTimestamp(now.plusNanos(3))).block();
// seq 5: first chunk of a 3-chunk set; chunks 2 and 3 arrive later via the incoming stream
domainBuilder.topicMessage(t -> t.sequenceNumber(5).chunkNum(1).chunkTotal(3).validStartTimestamp(now.plusNanos(3)).payerAccountId(1L).consensusTimestamp(now.plusNanos(4))).block();
// fragment message split across historic and incoming
// seq 6-7 complete the 3-chunk set (seq 7 carries an explicit initialTransactionId); seq 8 is standalone
Flux<TopicMessage> generator = Flux.concat(domainBuilder.topicMessage(t -> t.sequenceNumber(6).chunkNum(2).chunkTotal(3).validStartTimestamp(now.plusNanos(4)).payerAccountId(1L).consensusTimestamp(now.plusSeconds(5)).initialTransactionId(null)), domainBuilder.topicMessage(t -> t.sequenceNumber(7).chunkNum(3).chunkTotal(3).validStartTimestamp(now.plusNanos(5)).payerAccountId(1L).consensusTimestamp(now.plusSeconds(6)).initialTransactionId(new byte[] { 1, 2 })), domainBuilder.topicMessage(t -> t.sequenceNumber(8).consensusTimestamp(now.plusSeconds(7))));
// Subscribe from consensus time 0 on topic 100 so all historic messages are replayed first.
ConsensusTopicQuery query = ConsensusTopicQuery.newBuilder().setConsensusStartTime(Timestamp.newBuilder().setSeconds(0).build()).setTopicID(TopicID.newBuilder().setRealmNum(0).setTopicNum(100).build()).build();
// Map each response to its chunk number (0 when the message has no chunk info), then assert:
// historic replay yields 0, 1, 2, 0, 1 (seqs 1-5); after the generator emits, the incoming
// stream yields 2, 3 (seqs 6-7, completing the fragment set) and 0 (seq 8).
grpcConsensusService.subscribeTopic(Mono.just(query)).map(x -> x.hasChunkInfo() ? x.getChunkInfo().getNumber() : 0).as(StepVerifier::create).thenAwait(Duration.ofMillis(100)).expectNext(0, 1, 2, 0, 1).then(generator::blockLast).expectNext(2, 3, // incoming messages
0).thenCancel().verify(Duration.ofMillis(2000));
}
Example usage of com.hedera.mirror.grpc.domain.TopicMessage in the project hedera-mirror-node by hashgraph: class TopicMessageServiceImpl, method missingMessages.
/**
 * A flow can have missing messages if the importer is down for a long time when the client subscribes. When the
 * incoming flow catches up and receives the next message for the topic, it will fill in any missing messages from
 * when it was down.
 */
private Flux<TopicMessage> missingMessages(TopicContext topicContext, TopicMessage current) {
    // No gap: the current message directly follows the last one seen, so pass it through unchanged.
    if (topicContext.isNext(current)) {
        return Flux.just(current);
    }

    TopicMessage previous = topicContext.getLast();
    long gap = current.getSequenceNumber() - previous.getSequenceNumber() - 1;

    // fail fast on out of order messages
    if (gap < -1) {
        throw new IllegalStateException(String.format("Encountered out of order missing messages, last: %s, current: %s", previous, current));
    }

    // ignore duplicate message already processed by larger subscribe context
    if (gap == -1) {
        log.debug("Encountered duplicate missing message to be ignored, last: {}, current: {}", previous, current);
        return Flux.empty();
    }

    // Query the repository for exactly the messages in the gap: start just past the last seen
    // consensus timestamp, end at the current message's timestamp, limited to the gap size.
    TopicMessageFilter gapFilter = topicContext.getFilter()
            .toBuilder()
            .endTime(current.getConsensusTimestampInstant())
            .limit(gap)
            .startTime(previous.getConsensusTimestampInstant().plusNanos(1))
            .build();

    log.info("[{}] Querying topic {} for missing messages between sequence {} and {}", gapFilter.getSubscriberId(), topicContext.getTopicId(), previous.getSequenceNumber(), current.getSequenceNumber());

    // Emit the recovered messages followed by the current one, instrumented for metrics.
    return topicMessageRetriever.retrieve(gapFilter, false)
            .concatWithValues(current)
            .name("findMissing")
            .metrics();
}
Example usage of com.hedera.mirror.grpc.domain.TopicMessage in the project hedera-mirror-node by hashgraph: class PollingTopicListener, method poll.
/**
 * Executes one repository poll for the next page of topic messages, resuming just past the last
 * message already delivered (or from the filter's start time on the first poll).
 */
private Flux<TopicMessage> poll(PollingContext context) {
    TopicMessageFilter filter = context.getFilter();
    TopicMessage lastSeen = context.getLast();

    // Remaining budget when the subscriber set a limit; otherwise effectively unbounded.
    int remaining = filter.hasLimit() ? (int) (filter.getLimit() - context.getCount().get()) : Integer.MAX_VALUE;
    int pageSize = Math.min(remaining, listenerProperties.getMaxPageSize());

    // Resume one nanosecond after the last consensus timestamp so the same message is not re-read.
    Instant resumeFrom = (lastSeen == null) ? filter.getStartTime() : lastSeen.getConsensusTimestampInstant().plusNanos(1);

    TopicMessageFilter pageFilter = filter.toBuilder()
            .limit(pageSize)
            .startTime(resumeFrom)
            .build();
    return Flux.fromStream(topicMessageRepository.findByFilter(pageFilter)).name("findByFilter").metrics();
}
Aggregations