Use of org.apache.pulsar.common.api.proto.MessageIdData in project pulsar by apache.
The class ConsumerImpl, method messageReceived.
void messageReceived(CommandMessage cmdMessage, ByteBuf headersAndPayload, ClientCnx cnx) {
    List<Long> ackSet = Collections.emptyList();
    if (cmdMessage.getAckSetsCount() > 0) {
        ackSet = new ArrayList<>(cmdMessage.getAckSetsCount());
        for (int i = 0; i < cmdMessage.getAckSetsCount(); i++) {
            ackSet.add(cmdMessage.getAckSetAt(i));
        }
    }
    int redeliveryCount = cmdMessage.getRedeliveryCount();
    MessageIdData messageId = cmdMessage.getMessageId();
    long consumerEpoch = DEFAULT_CONSUMER_EPOCH;
    // if the broker sent this message with a consumer epoch, propagate it to the message
    if (cmdMessage.hasConsumerEpoch()) {
        consumerEpoch = cmdMessage.getConsumerEpoch();
    }
    if (log.isDebugEnabled()) {
        log.debug("[{}][{}] Received message: {}/{}", topic, subscription,
                messageId.getLedgerId(), messageId.getEntryId());
    }
    if (!verifyChecksum(headersAndPayload, messageId)) {
        // discard message with checksum error
        discardCorruptedMessage(messageId, cnx, ValidationError.ChecksumMismatch);
        return;
    }
    BrokerEntryMetadata brokerEntryMetadata;
    MessageMetadata msgMetadata;
    try {
        brokerEntryMetadata = Commands.parseBrokerEntryMetadataIfExist(headersAndPayload);
        msgMetadata = Commands.parseMessageMetadata(headersAndPayload);
    } catch (Throwable t) {
        discardCorruptedMessage(messageId, cnx, ValidationError.ChecksumMismatch);
        return;
    }
    final int numMessages = msgMetadata.getNumMessagesInBatch();
    final int numChunks = msgMetadata.hasNumChunksFromMsg() ? msgMetadata.getNumChunksFromMsg() : 0;
    final boolean isChunkedMessage = numChunks > 1 && conf.getSubscriptionType() != SubscriptionType.Shared;
    MessageIdImpl msgId = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), getPartitionIndex());
    if (acknowledgmentsGroupingTracker.isDuplicate(msgId)) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] [{}] Ignoring message as it was already being acked earlier by same consumer {}/{}",
                    topic, subscription, consumerName, msgId);
        }
        increaseAvailablePermits(cnx, numMessages);
        return;
    }
    ByteBuf decryptedPayload = decryptPayloadIfNeeded(messageId, redeliveryCount, msgMetadata, headersAndPayload, cnx);
    boolean isMessageUndecryptable = isMessageUndecryptable(msgMetadata);
    if (decryptedPayload == null) {
        // Message was discarded or CryptoKeyReader isn't implemented
        return;
    }
    // uncompress decryptedPayload and release decryptedPayload-ByteBuf
    ByteBuf uncompressedPayload = (isMessageUndecryptable || isChunkedMessage)
            ? decryptedPayload.retain()
            : uncompressPayloadIfNeeded(messageId, msgMetadata, decryptedPayload, cnx, true);
    decryptedPayload.release();
    if (uncompressedPayload == null) {
        // Message was discarded on decompression error
        return;
    }
    if (conf.getPayloadProcessor() != null) {
        // uncompressedPayload is released in this method so we don't need to call release() again
        processPayloadByProcessor(brokerEntryMetadata, msgMetadata, uncompressedPayload, msgId,
                schema, redeliveryCount, ackSet, consumerEpoch);
        return;
    }
    // If the message is undecryptable, it cannot be parsed as a batch; deliver the undecrypted payload as-is.
    if (isMessageUndecryptable || (numMessages == 1 && !msgMetadata.hasNumMessagesInBatch())) {
        // right now, chunked messages are only supported by non-shared subscription
        if (isChunkedMessage) {
            uncompressedPayload = processMessageChunk(uncompressedPayload, msgMetadata, msgId, messageId, cnx);
            if (uncompressedPayload == null) {
                return;
            }
            // last chunk received: so, stitch chunked-messages and clear up chunkedMsgBuffer
            if (log.isDebugEnabled()) {
                log.debug("Chunked message completed chunkId {}, total-chunks {}, msgId {} sequenceId {}",
                        msgMetadata.getChunkId(), msgMetadata.getNumChunksFromMsg(), msgId, msgMetadata.getSequenceId());
            }
            // remove buffer from the map, set the chunk message id
            ChunkedMessageCtx chunkedMsgCtx = chunkedMessagesMap.remove(msgMetadata.getUuid());
            if (chunkedMsgCtx.chunkedMessageIds.length > 0) {
                msgId = new ChunkMessageIdImpl(chunkedMsgCtx.chunkedMessageIds[0],
                        chunkedMsgCtx.chunkedMessageIds[chunkedMsgCtx.chunkedMessageIds.length - 1]);
            }
            // add chunked messageId to unack-message tracker, and reduce pending-chunked-message count
            unAckedChunkedMessageIdSequenceMap.put(msgId, chunkedMsgCtx.chunkedMessageIds);
            pendingChunkedMessageCount--;
            chunkedMsgCtx.recycle();
        }
        // If the topic is non-persistent, we should not ignore any messages.
        if (this.topicName.isPersistent() && isSameEntry(msgId) && isPriorEntryIndex(messageId.getEntryId())) {
            // We need to discard entries that were prior to startMessageId
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Ignoring message from before the startMessageId: {}",
                        subscription, consumerName, startMessageId);
            }
            uncompressedPayload.release();
            return;
        }
        final MessageImpl<T> message = newMessage(msgId, brokerEntryMetadata, msgMetadata,
                uncompressedPayload, schema, redeliveryCount, consumerEpoch);
        uncompressedPayload.release();
        if (deadLetterPolicy != null && possibleSendToDeadLetterTopicMessages != null
                && redeliveryCount >= deadLetterPolicy.getMaxRedeliverCount()) {
            possibleSendToDeadLetterTopicMessages.put((MessageIdImpl) message.getMessageId(),
                    Collections.singletonList(message));
        }
        executeNotifyCallback(message);
    } else {
        // handle batch message enqueuing; uncompressed payload has all messages in batch
        receiveIndividualMessagesFromBatch(brokerEntryMetadata, msgMetadata, redeliveryCount,
                ackSet, uncompressedPayload, messageId, cnx, consumerEpoch);
        uncompressedPayload.release();
    }
    tryTriggerListener();
}
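The ackSet longs carried on the command are, by Pulsar's convention, the word array of a java.util.BitSet in which set bits mark the batch positions that have not yet been acknowledged. A minimal decoding sketch (the helper name is hypothetical):

import java.util.BitSet;
import java.util.List;

// Decode the ack set delivered with a batch message; set bits mark the
// batch indexes that still need to be acknowledged.
static BitSet decodeAckSet(List<Long> ackSet) {
    long[] words = new long[ackSet.size()];
    for (int i = 0; i < words.length; i++) {
        words[i] = ackSet.get(i);
    }
    return BitSet.valueOf(words);
}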
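verifyChecksum guards against corrupted frames: on the wire, Pulsar prefixes the metadata-and-payload section with a 2-byte magic number and a 4-byte CRC32C checksum computed over the bytes that follow it. A simplified sketch of that check over a byte array, assuming this layout (the real client verifies the ByteBuf in place):

import java.util.zip.CRC32C;

// Verify a frame laid out as [2-byte magic][4-byte CRC32C][metadata + payload].
static boolean verifyCrc32c(byte[] frame) {
    int stored = ((frame[2] & 0xff) << 24) | ((frame[3] & 0xff) << 16)
            | ((frame[4] & 0xff) << 8) | (frame[5] & 0xff);
    CRC32C crc = new CRC32C();
    crc.update(frame, 6, frame.length - 6); // checksum covers everything after itself
    return stored == (int) crc.getValue();
}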
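For chunked messages, processMessageChunk buffers chunks that share a producer-assigned UUID until the last one arrives, then returns the stitched payload. A minimal sketch of that reassembly with hypothetical names (the real code additionally enforces pending-chunk limits and expiry):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.util.HashMap;
import java.util.Map;

final Map<String, ByteBuf> chunkBuffers = new HashMap<>();

// Append one chunk; returns the complete payload when the last chunk arrives,
// or null while reassembly is still in progress.
ByteBuf appendChunk(String uuid, int chunkId, int numChunks, ByteBuf chunk) {
    ByteBuf assembled = chunkBuffers.computeIfAbsent(uuid, k -> Unpooled.buffer());
    assembled.writeBytes(chunk);
    if (chunkId == numChunks - 1) {
        return chunkBuffers.remove(uuid);
    }
    return null;
}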
Use of org.apache.pulsar.common.api.proto.MessageIdData in project pulsar by apache.
The class ConsumerImpl, method getRedeliveryMessageIdData.
private CompletableFuture<List<MessageIdData>> getRedeliveryMessageIdData(List<MessageIdImpl> messageIds) {
    if (messageIds == null || messageIds.isEmpty()) {
        return CompletableFuture.completedFuture(Collections.emptyList());
    }
    List<MessageIdData> data = new ArrayList<>(messageIds.size());
    List<CompletableFuture<Void>> futures = new ArrayList<>(messageIds.size());
    messageIds.forEach(messageId -> {
        CompletableFuture<Boolean> future = processPossibleToDLQ(messageId);
        futures.add(future.thenAccept(sendToDLQ -> {
            if (!sendToDLQ) {
                data.add(new MessageIdData()
                        .setPartition(messageId.getPartitionIndex())
                        .setLedgerId(messageId.getLedgerId())
                        .setEntryId(messageId.getEntryId()));
            }
        }));
    });
    return FutureUtil.waitForAll(futures).thenCompose(v -> CompletableFuture.completedFuture(data));
}
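FutureUtil.waitForAll is Pulsar's thin helper over CompletableFuture.allOf; a minimal stand-in covering only the behavior used above would be:

import java.util.List;
import java.util.concurrent.CompletableFuture;

static CompletableFuture<Void> waitForAll(List<CompletableFuture<Void>> futures) {
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
}

Note that the trailing thenCompose(v -> CompletableFuture.completedFuture(data)) is equivalent to the simpler thenApply(v -> data).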
Use of org.apache.pulsar.common.api.proto.MessageIdData in project pulsar by apache.
The class ConsumerImpl, method internalGetLastMessageIdAsync.
private void internalGetLastMessageIdAsync(final Backoff backoff, final AtomicLong remainingTime,
                                           CompletableFuture<GetLastMessageIdResponse> future) {
    ClientCnx cnx = cnx();
    if (isConnected() && cnx != null) {
        if (!Commands.peerSupportsGetLastMessageId(cnx.getRemoteEndpointProtocolVersion())) {
            future.completeExceptionally(new PulsarClientException.NotSupportedException(String.format(
                    "The command `GetLastMessageId` is not supported for the protocol version %d. "
                            + "The consumer is %s, topic %s, subscription %s",
                    cnx.getRemoteEndpointProtocolVersion(), consumerName, topicName.toString(), subscription)));
            return;
        }
        long requestId = client.newRequestId();
        ByteBuf getLastIdCmd = Commands.newGetLastMessageId(consumerId, requestId);
        log.info("[{}][{}] Get topic last message Id", topic, subscription);
        cnx.sendGetLastMessageId(getLastIdCmd, requestId).thenAccept(cmd -> {
            MessageIdData lastMessageId = cmd.getLastMessageId();
            MessageIdImpl markDeletePosition = null;
            if (cmd.hasConsumerMarkDeletePosition()) {
                markDeletePosition = new MessageIdImpl(cmd.getConsumerMarkDeletePosition().getLedgerId(),
                        cmd.getConsumerMarkDeletePosition().getEntryId(), -1);
            }
            log.info("[{}][{}] Successfully getLastMessageId {}:{}", topic, subscription,
                    lastMessageId.getLedgerId(), lastMessageId.getEntryId());
            MessageId lastMsgId = lastMessageId.getBatchIndex() <= 0
                    ? new MessageIdImpl(lastMessageId.getLedgerId(), lastMessageId.getEntryId(),
                            lastMessageId.getPartition())
                    : new BatchMessageIdImpl(lastMessageId.getLedgerId(), lastMessageId.getEntryId(),
                            lastMessageId.getPartition(), lastMessageId.getBatchIndex());
            future.complete(new GetLastMessageIdResponse(lastMsgId, markDeletePosition));
        }).exceptionally(e -> {
            log.error("[{}][{}] Failed getLastMessageId command", topic, subscription);
            future.completeExceptionally(PulsarClientException.wrap(e.getCause(), String.format(
                    "Failed to get the last message id of the topic %s for the subscription %s",
                    topicName.toString(), subscription)));
            return null;
        });
    } else {
        long nextDelay = Math.min(backoff.next(), remainingTime.get());
        if (nextDelay <= 0) {
            future.completeExceptionally(new PulsarClientException.TimeoutException(String.format(
                    "The subscription %s of the topic %s could not get the last message id "
                            + "within the configured timeout", subscription, topicName.toString())));
            return;
        }
        internalPinnedExecutor.schedule(() -> {
            log.warn("[{}] [{}] Could not get connection while getLastMessageId -- Will try again in {} ms",
                    topic, getHandlerName(), nextDelay);
            remainingTime.addAndGet(-nextDelay);
            internalGetLastMessageIdAsync(backoff, remainingTime, future);
        }, nextDelay, TimeUnit.MILLISECONDS);
    }
}
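The not-connected branch implements retry with a capped backoff: each attempt consumes part of a shared time budget, and the operation fails once the budget is exhausted. A condensed sketch of the pattern, assuming Pulsar's Backoff where next() yields the next delay in milliseconds and a hypothetical attempt callback:

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

void retryWithBudget(Backoff backoff, AtomicLong remainingTime,
                     ScheduledExecutorService executor, Runnable attempt) {
    long nextDelay = Math.min(backoff.next(), remainingTime.get());
    if (nextDelay <= 0) {
        return; // budget exhausted; the real code completes the future with a TimeoutException
    }
    executor.schedule(() -> {
        remainingTime.addAndGet(-nextDelay); // charge this attempt against the budget
        attempt.run();
    }, nextDelay, TimeUnit.MILLISECONDS);
}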
Use of org.apache.pulsar.common.api.proto.MessageIdData in project pulsar by apache.
The class ConsumerImpl, method connectionOpened.
@Override
public void connectionOpened(final ClientCnx cnx) {
    previousExceptions.clear();
    if (getState() == State.Closing || getState() == State.Closed) {
        setState(State.Closed);
        closeConsumerTasks();
        deregisterFromClientCnx();
        client.cleanupConsumer(this);
        clearReceiverQueue();
        return;
    }
    log.info("[{}][{}] Subscribing to topic on cnx {}, consumerId {}",
            topic, subscription, cnx.ctx().channel(), consumerId);
    long requestId = client.newRequestId();
    if (duringSeek.get()) {
        acknowledgmentsGroupingTracker.flushAndClean();
    }
    SUBSCRIBE_DEADLINE_UPDATER.compareAndSet(this, 0L,
            System.currentTimeMillis() + client.getConfiguration().getOperationTimeoutMs());
    int currentSize;
    synchronized (this) {
        currentSize = incomingMessages.size();
        startMessageId = clearReceiverQueue();
        if (possibleSendToDeadLetterTopicMessages != null) {
            possibleSendToDeadLetterTopicMessages.clear();
        }
    }
    boolean isDurable = subscriptionMode == SubscriptionMode.Durable;
    final MessageIdData startMessageIdData;
    // For a non-durable subscription we are going to restart from the next entry.
    if (!isDurable && startMessageId != null) {
        startMessageIdData = new MessageIdData()
                .setLedgerId(startMessageId.getLedgerId())
                .setEntryId(startMessageId.getEntryId())
                .setBatchIndex(startMessageId.getBatchIndex());
    } else {
        startMessageIdData = null;
    }
    SchemaInfo si = schema.getSchemaInfo();
    if (si != null && (SchemaType.BYTES == si.getType() || SchemaType.NONE == si.getType())) {
        // don't set schema for Schema.BYTES
        si = null;
    }
    // startMessageRollbackDurationInSec should be considered only once, when the consumer connects for the first time
    long startMessageRollbackDuration = (startMessageRollbackDurationInSec > 0 && startMessageId != null
            && startMessageId.equals(initialStartMessageId)) ? startMessageRollbackDurationInSec : 0;
    // synchronized on this, because redeliverUnAckedMessages must not observe an inconsistent epoch
    synchronized (this) {
        setClientCnx(cnx);
        ByteBuf request = Commands.newSubscribe(topic, subscription, consumerId, requestId, getSubType(),
                priorityLevel, consumerName, isDurable, startMessageIdData, metadata, readCompacted,
                conf.isReplicateSubscriptionState(), InitialPosition.valueOf(subscriptionInitialPosition.getValue()),
                startMessageRollbackDuration, si, createTopicIfDoesNotExist, conf.getKeySharedPolicy(),
                conf.getSubscriptionProperties(),
                // Use the current epoch to subscribe.
                CONSUMER_EPOCH.get(this));
        cnx.sendRequestWithId(request, requestId).thenRun(() -> {
            synchronized (ConsumerImpl.this) {
                if (changeToReadyState()) {
                    consumerIsReconnectedToBroker(cnx, currentSize);
                } else {
                    // Consumer was closed while reconnecting, close the connection to make sure the broker
                    // drops the consumer on its side
                    setState(State.Closed);
                    deregisterFromClientCnx();
                    client.cleanupConsumer(this);
                    cnx.channel().close();
                    return;
                }
            }
            resetBackoff();
            boolean firstTimeConnect = subscribeFuture.complete(this);
            // Unless this is the first connection of a partitioned consumer's child, send the flow
            // command to receive messages.
            if (!(firstTimeConnect && hasParentConsumer) && getCurrentReceiverQueueSize() != 0) {
                increaseAvailablePermits(cnx, getCurrentReceiverQueueSize());
            }
        }).exceptionally((e) -> {
            deregisterFromClientCnx();
            if (getState() == State.Closing || getState() == State.Closed) {
                // Consumer was closed while reconnecting, close the connection to make sure the broker
                // drops the consumer on its side
                cnx.channel().close();
                return null;
            }
            log.warn("[{}][{}] Failed to subscribe to topic on {}", topic, subscription,
                    cnx.channel().remoteAddress());
            if (e.getCause() instanceof PulsarClientException
                    && PulsarClientException.isRetriableError(e.getCause())
                    && System.currentTimeMillis() < SUBSCRIBE_DEADLINE_UPDATER.get(ConsumerImpl.this)) {
                reconnectLater(e.getCause());
            } else if (!subscribeFuture.isDone()) {
                // unable to create new consumer, fail operation
                setState(State.Failed);
                closeConsumerTasks();
                subscribeFuture.completeExceptionally(PulsarClientException.wrap(e, String.format(
                        "Failed to subscribe the topic %s with subscription name %s when connecting to the broker",
                        topicName.toString(), subscription)));
                client.cleanupConsumer(this);
            } else if (e.getCause() instanceof TopicDoesNotExistException) {
                // The topic was deleted after the consumer was created, and we're
                // not allowed to recreate the topic. This can happen in a few cases:
                //  * Regex consumer getting an error after the topic gets deleted
                //  * Regular consumer after the topic is manually deleted and with
                //    auto-topic-creation set to false
                // No more retries are needed in this case.
                setState(State.Failed);
                closeConsumerTasks();
                client.cleanupConsumer(this);
                log.warn("[{}][{}] Closed consumer because topic does not exist anymore {}",
                        topic, subscription, cnx.channel().remoteAddress());
            } else {
                // consumer was subscribed and connected but we got some error, keep trying
                reconnectLater(e.getCause());
            }
            return null;
        });
    }
}
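Both messageReceived and the subscribe callback above feed Pulsar's permit-based flow control: the consumer grants the broker permits via a FLOW command, and the broker pushes at most that many messages. A simplified sketch of the accounting behind increaseAvailablePermits (field names and the refill threshold here are assumptions, not the exact implementation):

import java.util.concurrent.atomic.AtomicInteger;

private final AtomicInteger availablePermits = new AtomicInteger(0);

// Accumulate permits locally and top up the broker once enough have built up,
// so FLOW commands are batched rather than sent per message.
void increasePermits(int delta, int receiverQueueSize) {
    int available = availablePermits.addAndGet(delta);
    if (available >= receiverQueueSize / 2) {
        availablePermits.addAndGet(-available);
        // here the real client sends a FLOW command for `available` permits on the connection
    }
}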
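The exceptionally branch retries only while the error is retriable and the subscribe deadline recorded earlier has not passed. Extracted as a hypothetical helper, the decision is simply:

// Retry subscribing only for retriable errors within the subscribe deadline.
boolean shouldRetrySubscribe(Throwable cause, long subscribeDeadlineMillis) {
    return cause instanceof PulsarClientException
            && PulsarClientException.isRetriableError(cause)
            && System.currentTimeMillis() < subscribeDeadlineMillis;
}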
Use of org.apache.pulsar.common.api.proto.MessageIdData in project pulsar by apache.
The class MessageIdImpl, method toByteArray.
// batchIndex is -1 if the message is non-batched, and holds the batch index for a batch message
protected byte[] toByteArray(int batchIndex, int batchSize) {
    MessageIdData msgId = writeMessageIdData(null, batchIndex, batchSize);
    int size = msgId.getSerializedSize();
    ByteBuf serialized = Unpooled.buffer(size, size);
    msgId.writeTo(serialized);
    return serialized.array();
}
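MessageIdData is a LightProto-generated class, so serialization is symmetric. A round-trip sketch, assuming the generated parseFrom(ByteBuf, int) method and a public no-argument toByteArray() wrapper on MessageIdImpl:

import io.netty.buffer.Unpooled;
import org.apache.pulsar.common.api.proto.MessageIdData;

byte[] bytes = messageId.toByteArray();          // serialize via the method above
MessageIdData parsed = new MessageIdData();
parsed.parseFrom(Unpooled.wrappedBuffer(bytes), bytes.length);
// parsed now holds the same ledgerId / entryId / partition / batchIndex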