Use of org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats.CursorStats in project pulsar by apache.
The class PersistentTopic, method getInternalStats:
@Override
public CompletableFuture<PersistentTopicInternalStats> getInternalStats(boolean includeLedgerMetadata) {
    CompletableFuture<PersistentTopicInternalStats> statFuture = new CompletableFuture<>();
    PersistentTopicInternalStats stats = new PersistentTopicInternalStats();
    ManagedLedgerImpl ml = (ManagedLedgerImpl) ledger;
    stats.entriesAddedCounter = ml.getEntriesAddedCounter();
    stats.numberOfEntries = ml.getNumberOfEntries();
    stats.totalSize = ml.getTotalSize();
    stats.currentLedgerEntries = ml.getCurrentLedgerEntries();
    stats.currentLedgerSize = ml.getCurrentLedgerSize();
    stats.lastLedgerCreatedTimestamp = DateFormatter.format(ml.getLastLedgerCreatedTimestamp());
    if (ml.getLastLedgerCreationFailureTimestamp() != 0) {
        stats.lastLedgerCreationFailureTimestamp = DateFormatter.format(ml.getLastLedgerCreationFailureTimestamp());
    }
    stats.waitingCursorsCount = ml.getWaitingCursorsCount();
    stats.pendingAddEntriesCount = ml.getPendingAddEntriesCount();
    stats.lastConfirmedEntry = ml.getLastConfirmedEntry().toString();
    stats.state = ml.getState().toString();
    stats.ledgers = Lists.newArrayList();
    List<CompletableFuture<String>> futures = Lists.newArrayList();
    CompletableFuture<Set<String>> availableBookiesFuture =
            brokerService.pulsar().getPulsarResources().getBookieResources().listAvailableBookiesAsync();
    futures.add(availableBookiesFuture.handle((strings, throwable) -> null));
    availableBookiesFuture.whenComplete((bookies, e) -> {
        if (e != null) {
            log.error("[{}] Failed to fetch available bookies.", topic, e);
            statFuture.completeExceptionally(e);
        } else {
            ml.getLedgersInfo().forEach((id, li) -> {
                LedgerInfo info = new LedgerInfo();
                info.ledgerId = li.getLedgerId();
                info.entries = li.getEntries();
                info.size = li.getSize();
                info.offloaded = li.hasOffloadContext() && li.getOffloadContext().getComplete();
                stats.ledgers.add(info);
                if (includeLedgerMetadata) {
                    futures.add(ml.getLedgerMetadata(li.getLedgerId()).handle((lMetadata, ex) -> {
                        if (ex == null) {
                            info.metadata = lMetadata;
                        }
                        return null;
                    }));
                    futures.add(ml.getEnsemblesAsync(li.getLedgerId()).handle((ensembles, ex) -> {
                        if (ex == null) {
                            // A ledger is flagged under-replicated when any bookie in its
                            // ensembles is missing from the cluster's available bookies.
                            info.underReplicated = !bookies.containsAll(ensembles.stream()
                                    .map(BookieId::toString).collect(Collectors.toList()));
                        }
                        return null;
                    }));
                }
            });
        }
    });
    // Add ledger info for the compacted topic ledger, if one exists.
    LedgerInfo info = new LedgerInfo();
    info.ledgerId = -1;
    info.entries = -1;
    info.size = -1;
    Optional<CompactedTopicContext> compactedTopicContext = getCompactedTopicContext();
    if (compactedTopicContext.isPresent()) {
        CompactedTopicContext ledgerContext = compactedTopicContext.get();
        info.ledgerId = ledgerContext.getLedger().getId();
        info.entries = ledgerContext.getLedger().getLastAddConfirmed() + 1;
        info.size = ledgerContext.getLedger().getLength();
    }
    stats.compactedLedger = info;
    stats.cursors = Maps.newTreeMap();
    ml.getCursors().forEach(c -> {
        ManagedCursorImpl cursor = (ManagedCursorImpl) c;
        CursorStats cs = new CursorStats();
        cs.markDeletePosition = cursor.getMarkDeletedPosition().toString();
        cs.readPosition = cursor.getReadPosition().toString();
        cs.waitingReadOp = cursor.hasPendingReadRequest();
        cs.pendingReadOps = cursor.getPendingReadOpsCount();
        cs.messagesConsumedCounter = cursor.getMessagesConsumedCounter();
        cs.cursorLedger = cursor.getCursorLedger();
        cs.cursorLedgerLastEntry = cursor.getCursorLedgerLastEntry();
        cs.individuallyDeletedMessages = cursor.getIndividuallyDeletedMessages();
        cs.lastLedgerSwitchTimestamp = DateFormatter.format(cursor.getLastLedgerSwitchTimestamp());
        cs.state = cursor.getState();
        cs.numberOfEntriesSinceFirstNotAckedMessage = cursor.getNumberOfEntriesSinceFirstNotAckedMessage();
        cs.totalNonContiguousDeletedMessagesRange = cursor.getTotalNonContiguousDeletedMessagesRange();
        cs.properties = cursor.getProperties();
        // Subscription metrics
        PersistentSubscription sub = subscriptions.get(Codec.decode(c.getName()));
        if (sub != null) {
            if (sub.getDispatcher() instanceof PersistentDispatcherMultipleConsumers) {
                PersistentDispatcherMultipleConsumers dispatcher =
                        (PersistentDispatcherMultipleConsumers) sub.getDispatcher();
                cs.subscriptionHavePendingRead = dispatcher.havePendingRead;
                cs.subscriptionHavePendingReplayRead = dispatcher.havePendingReplayRead;
            } else if (sub.getDispatcher() instanceof PersistentDispatcherSingleActiveConsumer) {
                PersistentDispatcherSingleActiveConsumer dispatcher =
                        (PersistentDispatcherSingleActiveConsumer) sub.getDispatcher();
                cs.subscriptionHavePendingRead = dispatcher.havePendingRead;
            }
        }
        stats.cursors.put(cursor.getName(), cs);
    });
    // Schema store ledgers
    String schemaId;
    try {
        schemaId = TopicName.get(topic).getSchemaName();
    } catch (Throwable t) {
        statFuture.completeExceptionally(t);
        return statFuture;
    }
    CompletableFuture<Void> schemaStoreLedgersFuture = new CompletableFuture<>();
    stats.schemaLedgers = Collections.synchronizedList(new ArrayList<>());
    if (brokerService.getPulsar().getSchemaStorage() != null
            && brokerService.getPulsar().getSchemaStorage() instanceof BookkeeperSchemaStorage) {
        ((BookkeeperSchemaStorage) brokerService.getPulsar().getSchemaStorage())
                .getStoreLedgerIdsBySchemaId(schemaId).thenAccept(ledgers -> {
            List<CompletableFuture<Void>> getLedgerMetadataFutures = new ArrayList<>();
            ledgers.forEach(ledgerId -> {
                CompletableFuture<Void> completableFuture = new CompletableFuture<>();
                getLedgerMetadataFutures.add(completableFuture);
                CompletableFuture<LedgerMetadata> metadataFuture = null;
                try {
                    metadataFuture = brokerService.getPulsar().getBookKeeperClient().getLedgerMetadata(ledgerId);
                } catch (NullPointerException e) {
                    // Related to BookKeeper issue https://github.com/apache/bookkeeper/issues/2741
                    if (log.isDebugEnabled()) {
                        log.debug("[{}] Failed to get ledger metadata for the schema ledger {}", topic, ledgerId, e);
                    }
                }
                if (metadataFuture != null) {
                    metadataFuture.thenAccept(metadata -> {
                        LedgerInfo schemaLedgerInfo = new LedgerInfo();
                        schemaLedgerInfo.ledgerId = metadata.getLedgerId();
                        schemaLedgerInfo.entries = metadata.getLastEntryId() + 1;
                        schemaLedgerInfo.size = metadata.getLength();
                        if (includeLedgerMetadata) {
                            schemaLedgerInfo.metadata = metadata.toSafeString();
                        }
                        stats.schemaLedgers.add(schemaLedgerInfo);
                        completableFuture.complete(null);
                    }).exceptionally(e -> {
                        completableFuture.completeExceptionally(e);
                        return null;
                    });
                } else {
                    completableFuture.complete(null);
                }
            });
            FutureUtil.waitForAll(getLedgerMetadataFutures).thenRun(() -> {
                schemaStoreLedgersFuture.complete(null);
            }).exceptionally(e -> {
                schemaStoreLedgersFuture.completeExceptionally(e);
                return null;
            });
        }).exceptionally(e -> {
            schemaStoreLedgersFuture.completeExceptionally(e);
            return null;
        });
    } else {
        schemaStoreLedgersFuture.complete(null);
    }
    schemaStoreLedgersFuture.thenRun(() -> {
        if (futures != null) {
            FutureUtil.waitForAll(futures).handle((res, ex) -> {
                statFuture.complete(stats);
                return null;
            });
        } else {
            statFuture.complete(stats);
        }
    }).exceptionally(e -> {
        statFuture.completeExceptionally(e);
        return null;
    });
    return statFuture;
}
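For context, here is a minimal sketch of how these internal stats are typically read from the client side through the Java admin API; the HTTP service URL, topic, and subscription names are made up for illustration, and the fields accessed are the same CursorStats fields the method above populates.

import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats.CursorStats;
import org.apache.pulsar.common.policies.data.PersistentTopicInternalStats;

public class CursorStatsExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical broker admin URL, topic, and subscription names.
        try (PulsarAdmin admin = PulsarAdmin.builder()
                .serviceHttpUrl("http://localhost:8080")
                .build()) {
            PersistentTopicInternalStats stats =
                    admin.topics().getInternalStats("persistent://public/default/my-topic");
            CursorStats cursor = stats.cursors.get("my-sub");
            if (cursor != null) {
                // Fields filled in by PersistentTopic#getInternalStats above.
                System.out.println("markDeletePosition: " + cursor.markDeletePosition);
                System.out.println("readPosition: " + cursor.readPosition);
                System.out.println("entries since first unacked: "
                        + cursor.numberOfEntriesSinceFirstNotAckedMessage);
                System.out.println("non-contiguous deleted ranges: "
                        + cursor.totalNonContiguousDeletedMessagesRange);
            }
        }
    }
}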
Use of org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats.CursorStats in project pulsar by yahoo.
The class PulsarBrokerStatsClientTest, method testTopicInternalStats:
@Test
public void testTopicInternalStats() throws Exception {
    log.info("-- Starting {} test --", methodName);
    final String topicName = "persistent://my-property/my-ns/my-topic1";
    final String subscriptionName = "my-subscriber-name";
    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName)
            .acknowledgmentGroupTime(0, TimeUnit.SECONDS).subscribe();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).create();
    final int numberOfMsgs = 1000;
    for (int i = 0; i < numberOfMsgs; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }
    Message<byte[]> msg;
    int count = 0;
    for (int i = 0; i < numberOfMsgs; i++) {
        msg = consumer.receive(5, TimeUnit.SECONDS);
        if (msg != null && count++ % 2 == 0) {
            consumer.acknowledge(msg);
        }
    }
    PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getOrCreateTopic(topicName).get();
    PersistentTopicInternalStats internalStats = topic.getInternalStats(true).get();
    assertNotNull(internalStats.ledgers.get(0).metadata);
    // For the mock test, the default ensemble is ["192.0.2.1:1234", "192.0.2.2:1234", "192.0.2.3:1234"],
    // while the registered bookie ID is 192.168.1.1:5000, so the ledger is reported as under-replicated.
    assertTrue(internalStats.ledgers.get(0).underReplicated);
    CursorStats cursor = internalStats.cursors.get(subscriptionName);
    assertEquals(cursor.numberOfEntriesSinceFirstNotAckedMessage, numberOfMsgs);
    assertTrue(cursor.totalNonContiguousDeletedMessagesRange > 0
            && cursor.totalNonContiguousDeletedMessagesRange < numberOfMsgs / 2);
    assertFalse(cursor.subscriptionHavePendingRead);
    assertFalse(cursor.subscriptionHavePendingReplayRead);
    producer.close();
    consumer.close();
    log.info("-- Exiting {} test --", methodName);
}
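To make the under-replication assertion above concrete, here is a small self-contained sketch of the same containsAll check that getInternalStats performs, using the ensemble and bookie values quoted in the test comment; the values come from the mock setup only, not a real cluster.

import java.util.List;
import java.util.Set;

public class UnderReplicationCheck {
    public static void main(String[] args) {
        // Mock ledger ensemble and the single registered bookie, as quoted in the test comment.
        List<String> ensemble = List.of("192.0.2.1:1234", "192.0.2.2:1234", "192.0.2.3:1234");
        Set<String> availableBookies = Set.of("192.168.1.1:5000");
        // Same logic as in PersistentTopic#getInternalStats: a ledger is flagged
        // under-replicated when any ensemble member is missing from the available bookies.
        boolean underReplicated = !availableBookies.containsAll(ensemble);
        System.out.println("underReplicated = " + underReplicated); // prints true
    }
}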
Use of org.apache.pulsar.common.policies.data.ManagedLedgerInternalStats.CursorStats in project incubator-pulsar by apache.
The class MessageTTLTest, method testMessageExpiryAfterTopicUnload:
@Test
public void testMessageExpiryAfterTopicUnload() throws Exception {
    int numMsgs = 50;
    final String topicName = "persistent://prop/ns-abc/testttl";
    final String subscriptionName = "ttl-sub-1";
    pulsarClient.newConsumer().topic(topicName).subscriptionName(subscriptionName).subscribe().close();
    Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName)
            // Disabling batching makes the test easier and more predictable.
            .enableBatching(false).create();
    List<CompletableFuture<MessageId>> sendFutureList = Lists.newArrayList();
    for (int i = 0; i < numMsgs; i++) {
        byte[] message = ("my-message-" + i).getBytes();
        sendFutureList.add(producer.sendAsync(message));
    }
    FutureUtil.waitForAll(sendFutureList).get();
    producer.close();
    // Unload and reload the topic. This creates a new ledger; having a managed ledger
    // with more than one ledger should not impact message expiration.
    admin.topics().unload(topicName);
    admin.topics().getStats(topicName);
    PersistentTopicInternalStats internalStatsBeforeExpire = admin.topics().getInternalStats(topicName);
    CursorStats statsBeforeExpire = internalStatsBeforeExpire.cursors.get(subscriptionName);
    log.info("markDeletePosition before expire {}", statsBeforeExpire.markDeletePosition);
    assertEquals(statsBeforeExpire.markDeletePosition, PositionImpl.get(3, -1).toString());
    // Wait in wall-clock time so that the messages are considered "expired".
    Thread.sleep(this.conf.getTtlDurationDefaultInSeconds() * 2000L);
    log.info("***** run message expiry now");
    this.runMessageExpiryCheck();
    // Verify that the markDeletePosition was moved forward, exactly to the last message.
    PersistentTopicInternalStats internalStatsAfterExpire = admin.topics().getInternalStats(topicName);
    CursorStats statsAfterExpire = internalStatsAfterExpire.cursors.get(subscriptionName);
    log.info("markDeletePosition after expire {}", statsAfterExpire.markDeletePosition);
    assertEquals(statsAfterExpire.markDeletePosition, PositionImpl.get(3, numMsgs - 1).toString());
}
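As a reading aid for the two assertions, here is a tiny sketch, assuming PositionImpl keeps its usual "ledgerId:entryId" string form and that the 50 messages land in ledger 3 as the assertions imply:

import org.apache.bookkeeper.mledger.impl.PositionImpl;

public class MarkDeletePositionExample {
    public static void main(String[] args) {
        // Expected mark-delete position before expiry: entry -1 means no entry
        // of ledger 3 has been acknowledged yet.
        System.out.println(PositionImpl.get(3, -1));     // prints 3:-1
        // Expected position after the TTL check: the cursor sits on the last
        // of the 50 messages (entries 0..49 of the same ledger).
        System.out.println(PositionImpl.get(3, 50 - 1)); // prints 3:49
    }
}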