Use of org.apache.pulsar.metadata.api.Stat in project pulsar by apache: class ManagedLedgerTest, method testSetTopicMetadata.
/**
 * Verifies that the properties supplied to {@code MetaStore#getManagedLedgerInfo}
 * are stored and returned through the resulting {@code ManagedLedgerInfo}.
 */
@Test
public void testSetTopicMetadata() throws Exception {
    Map<String, String> properties = new HashMap<>();
    properties.put("key1", "value1");
    properties.put("key2", "value2");
    final MetaStore store = factory.getMetaStore();
    final CountDownLatch latch = new CountDownLatch(1);
    final ManagedLedgerInfo[] storedMLInfo = new ManagedLedgerInfo[1];
    // The callback runs on a metastore thread: an AssertionError thrown there by
    // fail() would be swallowed instead of failing the test. Capture the failure
    // and re-check it on the test thread once the latch is released.
    final java.util.concurrent.atomic.AtomicReference<MetaStoreException> failure =
            new java.util.concurrent.atomic.AtomicReference<>();
    store.getManagedLedgerInfo("my_test_ledger", true, properties, new MetaStoreCallback<ManagedLedgerInfo>() {
        @Override
        public void operationComplete(ManagedLedgerInfo result, Stat version) {
            storedMLInfo[0] = result;
            latch.countDown();
        }

        @Override
        public void operationFailed(MetaStoreException e) {
            failure.set(e);
            latch.countDown();
        }
    });
    // Bounded wait: a lost callback should fail the test, not hang it forever.
    if (!latch.await(30, java.util.concurrent.TimeUnit.SECONDS)) {
        fail("Timed out waiting for getManagedLedgerInfo callback");
    }
    if (failure.get() != null) {
        fail("getManagedLedgerInfo should not have failed: " + failure.get());
    }
    assertEquals(storedMLInfo[0].getPropertiesCount(), 2);
    assertEquals(storedMLInfo[0].getPropertiesList().get(0).getKey(), "key1");
    assertEquals(storedMLInfo[0].getPropertiesList().get(0).getValue(), "value1");
    assertEquals(storedMLInfo[0].getPropertiesList().get(1).getKey(), "key2");
    assertEquals(storedMLInfo[0].getPropertiesList().get(1).getValue(), "value2");
}
Use of org.apache.pulsar.metadata.api.Stat in project pulsar by apache: class ManagedLedgerImpl, method internalTrimLedgers.
/**
 * Scans the ledgers list and removes every ledger that is no longer needed:
 * when {@code isTruncate} is true, all ledgers besides the one currently being
 * written to; otherwise, ledgers already consumed by the slowest durable
 * cursor that are expired or push the accumulated size over the retention
 * quota. Offloaded ledgers whose BookKeeper copy is no longer required are
 * collected and deleted as well. The ledgers list in the metadata store is
 * updated before any ledger is actually deleted.
 *
 * Locking: trimmerMutex is held for the whole operation so only one trim runs
 * at a time; metadataMutex is held while the metadata update is in flight.
 * Both are released on every exit path, including the async callbacks.
 *
 * @param isTruncate whether to delete all non-current ledgers regardless of retention
 * @param promise completed when trimming is done or deferred; completed
 *        exceptionally if the managed ledger is already closed, the slowest
 *        reader position cannot be found, or the metadata update fails
 */
void internalTrimLedgers(boolean isTruncate, CompletableFuture<?> promise) {
if (!factory.isMetadataServiceAvailable()) {
// Defer trimming of ledger if we cannot connect to metadata service
promise.complete(null);
return;
}
// Ensure only one trimming operation is active
if (!trimmerMutex.tryLock()) {
scheduleDeferredTrimming(isTruncate, promise);
return;
}
List<LedgerInfo> ledgersToDelete = Lists.newArrayList();
List<LedgerInfo> offloadedLedgersToDelete = Lists.newArrayList();
// Offload policies are only relevant when a real (non-null) offloader is configured.
Optional<OffloadPolicies> optionalOffloadPolicies = Optional.ofNullable(config.getLedgerOffloader() != null && config.getLedgerOffloader() != NullLedgerOffloader.INSTANCE ? config.getLedgerOffloader().getOffloadPolicies() : null);
// The ledgers map and the entry/size counters are inspected and mutated under the object monitor.
synchronized (this) {
if (log.isDebugEnabled()) {
log.debug("[{}] Start TrimConsumedLedgers. ledgers={} totalSize={}", name, ledgers.keySet(), TOTAL_SIZE_UPDATER.get(this));
}
if (STATE_UPDATER.get(this) == State.Closed) {
log.debug("[{}] Ignoring trimming request since the managed ledger was already closed", name);
trimmerMutex.unlock();
promise.completeExceptionally(new ManagedLedgerAlreadyClosedException("Can't trim closed ledger"));
return;
}
long slowestReaderLedgerId = -1;
if (!cursors.hasDurableCursors()) {
// At this point the lastLedger will be pointing to the
// ledger that has just been closed, therefore the +1 to
// include lastLedger in the trimming.
slowestReaderLedgerId = currentLedger.getId() + 1;
} else {
PositionImpl slowestReaderPosition = cursors.getSlowestReaderPosition();
if (slowestReaderPosition != null) {
slowestReaderLedgerId = slowestReaderPosition.getLedgerId();
} else {
promise.completeExceptionally(new ManagedLedgerException("Couldn't find reader position"));
trimmerMutex.unlock();
return;
}
}
if (log.isDebugEnabled()) {
log.debug("[{}] Slowest consumer ledger id: {}", name, slowestReaderLedgerId);
}
long totalSizeToDelete = 0;
// skip ledger if retention constraint met
// Only ledgers strictly below the slowest reader's ledger are candidates.
for (LedgerInfo ls : ledgers.headMap(slowestReaderLedgerId, false).values()) {
// currentLedger can not be deleted
if (ls.getLedgerId() == currentLedger.getId()) {
if (log.isDebugEnabled()) {
log.debug("[{}] Ledger {} skipped for deletion as it is currently being written to", name, ls.getLedgerId());
}
break;
}
// if truncate, all ledgers besides currentLedger are going to be deleted
if (isTruncate) {
if (log.isDebugEnabled()) {
log.debug("[{}] Ledger {} will be truncated with ts {}", name, ls.getLedgerId(), ls.getTimestamp());
}
ledgersToDelete.add(ls);
continue;
}
totalSizeToDelete += ls.getSize();
boolean overRetentionQuota = isLedgerRetentionOverSizeQuota(totalSizeToDelete);
boolean expired = hasLedgerRetentionExpired(ls.getTimestamp());
if (log.isDebugEnabled()) {
log.debug("[{}] Checking ledger {} -- time-old: {} sec -- " + "expired: {} -- over-quota: {} -- current-ledger: {}", name, ls.getLedgerId(), (clock.millis() - ls.getTimestamp()) / 1000.0, expired, overRetentionQuota, currentLedger.getId());
}
if (expired || overRetentionQuota) {
if (log.isDebugEnabled()) {
log.debug("[{}] Ledger {} has expired or over quota, expired is: {}, ts: {}, " + "overRetentionQuota is: {}, ledge size: {}", name, ls.getLedgerId(), expired, ls.getTimestamp(), overRetentionQuota, ls.getSize());
}
ledgersToDelete.add(ls);
} else {
// once retention constraint has been met, skip check
if (log.isDebugEnabled()) {
log.debug("[{}] Ledger {} not deleted. Neither expired nor over-quota", name, ls.getLedgerId());
}
invalidateReadHandle(ls.getLedgerId());
}
}
// Offloaded ledgers whose BookKeeper copy is no longer needed are deleted
// separately, without removing them from the ledgers map.
for (LedgerInfo ls : ledgers.values()) {
if (isOffloadedNeedsDelete(ls.getOffloadContext(), optionalOffloadPolicies) && !ledgersToDelete.contains(ls)) {
log.debug("[{}] Ledger {} has been offloaded, bookkeeper ledger needs to be deleted", name, ls.getLedgerId());
offloadedLedgersToDelete.add(ls);
}
}
if (ledgersToDelete.isEmpty() && offloadedLedgersToDelete.isEmpty()) {
trimmerMutex.unlock();
promise.complete(null);
return;
}
if (// Give up now and schedule a new trimming
STATE_UPDATER.get(this) == State.CreatingLedger || !metadataMutex.tryLock()) {
// Avoid deadlocks with other operations updating the ledgers list
scheduleDeferredTrimming(isTruncate, promise);
trimmerMutex.unlock();
return;
}
// Move cursors off the ledgers that are about to be removed.
advanceCursorsIfNecessary(ledgersToDelete);
PositionImpl currentLastConfirmedEntry = lastConfirmedEntry;
// Update metadata
for (LedgerInfo ls : ledgersToDelete) {
if (currentLastConfirmedEntry != null && ls.getLedgerId() == currentLastConfirmedEntry.getLedgerId()) {
// this info is relevant because the lastMessageId won't be available anymore
log.info("[{}] Ledger {} contains the current last confirmed entry {}, and it is going to be " + "deleted", name, ls.getLedgerId(), currentLastConfirmedEntry);
}
invalidateReadHandle(ls.getLedgerId());
ledgers.remove(ls.getLedgerId());
NUMBER_OF_ENTRIES_UPDATER.addAndGet(this, -ls.getEntries());
TOTAL_SIZE_UPDATER.addAndGet(this, -ls.getSize());
entryCache.invalidateAllEntries(ls.getLedgerId());
}
// Mark the BookKeeper copy of offloaded ledgers as deleted in the metadata,
// preserving the offload driver name/metadata.
for (LedgerInfo ls : offloadedLedgersToDelete) {
LedgerInfo.Builder newInfoBuilder = ls.toBuilder();
newInfoBuilder.getOffloadContextBuilder().setBookkeeperDeleted(true);
String driverName = OffloadUtils.getOffloadDriverName(ls, config.getLedgerOffloader().getOffloadDriverName());
Map<String, String> driverMetadata = OffloadUtils.getOffloadDriverMetadata(ls, config.getLedgerOffloader().getOffloadDriverMetadata());
OffloadUtils.setOffloadDriverMetadata(newInfoBuilder, driverName, driverMetadata);
ledgers.put(ls.getLedgerId(), newInfoBuilder.build());
}
if (log.isDebugEnabled()) {
log.debug("[{}] Updating of ledgers list after trimming", name);
}
// Persist the updated ledgers list first; the actual ledger deletions only
// happen in the success callback, after the metadata store has accepted it.
store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, new MetaStoreCallback<Void>() {
@Override
public void operationComplete(Void result, Stat stat) {
log.info("[{}] End TrimConsumedLedgers. ledgers={} totalSize={}", name, ledgers.size(), TOTAL_SIZE_UPDATER.get(ManagedLedgerImpl.this));
ledgersStat = stat;
metadataMutex.unlock();
trimmerMutex.unlock();
for (LedgerInfo ls : ledgersToDelete) {
log.info("[{}] Removing ledger {} - size: {}", name, ls.getLedgerId(), ls.getSize());
asyncDeleteLedger(ls.getLedgerId(), ls);
}
for (LedgerInfo ls : offloadedLedgersToDelete) {
log.info("[{}] Deleting offloaded ledger {} from bookkeeper - size: {}", name, ls.getLedgerId(), ls.getSize());
asyncDeleteLedgerFromBookKeeper(ls.getLedgerId());
}
promise.complete(null);
}
@Override
public void operationFailed(MetaStoreException e) {
log.warn("[{}] Failed to update the list of ledgers after trimming", name, e);
metadataMutex.unlock();
trimmerMutex.unlock();
promise.completeExceptionally(e);
}
});
}
}
Use of org.apache.pulsar.metadata.api.Stat in project pulsar by apache: class ManagedLedgerImpl, method initializeBookKeeper.
/**
 * Second stage of recovery: rebuilds the entry/size counters from the
 * recovered ledgers list, removes empty ledgers (deleting them from
 * BookKeeper), and — unless the managed ledger is in Terminated state —
 * creates a new ledger for writing, persists the updated ledgers list and
 * then proceeds to cursor initialization.
 *
 * @param callback notified when initialization can continue or has failed
 */
private synchronized void initializeBookKeeper(final ManagedLedgerInitializeLedgerCallback callback) {
if (log.isDebugEnabled()) {
log.debug("[{}] initializing bookkeeper; ledgers {}", name, ledgers);
}
// Calculate total entries and size
Iterator<LedgerInfo> iterator = ledgers.values().iterator();
while (iterator.hasNext()) {
LedgerInfo li = iterator.next();
if (li.getEntries() > 0) {
NUMBER_OF_ENTRIES_UPDATER.addAndGet(this, li.getEntries());
TOTAL_SIZE_UPDATER.addAndGet(this, li.getSize());
} else {
// Empty ledger: drop it from the list and delete it in BookKeeper
// (best-effort; the result is only logged).
iterator.remove();
bookKeeper.asyncDeleteLedger(li.getLedgerId(), (rc, ctx) -> {
if (log.isDebugEnabled()) {
log.debug("[{}] Deleted empty ledger ledgerId={} rc={}", name, li.getLedgerId(), rc);
}
}, null);
}
}
if (state == State.Terminated) {
// When recovering a terminated managed ledger, we don't need to create
// a new ledger for writing, since no more writes are allowed.
// We just move on to the next stage
initializeCursors(callback);
return;
}
// Once the new ledgers list is persisted, continue with cursor recovery;
// a metadata failure aborts the whole initialization.
final MetaStoreCallback<Void> storeLedgersCb = new MetaStoreCallback<Void>() {
@Override
public void operationComplete(Void v, Stat stat) {
ledgersStat = stat;
initializeCursors(callback);
}
@Override
public void operationFailed(MetaStoreException e) {
callback.initializeFailed(new ManagedLedgerException(e));
}
};
// Create a new ledger to start writing
this.lastLedgerCreationInitiationTimestamp = System.currentTimeMillis();
mbean.startDataLedgerCreateOp();
asyncCreateLedger(bookKeeper, config, digestType, (rc, lh, ctx) -> {
// NOTE(review): presumably true when the create op was already completed
// elsewhere (e.g. timed out) — confirm in checkAndCompleteLedgerOpTask.
if (checkAndCompleteLedgerOpTask(rc, lh, ctx)) {
return;
}
executor.executeOrdered(name, safeRun(() -> {
mbean.endDataLedgerCreateOp();
if (rc != BKException.Code.OK) {
callback.initializeFailed(createManagedLedgerException(rc));
return;
}
log.info("[{}] Created ledger {}", name, lh.getId());
STATE_UPDATER.set(this, State.LedgerOpened);
updateLastLedgerCreatedTimeAndScheduleRolloverTask();
currentLedger = lh;
lastConfirmedEntry = new PositionImpl(lh.getId(), -1);
// bypass empty ledgers, find last ledger with Message if possible.
while (lastConfirmedEntry.getEntryId() == -1) {
Map.Entry<Long, LedgerInfo> formerLedger = ledgers.lowerEntry(lastConfirmedEntry.getLedgerId());
if (formerLedger != null) {
LedgerInfo ledgerInfo = formerLedger.getValue();
lastConfirmedEntry = PositionImpl.get(ledgerInfo.getLedgerId(), ledgerInfo.getEntries() - 1);
} else {
break;
}
}
// NOTE(review): timestamp 0 appears to mark a ledger that is not yet closed — confirm.
LedgerInfo info = LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build();
ledgers.put(lh.getId(), info);
// Save it back to ensure all nodes exist
store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, storeLedgersCb);
}));
}, ledgerMetadata);
}
Use of org.apache.pulsar.metadata.api.Stat in project pulsar by apache: class ManagedLedgerImpl, method createComplete.
// //////////////////////////////////////////////////////////////////////
// Callbacks
/**
 * BookKeeper callback invoked when an asynchronous ledger creation (ledger
 * rollover) completes. On failure, all pending add operations are failed and
 * the state becomes ClosedLedger (no writes pending) or WriteFailed (writes
 * pending). On success, the new ledger becomes the current one and the
 * updated ledgers list is persisted; if that metadata update hits a
 * BadVersionException the managed ledger is fenced, otherwise the newly
 * created ledger is deleted again and the pending writes are failed.
 */
@Override
public synchronized void createComplete(int rc, final LedgerHandle lh, Object ctx) {
if (log.isDebugEnabled()) {
log.debug("[{}] createComplete rc={} ledger={}", name, rc, lh != null ? lh.getId() : -1);
}
// NOTE(review): presumably true when the create op was already completed
// elsewhere (e.g. timed out) — confirm in checkAndCompleteLedgerOpTask.
if (checkAndCompleteLedgerOpTask(rc, lh, ctx)) {
return;
}
mbean.endDataLedgerCreateOp();
if (rc != BKException.Code.OK) {
log.error("[{}] Error creating ledger rc={} {}", name, rc, BKException.getMessage(rc));
ManagedLedgerException status = createManagedLedgerException(rc);
// no pending entries means that creating this new ledger is NOT caused by write failure
if (pendingAddEntries.isEmpty()) {
STATE_UPDATER.set(this, State.ClosedLedger);
} else {
STATE_UPDATER.set(this, State.WriteFailed);
}
// Empty the list of pending requests and make all of them fail
clearPendingAddEntries(status);
lastLedgerCreationFailureTimestamp = clock.millis();
} else {
log.info("[{}] Created new ledger {}", name, lh.getId());
ledgers.put(lh.getId(), LedgerInfo.newBuilder().setLedgerId(lh.getId()).setTimestamp(0).build());
currentLedger = lh;
currentLedgerEntries = 0;
currentLedgerSize = 0;
// Callback for persisting the new ledgers list to the metadata store.
final MetaStoreCallback<Void> cb = new MetaStoreCallback<Void>() {
@Override
public void operationComplete(Void v, Stat stat) {
if (log.isDebugEnabled()) {
log.debug("[{}] Updating of ledgers list after create complete. version={}", name, stat);
}
ledgersStat = stat;
metadataMutex.unlock();
updateLedgersIdsComplete(stat);
synchronized (ManagedLedgerImpl.this) {
mbean.addLedgerSwitchLatencySample(System.currentTimeMillis() - lastLedgerCreationInitiationTimestamp, TimeUnit.MILLISECONDS);
}
// May need to update the cursor position
maybeUpdateCursorBeforeTrimmingConsumedLedger();
}
@Override
public void operationFailed(MetaStoreException e) {
log.warn("[{}] Error updating meta data with the new list of ledgers: {}", name, e.getMessage());
// Remove the ledger, since we failed to update the list
ledgers.remove(lh.getId());
mbean.startDataLedgerDeleteOp();
// Best-effort deletion of the just-created ledger; failures are only logged.
bookKeeper.asyncDeleteLedger(lh.getId(), (rc1, ctx1) -> {
mbean.endDataLedgerDeleteOp();
if (rc1 != BKException.Code.OK) {
log.warn("[{}] Failed to delete ledger {}: {}", name, lh.getId(), BKException.getMessage(rc1));
}
}, null);
// NOTE(review): on the BadVersion path below, metadataMutex is NOT unlocked —
// confirm whether that is intentional now that the ledger is fenced.
if (e instanceof BadVersionException) {
synchronized (ManagedLedgerImpl.this) {
log.error("[{}] Failed to update ledger list. z-node version mismatch. Closing managed ledger", name);
lastLedgerCreationFailureTimestamp = clock.millis();
STATE_UPDATER.set(ManagedLedgerImpl.this, State.Fenced);
// Return ManagedLedgerFencedException to addFailed callback
// to indicate that the ledger is now fenced and topic needs to be closed
clearPendingAddEntries(new ManagedLedgerFencedException(e));
// anyways
return;
}
}
metadataMutex.unlock();
synchronized (ManagedLedgerImpl.this) {
lastLedgerCreationFailureTimestamp = clock.millis();
STATE_UPDATER.set(ManagedLedgerImpl.this, State.ClosedLedger);
clearPendingAddEntries(e);
}
}
};
updateLedgersListAfterRollover(cb);
}
}
Use of org.apache.pulsar.metadata.api.Stat in project pulsar by apache: class ManagedLedgerImpl, method asyncTerminate.
/**
 * Terminates this managed ledger so that no further writes are accepted:
 * closes the ledger currently being written to, records the final
 * last-confirmed position and persists the terminated state to the metadata
 * store.
 *
 * @param callback invoked with the final position on success, or with the
 *        failure cause otherwise
 * @param ctx opaque context handed back to the callback
 */
@Override
public synchronized void asyncTerminate(TerminateCallback callback, Object ctx) {
    // A fenced ledger cannot be terminated; terminating twice is a no-op.
    switch (state) {
        case Fenced:
            callback.terminateFailed(new ManagedLedgerFencedException(), ctx);
            return;
        case Terminated:
            if (log.isDebugEnabled()) {
                log.debug("[{}] Ignoring request to terminate an already terminated managed ledger", name);
            }
            callback.terminateComplete(lastConfirmedEntry, ctx);
            return;
        default:
            break;
    }
    log.info("[{}] Terminating managed ledger", name);
    state = State.Terminated;
    final LedgerHandle writingLedger = currentLedger;
    if (log.isDebugEnabled()) {
        log.debug("[{}] Closing current writing ledger {}", name, writingLedger.getId());
    }
    mbean.startDataLedgerCloseOp();
    writingLedger.asyncClose((closeRc, closedHandle, closeCtx) -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Close complete for ledger {}: rc = {}", name, writingLedger.getId(), closeRc);
        }
        mbean.endDataLedgerCloseOp();
        if (closeRc != BKException.Code.OK) {
            callback.terminateFailed(createManagedLedgerException(closeRc), ctx);
            return;
        }
        lastConfirmedEntry = new PositionImpl(writingLedger.getId(), writingLedger.getLastAddConfirmed());
        // Store the new state in metadata
        store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, new MetaStoreCallback<Void>() {
            @Override
            public void operationComplete(Void result, Stat stat) {
                ledgersStat = stat;
                log.info("[{}] Terminated managed ledger at {}", name, lastConfirmedEntry);
                callback.terminateComplete(lastConfirmedEntry, ctx);
            }

            @Override
            public void operationFailed(MetaStoreException e) {
                log.error("[{}] Failed to terminate managed ledger: {}", name, e.getMessage());
                callback.terminateFailed(new ManagedLedgerException(e), ctx);
            }
        });
    }, null);
}
Aggregations