Use of org.apache.activemq.artemis.core.paging.impl.Page in project activemq-artemis by apache.
The class PageSubscriptionImpl, method cleanupEntries.
/**
 * It will clean up all the records for completed pages
 */
@Override
public void cleanupEntries(final boolean completeDelete) throws Exception {
   if (completeDelete) {
      counter.delete();
   }
   if (logger.isTraceEnabled()) {
      logger.trace("cleanupEntries", new Exception("trace"));
   }
   Transaction tx = new TransactionImpl(store);
   boolean persist = false;
   final ArrayList<PageCursorInfo> completedPages = new ArrayList<>();
   // First get the completed pages using a lock
   synchronized (consumedPages) {
      // lastAckedPosition == null means no acks were done yet, so we are not ready to clean up
      if (lastAckedPosition == null) {
         return;
      }
      for (Entry<Long, PageCursorInfo> entry : consumedPages.entrySet()) {
         PageCursorInfo info = entry.getValue();
         if (info.isDone() && !info.isPendingDelete()) {
            Page currentPage = pageStore.getCurrentPage();
            if (currentPage != null && entry.getKey() == pageStore.getCurrentPage().getPageId() && currentPage.isLive()) {
               logger.trace("We can't clear page " + entry.getKey() + " now since it's the current page");
            } else {
               info.setPendingDelete();
               completedPages.add(entry.getValue());
            }
         }
      }
   }
   for (PageCursorInfo infoPG : completedPages) {
      // first mark the page as complete
      if (isPersistent()) {
         PagePosition completePage = new PagePositionImpl(infoPG.getPageId(), infoPG.getNumberOfMessages());
         infoPG.setCompleteInfo(completePage);
         store.storePageCompleteTransactional(tx.getID(), this.getId(), completePage);
         if (!persist) {
            persist = true;
            tx.setContainsPersistent();
         }
      }
      // then delete the page ack records
      for (PagePosition pos : infoPG.acks) {
         if (pos.getRecordID() >= 0) {
            store.deleteCursorAcknowledgeTransactional(tx.getID(), pos.getRecordID());
            if (!persist) {
               // only need to set it once
               tx.setContainsPersistent();
               persist = true;
            }
         }
      }
      infoPG.acks.clear();
      infoPG.acks = Collections.synchronizedSet(new LinkedHashSet<PagePosition>());
      infoPG.removedReferences.clear();
      infoPG.removedReferences = new ConcurrentHashSet<>();
   }
   tx.addOperation(new TransactionOperationAbstract() {
      @Override
      public void afterCommit(final Transaction tx1) {
         executor.execute(new Runnable() {
            @Override
            public void run() {
               if (!completeDelete) {
                  cursorProvider.scheduleCleanup();
               }
            }
         });
      }
   });
   tx.commit();
}
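The transactional pattern above recurs throughout the paging code: queue the persistent deletes inside a single Transaction, flag it as containing persistent work exactly once, and hang the follow-up scheduling off an afterCommit callback so it only runs after the deletes are durable. A minimal sketch of that pattern, kept to the calls visible above and assuming store is the same StorageManager field used by the class; the deleteAcksInTransaction helper and its recordIds parameter are illustrative only, not part of the Artemis API:
private void deleteAcksInTransaction(StorageManager store, List<Long> recordIds) throws Exception {
   Transaction tx = new TransactionImpl(store);
   boolean persist = false;
   for (long recordID : recordIds) {
      if (recordID >= 0) {
         // queue the delete inside the transaction, as cleanupEntries does per ack record
         store.deleteCursorAcknowledgeTransactional(tx.getID(), recordID);
         if (!persist) {
            // mark the transaction as persistent only once
            tx.setContainsPersistent();
            persist = true;
         }
      }
   }
   tx.addOperation(new TransactionOperationAbstract() {
      @Override
      public void afterCommit(final Transaction committedTx) {
         // follow-up work goes here; it runs only once the deletes are durable
      }
   });
   tx.commit();
}
Guarding setContainsPersistent() with the persist flag simply avoids re-flagging the transaction for every record, which is all that flag does in cleanupEntries as well.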
Use of org.apache.activemq.artemis.core.paging.impl.Page in project activemq-artemis by apache.
The class PageSubscriptionCounterImpl, method cleanupNonTXCounters.
/**
 * Clean up temporary page counters for non-transactional paged messages
 *
 * @param pageID
 */
@Override
public void cleanupNonTXCounters(final long pageID) throws Exception {
   PendingCounter pendingInfo;
   synchronized (this) {
      pendingInfo = pendingCounters.remove(pageID);
   }
   if (pendingInfo != null) {
      final int valueCleaned = pendingInfo.getCount();
      final long valueSizeCleaned = pendingInfo.getPersistentSize();
      Transaction tx = new TransactionImpl(storage);
      storage.deletePendingPageCounter(tx.getID(), pendingInfo.getId());
      // To apply the increment of the value just being cleaned
      increment(tx, valueCleaned, valueSizeCleaned);
      tx.addOperation(new TransactionOperationAbstract() {
         @Override
         public void afterCommit(Transaction tx) {
            pendingValue.addAndGet(-valueCleaned);
            pendingPersistentSize.updateAndGet(val -> val >= valueSizeCleaned ? val - valueSizeCleaned : 0);
         }
      });
      tx.commit();
   }
}
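The afterCommit callback compensates the in-memory pending counters once the pending-counter record has been deleted and re-applied as a regular increment. Note that the persistent-size update clamps at zero instead of going negative; a self-contained illustration of that clamping expression with made-up numbers:
import java.util.concurrent.atomic.AtomicLong;

public class ClampExample {
   public static void main(String[] args) {
      AtomicLong pendingPersistentSize = new AtomicLong(100);
      final long valueSizeCleaned = 250; // deliberately larger than the current value
      // same clamping expression as in afterCommit above: never drop below zero
      pendingPersistentSize.updateAndGet(val -> val >= valueSizeCleaned ? val - valueSizeCleaned : 0);
      System.out.println(pendingPersistentSize.get()); // prints 0
   }
}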
Use of org.apache.activemq.artemis.core.paging.impl.Page in project activemq-artemis by apache.
The class PageCursorProviderImpl, method cleanup.
@Override
public void cleanup() {
   logger.tracef("performing page cleanup %s", this);
   ArrayList<Page> depagedPages = new ArrayList<>();
   while (true) {
      if (pagingStore.lock(100)) {
         break;
      }
      if (!pagingStore.isStarted())
         return;
   }
   logger.tracef("%s locked", this);
   synchronized (this) {
      try {
         if (!pagingStore.isStarted()) {
            return;
         }
         if (pagingStore.getNumberOfPages() == 0) {
            return;
         }
         ArrayList<PageSubscription> cursorList = cloneSubscriptions();
         long minPage = checkMinPage(cursorList);
         logger.debugf("Asserting cleanup for address %s, firstPage=%d", pagingStore.getAddress(), minPage);
         // if minPage is still the page being written to and it has messages, completion has to be verified in a different way
         if (minPage == pagingStore.getCurrentWritingPage() && pagingStore.getCurrentPage().getNumberOfMessages() > 0) {
            boolean complete = checkPageCompletion(cursorList, minPage);
            if (!pagingStore.isStarted()) {
               return;
            }
            // All the pages on the cursor are complete.. so we will cleanup everything and store a bookmark
            if (complete) {
               cleanupComplete(cursorList);
            }
         }
         for (long i = pagingStore.getFirstPage(); i < minPage; i++) {
            if (!checkPageCompletion(cursorList, i)) {
               break;
            }
            Page page = pagingStore.depage();
            if (page == null) {
               break;
            }
            depagedPages.add(page);
         }
         if (pagingStore.getNumberOfPages() == 0 || pagingStore.getNumberOfPages() == 1 && pagingStore.getCurrentPage().getNumberOfMessages() == 0) {
            pagingStore.stopPaging();
         } else {
            if (logger.isTraceEnabled()) {
               logger.trace("Couldn't cleanup page on address " + this.pagingStore.getAddress() + " as numberOfPages == " + pagingStore.getNumberOfPages() + " and currentPage.numberOfMessages = " + pagingStore.getCurrentPage().getNumberOfMessages());
            }
         }
      } catch (Exception ex) {
         ActiveMQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
         return;
      } finally {
         pagingStore.unlock();
      }
   }
   finishCleanup(depagedPages);
}
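checkMinPage is not shown here, but the depage loop above only removes pages strictly below the value it returns, so it has to yield the smallest page any subscription still needs. A hypothetical sketch of that reduction; the PageSubscription#getFirstPage() accessor is assumed for illustration and the real method may differ:
// Illustrative sketch of the minimum-page reduction implied by the loop above.
private long checkMinPageSketch(List<PageSubscription> cursorList) {
   long minPage = Long.MAX_VALUE;
   for (PageSubscription cursor : cursorList) {
      long firstPage = cursor.getFirstPage(); // assumed: first page this subscription still needs
      if (firstPage < minPage) {
         minPage = firstPage;
      }
   }
   return minPage;
}
Everything below that minimum is complete for every subscription, which is why cleanup() can depage those files and, once nothing is left, call stopPaging().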
Use of org.apache.activemq.artemis.core.paging.impl.Page in project activemq-artemis by apache.
The class PageCursorProviderImpl, method readPage.
private void readPage(int pageId, PageCache cache) throws Exception {
   Page page = null;
   try {
      page = pagingStore.createPage(pageId);
      storageManager.beforePageRead();
      page.open();
      List<PagedMessage> pgdMessages = page.read(storageManager);
      cache.setMessages(pgdMessages.toArray(new PagedMessage[pgdMessages.size()]));
   } finally {
      try {
         if (page != null) {
            page.close(false);
         }
      } catch (Throwable ignored) {
      }
      storageManager.afterPageRead();
   }
}
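The part of readPage worth imitating is the open/read/close discipline: beforePageRead() and afterPageRead() bracket the file access, and the page is closed in a finally block even if decoding fails. A condensed sketch of the same sequence, assuming the same pagingStore and storageManager fields as the class above; the readMessages name is illustrative:
private List<PagedMessage> readMessages(int pageId) throws Exception {
   Page page = pagingStore.createPage(pageId);
   storageManager.beforePageRead();       // paired with afterPageRead() around the file access
   try {
      page.open();
      return page.read(storageManager);   // decodes the PagedMessages stored in the page file
   } finally {
      try {
         page.close(false);               // same flag as readPage above
      } catch (Throwable ignored) {
      }
      storageManager.afterPageRead();
   }
}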
Use of org.apache.activemq.artemis.core.paging.impl.Page in project activemq-artemis by apache.
The class ReplicationEndpoint, method handleReplicationSynchronization.
/**
 * Receives 'raw' journal/page/large-message data from the live server for synchronization of logs.
 *
 * @param msg
 * @throws Exception
 */
private void handleReplicationSynchronization(ReplicationSyncFileMessage msg) throws Exception {
   long id = msg.getId();
   byte[] data = msg.getData();
   SequentialFile channel1;
   switch (msg.getFileType()) {
      case LARGE_MESSAGE: {
         ReplicatedLargeMessage largeMessage = lookupLargeMessage(id, false, false);
         if (!(largeMessage instanceof LargeServerMessageInSync)) {
            ActiveMQServerLogger.LOGGER.largeMessageIncompatible();
            return;
         }
         LargeServerMessageInSync largeMessageInSync = (LargeServerMessageInSync) largeMessage;
         channel1 = largeMessageInSync.getSyncFile();
         break;
      }
      case PAGE: {
         Page page = getPage(msg.getPageStore(), (int) msg.getId());
         channel1 = page.getFile();
         break;
      }
      case JOURNAL: {
         JournalSyncFile journalSyncFile = filesReservedForSync.get(msg.getJournalContent()).get(id);
         FileChannel channel2 = journalSyncFile.getChannel();
         if (data == null) {
            channel2.close();
            return;
         }
         channel2.write(ByteBuffer.wrap(data));
         return;
      }
      default:
         throw ActiveMQMessageBundle.BUNDLE.replicationUnhandledFileType(msg.getFileType());
   }
   if (data == null) {
      return;
   }
   if (!channel1.isOpen()) {
      channel1.open();
   }
   channel1.writeDirect(ByteBuffer.wrap(data), false);
}