Use of org.apache.activemq.artemis.core.paging.cursor.PageSubscription in project activemq-artemis by apache.
The class AbstractJournalStorageManager, method locateSubscription.
/**
 * @param queueID           the id of the queue whose page subscription is wanted
 * @param pageSubscriptions cache of subscriptions already located during this load
 * @param queueInfos        queue binding information previously loaded from the bindings journal
 * @param pagingManager     used to resolve the paging store for the queue's address
 * @return the page subscription for the queue, or {@code null} if the queue is unknown
 */
private static PageSubscription locateSubscription(final long queueID,
                                                   final Map<Long, PageSubscription> pageSubscriptions,
                                                   final Map<Long, QueueBindingInfo> queueInfos,
                                                   final PagingManager pagingManager) throws Exception {
   PageSubscription subs = pageSubscriptions.get(queueID);
   if (subs == null) {
      QueueBindingInfo queueInfo = queueInfos.get(queueID);
      if (queueInfo != null) {
         SimpleString address = queueInfo.getAddress();
         PagingStore store = pagingManager.getPageStore(address);
         subs = store.getCursorProvider().getSubscription(queueID);
         pageSubscriptions.put(queueID, subs);
      }
   }
   return subs;
}
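The method is a memoizing lookup: it checks the pageSubscriptions map first and, on a miss, resolves the subscription through the queue's address and its paging store, caching the result for later records. A minimal, self-contained sketch of the same get-or-resolve pattern (the names here are illustrative, not part of the Artemis API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Illustrative only: the same "check the cache, resolve on a miss, remember
// the result" shape that locateSubscription follows.
public class LazyLookupExample {

   static <K, V> V locate(K key, Map<K, V> cache, Function<K, V> resolver) {
      V value = cache.get(key);
      if (value == null) {
         value = resolver.apply(key); // may return null, like an unknown queueID
         if (value != null) {
            cache.put(key, value); // later lookups reuse the same instance
         }
      }
      return value;
   }

   public static void main(String[] args) {
      Map<Long, String> cache = new HashMap<>();
      System.out.println(locate(42L, cache, id -> "subscription-" + id));
      // second call hits the cache; the resolver is never invoked
      System.out.println(locate(42L, cache, id -> {
         throw new AssertionError("cache hit expected");
      }));
   }
}

Map.computeIfAbsent would express the same idea more compactly, but the real method cannot use it directly because PagingManager.getPageStore declares a checked exception.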
Use of org.apache.activemq.artemis.core.paging.cursor.PageSubscription in project activemq-artemis by apache.
The class AbstractJournalStorageManager, method loadMessageJournal.
@Override
public JournalLoadInformation loadMessageJournal(final PostOffice postOffice,
                                                 final PagingManager pagingManager,
                                                 final ResourceManager resourceManager,
                                                 Map<Long, QueueBindingInfo> queueInfos,
                                                 final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap,
                                                 final Set<Pair<Long, Long>> pendingLargeMessages,
                                                 List<PageCountPending> pendingNonTXPageCounter,
                                                 final JournalLoader journalLoader) throws Exception {
   List<RecordInfo> records = new ArrayList<>();
   List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
   Set<PageTransactionInfo> invalidPageTransactions = null;
   Map<Long, Message> messages = new HashMap<>();
   readLock();
   try {
      JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
      ArrayList<LargeServerMessage> largeMessages = new ArrayList<>();
      Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<>();
      Map<Long, PageSubscription> pageSubscriptions = new HashMap<>();
      final int totalSize = records.size();
      for (int reccount = 0; reccount < totalSize; reccount++) {
         // Log progress only for large journals (more than 1 million records)
         if (reccount > 0 && reccount % 1000000 == 0) {
            long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
            ActiveMQServerLogger.LOGGER.percentLoaded(percent);
         }
         RecordInfo record = records.get(reccount);
         byte[] data = record.data;
         ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
         byte recordType = record.getUserRecordType();
         switch (recordType) {
            case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING: {
               PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
               pending.decode(buff);
               if (pendingLargeMessages != null) {
                  // it can be null in tests, where nothing needs to be recorded
                  pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
               }
               break;
            }
            case JournalRecordIds.ADD_LARGE_MESSAGE: {
               LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
               messages.put(record.id, largeMessage);
               largeMessages.add(largeMessage);
               break;
            }
            case JournalRecordIds.ADD_MESSAGE: {
               throw new IllegalStateException("This is using old journal data, export your data and import at the correct version");
            }
            case JournalRecordIds.ADD_MESSAGE_PROTOCOL: {
               Message message = MessagePersister.getInstance().decode(buff, null);
               messages.put(record.id, message);
               break;
            }
            case JournalRecordIds.ADD_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  queueMessages = new LinkedHashMap<>();
                  queueMap.put(encoding.queueID, queueMessages);
               }
               Message message = messages.get(messageID);
               if (message == null) {
                  ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
               } else {
                  queueMessages.put(messageID, new AddMessageRecord(message));
               }
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
               } else {
                  AddMessageRecord rec = queueMessages.remove(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                  }
               }
               break;
            }
            case JournalRecordIds.UPDATE_DELIVERY_COUNT: {
               long messageID = record.id;
               DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
               } else {
                  AddMessageRecord rec = queueMessages.get(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
                  } else {
                     rec.setDeliveryCount(encoding.count);
                  }
               }
               break;
            }
            case JournalRecordIds.PAGE_TRANSACTION: {
               PageTransactionInfo invalidPGTx = null;
               if (record.isUpdate) {
                  PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
                  pageUpdate.decode(buff);
                  PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
                  if (pageTX == null) {
                     ActiveMQServerLogger.LOGGER.journalCannotFindPageTX(pageUpdate.pageTX);
                  } else {
                     if (!pageTX.onUpdate(pageUpdate.recods, null, null)) {
                        invalidPGTx = pageTX;
                     }
                  }
               } else {
                  PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
                  pageTransactionInfo.decode(buff);
                  pageTransactionInfo.setRecordID(record.id);
                  pagingManager.addTransaction(pageTransactionInfo);
                  if (!pageTransactionInfo.checkSize(null, null)) {
                     invalidPGTx = pageTransactionInfo;
                  }
               }
               if (invalidPGTx != null) {
                  if (invalidPageTransactions == null) {
                     invalidPageTransactions = new HashSet<>();
                  }
                  invalidPageTransactions.add(invalidPGTx);
               }
               break;
            }
            case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME: {
               long messageID = record.id;
               ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
               } else {
                  AddMessageRecord rec = queueMessages.get(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                  } else {
                     rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
                  }
               }
               break;
            }
            case JournalRecordIds.DUPLICATE_ID: {
               DuplicateIDEncoding encoding = new DuplicateIDEncoding();
               encoding.decode(buff);
               List<Pair<byte[], Long>> ids = duplicateIDMap.get(encoding.address);
               if (ids == null) {
                  ids = new ArrayList<>();
                  duplicateIDMap.put(encoding.address, ids);
               }
               ids.add(new Pair<>(encoding.duplID, record.id));
               break;
            }
            case JournalRecordIds.HEURISTIC_COMPLETION: {
               HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
               encoding.decode(buff);
               resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_CURSOR: {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.reloadACK(encoding.position);
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE: {
               PageCountRecord encoding = new PageCountRecord();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().loadValue(record.id, encoding.getValue(), encoding.getPersistentSize());
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COUNTER_INC: {
               PageCountRecordInc encoding = new PageCountRecordInc();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().loadInc(record.id, encoding.getValue(), encoding.getPersistentSize());
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COMPLETE: {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  if (!sub.reloadPageCompletion(encoding.position)) {
                     if (logger.isDebugEnabled()) {
                        logger.debug("Complete page " + encoding.position.getPageNr() + " doesn't exist on page manager " + sub.getPagingStore().getAddress());
                     }
                     messageJournal.appendDeleteRecord(record.id, false);
                  }
               } else {
                  ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER: {
               PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
               pendingCountEncoding.decode(buff);
               pendingCountEncoding.setID(record.id);
               // This can be null in test cases that are not interested in this outcome
               if (pendingNonTXPageCounter != null) {
                  pendingNonTXPageCounter.add(pendingCountEncoding);
               }
               break;
            }
            default: {
               throw new IllegalStateException("Invalid record type " + recordType);
            }
         }
         // Free the memory sooner: the record is no longer needed, and keeping its
         // byte array alive for the rest of the load would only slow things down.
         records.set(reccount, null);
      }
      // Release the memory as soon as it is no longer needed
      records.clear();
      records = null;
      journalLoader.handleAddMessage(queueMap);
      loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
      for (PageSubscription sub : pageSubscriptions.values()) {
         sub.getCounter().processReload();
      }
      for (LargeServerMessage msg : largeMessages) {
         if (msg.getRefCount() == 0) {
            ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
            msg.decrementDelayDeletionCount();
         }
      }
      journalLoader.handleNoMessageReferences(messages);
      // To recover positions on iterators
      if (pagingManager != null) {
         // it can be null in tests that do not deal with paging,
         // and also under certain embedded conditions
         pagingManager.processReload();
      }
      journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
      checkInvalidPageTransactions(pagingManager, invalidPageTransactions);
      journalLoaded = true;
      return info;
   } finally {
      readUnLock();
   }
}
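Loading is a single linear pass: each record carries a user record type byte, the switch dispatches on it to rebuild the matching piece of in-memory state, and each processed entry is nulled out so its byte array can be collected before the pass finishes. A stripped-down sketch of that dispatch-and-release shape, with hypothetical record types rather than the real JournalRecordIds constants:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Illustrative only: replay typed records one by one and release each entry
// as soon as it has been applied, as loadMessageJournal does.
public class ReplayExample {

   static final byte ADD = 1, ACK = 2; // hypothetical type ids

   static class Rec {
      final byte type;
      final byte[] payload;
      Rec(byte type, byte[] payload) {
         this.type = type;
         this.payload = payload;
      }
   }

   static void replay(List<Rec> records) {
      final int totalSize = records.size();
      for (int i = 0; i < totalSize; i++) {
         if (i > 0 && i % 1000000 == 0) { // progress only matters for very large journals
            System.out.printf("%d%% loaded%n", (long) (((double) i / totalSize) * 100));
         }
         Rec record = records.get(i);
         switch (record.type) {
            case ADD:
               // rebuild message state from record.payload
               break;
            case ACK:
               // remove a previously added reference
               break;
            default:
               throw new IllegalStateException("Invalid record type " + record.type);
         }
         records.set(i, null); // drop the payload so the GC can reclaim it mid-load
      }
      records.clear();
   }

   public static void main(String[] args) {
      List<Rec> records = new ArrayList<>(Arrays.asList(new Rec(ADD, new byte[8]), new Rec(ACK, new byte[8])));
      replay(records);
      System.out.println("done");
   }
}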
Use of org.apache.activemq.artemis.core.paging.cursor.PageSubscription in project activemq-artemis by apache.
The class AbstractJournalStorageManager, method loadPreparedTransactions.
private void loadPreparedTransactions(final PostOffice postOffice,
                                      final PagingManager pagingManager,
                                      final ResourceManager resourceManager,
                                      final Map<Long, QueueBindingInfo> queueInfos,
                                      final List<PreparedTransactionInfo> preparedTransactions,
                                      final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap,
                                      final Map<Long, PageSubscription> pageSubscriptions,
                                      final Set<Pair<Long, Long>> pendingLargeMessages,
                                      JournalLoader journalLoader) throws Exception {
   // recover prepared transactions
   for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
      XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());
      Xid xid = encodingXid.xid;
      Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);
      List<MessageReference> referencesToAck = new ArrayList<>();
      Map<Long, Message> messages = new HashMap<>();
      // first get any sent messages for this tx and recreate them
      for (RecordInfo record : preparedTransaction.getRecords()) {
         byte[] data = record.data;
         ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
         byte recordType = record.getUserRecordType();
         switch (recordType) {
            case JournalRecordIds.ADD_LARGE_MESSAGE: {
               messages.put(record.id, parseLargeMessage(messages, buff));
               break;
            }
            case JournalRecordIds.ADD_MESSAGE: {
               break;
            }
            case JournalRecordIds.ADD_MESSAGE_PROTOCOL: {
               Message message = MessagePersister.getInstance().decode(buff, null);
               messages.put(record.id, message);
               break;
            }
            case JournalRecordIds.ADD_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Message message = messages.get(messageID);
               if (message == null) {
                  throw new IllegalStateException("Cannot find message with id " + messageID);
               }
               journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_REF: {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);
               break;
            }
            case JournalRecordIds.PAGE_TRANSACTION: {
               PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
               pageTransactionInfo.decode(buff);
               if (record.isUpdate) {
                  PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
                  pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
               } else {
                  pageTransactionInfo.setCommitted(false);
                  tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
                  pagingManager.addTransaction(pageTransactionInfo);
                  tx.addOperation(new FinishPageMessageOperation());
               }
               break;
            }
            case SET_SCHEDULED_DELIVERY_TIME: {
               break;
            }
            case DUPLICATE_ID: {
               // We need to load the duplicate ids at prepare time too
               DuplicateIDEncoding encoding = new DuplicateIDEncoding();
               encoding.decode(buff);
               DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
               cache.load(tx, encoding.duplID);
               break;
            }
            case ACKNOWLEDGE_CURSOR: {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.reloadPreparedACK(tx, encoding.position);
                  referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
               }
               break;
            }
            case PAGE_CURSOR_COUNTER_VALUE: {
               ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();
               break;
            }
            case PAGE_CURSOR_COUNTER_INC: {
               PageCountRecordInc encoding = new PageCountRecordInc();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue(), encoding.getPersistentSize());
                  sub.notEmpty();
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
               }
               break;
            }
            default: {
               ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
            }
         }
      }
      for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
         byte[] data = recordDeleted.data;
         if (data.length > 0) {
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            byte b = buff.readByte();
            switch (b) {
               case ADD_LARGE_MESSAGE_PENDING: {
                  long messageID = buff.readLong();
                  if (!pendingLargeMessages.remove(new Pair<>(recordDeleted.id, messageID))) {
                     ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
                  }
                  installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
                  break;
               }
               default:
                  ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
            }
         }
      }
      journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
   }
}
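Each prepared transaction stores its XA identifier in the journal record's extra data; recovery first rebuilds the Xid, then replays the transaction's own records against a fresh TransactionImpl before handing it to the JournalLoader. A minimal sketch of reconstructing an Xid from bytes (the layout below is an assumption for illustration, not the actual XidEncoding wire format):

import javax.transaction.xa.Xid;
import java.nio.ByteBuffer;

// Illustrative only: rebuild an Xid from a
// [formatId][branchLength][branch...][globalLength][global...] layout.
// The real XidEncoding in Artemis may use a different format.
public class XidDecodeExample implements Xid {

   private final int formatId;
   private final byte[] branchQualifier;
   private final byte[] globalTransactionId;

   XidDecodeExample(byte[] extraData) {
      ByteBuffer buffer = ByteBuffer.wrap(extraData);
      formatId = buffer.getInt();
      branchQualifier = new byte[buffer.getInt()];
      buffer.get(branchQualifier);
      globalTransactionId = new byte[buffer.getInt()];
      buffer.get(globalTransactionId);
   }

   @Override
   public int getFormatId() {
      return formatId;
   }

   @Override
   public byte[] getBranchQualifier() {
      return branchQualifier;
   }

   @Override
   public byte[] getGlobalTransactionId() {
      return globalTransactionId;
   }

   public static void main(String[] args) {
      ByteBuffer out = ByteBuffer.allocate(4 + 4 + 3 + 4 + 2);
      out.putInt(0x4242).putInt(3).put(new byte[]{1, 2, 3}).putInt(2).put(new byte[]{9, 9});
      Xid xid = new XidDecodeExample(out.array());
      System.out.println("formatId=" + xid.getFormatId());
   }
}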
Use of org.apache.activemq.artemis.core.paging.cursor.PageSubscription in project activemq-artemis by apache.
The class PageCursorProviderImpl, method cleanup.
@Override
public void cleanup() {
   logger.tracef("performing page cleanup %s", this);
   ArrayList<Page> depagedPages = new ArrayList<>();
   while (true) {
      if (pagingStore.lock(100)) {
         break;
      }
      if (!pagingStore.isStarted()) {
         return;
      }
   }
   logger.tracef("%s locked", this);
   synchronized (this) {
      try {
         if (!pagingStore.isStarted()) {
            return;
         }
         if (pagingStore.getNumberOfPages() == 0) {
            return;
         }
         ArrayList<PageSubscription> cursorList = cloneSubscriptions();
         long minPage = checkMinPage(cursorList);
         logger.debugf("Asserting cleanup for address %s, firstPage=%d", pagingStore.getAddress(), minPage);
         // when the smallest page still in use is the page currently being written,
         // completion has to be verified in a different way
         if (minPage == pagingStore.getCurrentWritingPage() && pagingStore.getCurrentPage().getNumberOfMessages() > 0) {
            boolean complete = checkPageCompletion(cursorList, minPage);
            if (!pagingStore.isStarted()) {
               return;
            }
            // All the pages on the cursor are complete, so clean up everything and store a bookmark
            if (complete) {
               cleanupComplete(cursorList);
            }
         }
         for (long i = pagingStore.getFirstPage(); i < minPage; i++) {
            if (!checkPageCompletion(cursorList, i)) {
               break;
            }
            Page page = pagingStore.depage();
            if (page == null) {
               break;
            }
            depagedPages.add(page);
         }
         if (pagingStore.getNumberOfPages() == 0 || pagingStore.getNumberOfPages() == 1 && pagingStore.getCurrentPage().getNumberOfMessages() == 0) {
            pagingStore.stopPaging();
         } else {
            if (logger.isTraceEnabled()) {
               logger.trace("Couldn't cleanup page on address " + this.pagingStore.getAddress() + " as numberOfPages == " + pagingStore.getNumberOfPages() + " and currentPage.numberOfMessages = " + pagingStore.getCurrentPage().getNumberOfMessages());
            }
         }
      } catch (Exception ex) {
         ActiveMQServerLogger.LOGGER.problemCleaningPageAddress(ex, pagingStore.getAddress());
         return;
      } finally {
         pagingStore.unlock();
      }
   }
   finishCleanup(depagedPages);
}
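The entry loop is worth noting: pagingStore.lock(100) is retried with a 100 ms timeout, and between attempts the loop bails out if the store has been stopped, so cleanup never blocks shutdown while waiting for the lock. The same shape with a plain ReentrantLock (a sketch; PagingStore.lock(long) is Artemis's own API, not java.util.concurrent):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: poll for a lock, but give up if the component stops,
// mirroring the entry loop of PageCursorProviderImpl.cleanup().
public class GuardedLockExample {

   private final ReentrantLock lock = new ReentrantLock();
   private volatile boolean started = true;

   void cleanup() throws InterruptedException {
      while (true) {
         if (lock.tryLock(100, TimeUnit.MILLISECONDS)) {
            break; // got the lock, proceed
         }
         if (!started) {
            return; // stopped while waiting: there is nothing left to clean
         }
      }
      try {
         // ... depage and clean ...
      } finally {
         lock.unlock();
      }
   }

   void stop() {
      started = false;
   }
}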
Use of org.apache.activemq.artemis.core.paging.cursor.PageSubscription in project activemq-artemis by apache.
The class PageCursorProviderImpl, method onPageModeCleared.
/**
 * Delete everything associated with any queue on this address.
 * This is to be called when the address is about to be released from paging.
 * Hence the PagingStore will be holding a write lock, meaning no messages are going to be paged at this time.
 * So we shouldn't lock anything after this method, to avoid deadlocks between the writeLock and any synchronization with the CursorProvider.
 */
@Override
public void onPageModeCleared() {
   ArrayList<PageSubscription> subscriptions = cloneSubscriptions();
   Transaction tx = new TransactionImpl(storageManager);
   for (PageSubscription sub : subscriptions) {
      try {
         sub.onPageModeCleared(tx);
      } catch (Exception e) {
         ActiveMQServerLogger.LOGGER.errorCleaningPagingOnQueue(e, sub.getQueue().getName().toString());
      }
   }
   try {
      tx.commit();
   } catch (Exception e) {
      ActiveMQServerLogger.LOGGER.errorCleaningPagingDuringCommit(e);
   }
}
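All subscriptions are cleared under a single transaction: a failure on one subscription is logged and the loop moves on rather than aborting the batch, and one commit at the end finishes the job. A minimal sketch of that tolerant-batch pattern (the Tx interface is an illustrative stand-in for Artemis's Transaction):

import java.util.Arrays;
import java.util.List;
import java.util.function.Consumer;

// Illustrative only: apply an action to every item, logging per-item failures
// instead of failing the whole batch, then finish with one commit step.
public class TolerantBatchExample {

   interface Tx {
      void commit() throws Exception;
   }

   static <T> void forEachThenCommit(List<T> items, Consumer<T> action, Tx tx) {
      for (T item : items) {
         try {
            action.accept(item);
         } catch (Exception e) {
            System.err.println("error cleaning " + item + ": " + e); // log and continue
         }
      }
      try {
         tx.commit();
      } catch (Exception e) {
         System.err.println("error during commit: " + e);
      }
   }

   public static void main(String[] args) {
      forEachThenCommit(Arrays.asList("q1", "q2"),
                        q -> { if (q.equals("q2")) throw new RuntimeException("boom"); },
                        () -> System.out.println("committed"));
   }
}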