Use of org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc in the activemq-artemis project by Apache.
Class AbstractJournalStorageManager, method loadMessageJournal:
/**
 * Replays every committed record from the message journal and rebuilds the broker's
 * in-memory state: plain and large messages, per-queue message references, delivery
 * counts, scheduled deliveries, page transactions, page-cursor counters/completions,
 * duplicate-ID caches and heuristic completions. Prepared (2PC) transactions returned
 * by the load are recovered afterwards via {@code loadPreparedTransactions}.
 * <p>
 * The journal read lock is held for the entire load; {@code journalLoaded} is set to
 * {@code true} only if the load completes without error.
 *
 * @param postOffice              used during prepared-transaction recovery (duplicate-ID caches)
 * @param pagingManager           receives reloaded page transactions; may be null in tests/embedded setups
 * @param resourceManager         receives heuristic completion records
 * @param queueInfos              queue-binding information used to locate page subscriptions
 * @param duplicateIDMap          out-parameter: duplicate IDs found in the journal, grouped by address
 * @param pendingLargeMessages    out-parameter: pending large-message (record id, message id) pairs; may be null in tests
 * @param pendingNonTXPageCounter out-parameter: pending non-transactional page counters; may be null in tests
 * @param journalLoader           callback that applies the reloaded records to the runtime structures
 * @return the {@link JournalLoadInformation} produced by the underlying journal load
 * @throws Exception if the journal cannot be read or a record cannot be decoded
 */
@Override
public JournalLoadInformation loadMessageJournal(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, Map<Long, QueueBindingInfo> queueInfos, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Set<Pair<Long, Long>> pendingLargeMessages, List<PageCountPending> pendingNonTXPageCounter, final JournalLoader journalLoader) throws Exception {
   List<RecordInfo> records = new ArrayList<>();
   List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
   // Lazily allocated: only created once the first invalid page transaction is found.
   Set<PageTransactionInfo> invalidPageTransactions = null;
   Map<Long, Message> messages = new HashMap<>();
   readLock();
   try {
      // The failure callback cleans up large messages referenced by failed transactions.
      JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
      ArrayList<LargeServerMessage> largeMessages = new ArrayList<>();
      // queueID -> (messageID -> record). Inner map is a LinkedHashMap so the
      // journal (and therefore delivery) order is preserved per queue.
      Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<>();
      Map<Long, PageSubscription> pageSubscriptions = new HashMap<>();
      final int totalSize = records.size();
      for (int reccount = 0; reccount < totalSize; reccount++) {
         // It will show log.info only with large journals (more than 1 million records)
         if (reccount > 0 && reccount % 1000000 == 0) {
            long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
            ActiveMQServerLogger.LOGGER.percentLoaded(percent);
         }
         RecordInfo record = records.get(reccount);
         byte[] data = record.data;
         ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
         byte recordType = record.getUserRecordType();
         switch(recordType) {
            case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING:
            {
               // A large message whose add was recorded but not yet confirmed.
               PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
               pending.decode(buff);
               if (pendingLargeMessages != null) {
                  // it could be null on tests, and we don't need anything on that case
                  pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
               }
               break;
            }
            case JournalRecordIds.ADD_LARGE_MESSAGE:
            {
               LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
               messages.put(record.id, largeMessage);
               // Tracked separately so unreferenced large messages can be reclaimed below.
               largeMessages.add(largeMessage);
               break;
            }
            case JournalRecordIds.ADD_MESSAGE:
            {
               // Legacy (pre-protocol) message record: no longer readable by this version.
               throw new IllegalStateException("This is using old journal data, export your data and import at the correct version");
            }
            case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
            {
               Message message = MessagePersister.getInstance().decode(buff, null);
               messages.put(record.id, message);
               break;
            }
            case JournalRecordIds.ADD_REF:
            {
               // A reference ties a previously loaded message to a queue.
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  queueMessages = new LinkedHashMap<>();
                  queueMap.put(encoding.queueID, queueMessages);
               }
               Message message = messages.get(messageID);
               if (message == null) {
                  // The message record may have been removed by compaction; log and skip.
                  ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
               } else {
                  queueMessages.put(messageID, new AddMessageRecord(message));
               }
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_REF:
            {
               // An ACK cancels out a previously loaded ADD_REF for the same queue/message.
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
               } else {
                  AddMessageRecord rec = queueMessages.remove(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                  }
               }
               break;
            }
            case JournalRecordIds.UPDATE_DELIVERY_COUNT:
            {
               long messageID = record.id;
               DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
               } else {
                  AddMessageRecord rec = queueMessages.get(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
                  } else {
                     rec.setDeliveryCount(encoding.count);
                  }
               }
               break;
            }
            case JournalRecordIds.PAGE_TRANSACTION:
            {
               // A page transaction failing its consistency check is collected and
               // dealt with at the end of the load (checkInvalidPageTransactions).
               PageTransactionInfo invalidPGTx = null;
               if (record.isUpdate) {
                  PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
                  pageUpdate.decode(buff);
                  PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
                  if (pageTX == null) {
                     ActiveMQServerLogger.LOGGER.journalCannotFindPageTX(pageUpdate.pageTX);
                  } else {
                     // NOTE(review): "recods" is the field's actual (misspelled) name in
                     // PageUpdateTXEncoding; it cannot be fixed from this class.
                     if (!pageTX.onUpdate(pageUpdate.recods, null, null)) {
                        invalidPGTx = pageTX;
                     }
                  }
               } else {
                  PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
                  pageTransactionInfo.decode(buff);
                  pageTransactionInfo.setRecordID(record.id);
                  pagingManager.addTransaction(pageTransactionInfo);
                  if (!pageTransactionInfo.checkSize(null, null)) {
                     invalidPGTx = pageTransactionInfo;
                  }
               }
               if (invalidPGTx != null) {
                  if (invalidPageTransactions == null) {
                     invalidPageTransactions = new HashSet<>();
                  }
                  invalidPageTransactions.add(invalidPGTx);
               }
               break;
            }
            case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME:
            {
               long messageID = record.id;
               ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
               encoding.decode(buff);
               Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
               if (queueMessages == null) {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
               } else {
                  AddMessageRecord rec = queueMessages.get(messageID);
                  if (rec == null) {
                     ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                  } else {
                     rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
                  }
               }
               break;
            }
            case JournalRecordIds.DUPLICATE_ID:
            {
               DuplicateIDEncoding encoding = new DuplicateIDEncoding();
               encoding.decode(buff);
               List<Pair<byte[], Long>> ids = duplicateIDMap.get(encoding.address);
               if (ids == null) {
                  ids = new ArrayList<>();
                  duplicateIDMap.put(encoding.address, ids);
               }
               ids.add(new Pair<>(encoding.duplID, record.id));
               break;
            }
            case JournalRecordIds.HEURISTIC_COMPLETION:
            {
               HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
               encoding.decode(buff);
               resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_CURSOR:
            {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.reloadACK(encoding.position);
               } else {
                  // Orphan record (queue no longer exists): delete it from the journal.
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE:
            {
               PageCountRecord encoding = new PageCountRecord();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().loadValue(record.id, encoding.getValue(), encoding.getPersistentSize());
               } else {
                  // Orphan record (queue no longer exists): delete it from the journal.
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COUNTER_INC:
            {
               PageCountRecordInc encoding = new PageCountRecordInc();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.getCounter().loadInc(record.id, encoding.getValue(), encoding.getPersistentSize());
               } else {
                  // Orphan record (queue no longer exists): delete it from the journal.
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_COMPLETE:
            {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  if (!sub.reloadPageCompletion(encoding.position)) {
                     if (logger.isDebugEnabled()) {
                        logger.debug("Complete page " + encoding.position.getPageNr() + " doesn't exist on page manager " + sub.getPagingStore().getAddress());
                     }
                     // The referenced page is gone; drop the stale completion record.
                     messageJournal.appendDeleteRecord(record.id, false);
                  }
               } else {
                  ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
                  messageJournal.appendDeleteRecord(record.id, false);
               }
               break;
            }
            case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER:
            {
               PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
               pendingCountEncoding.decode(buff);
               pendingCountEncoding.setID(record.id);
               // This can be null on testcases not interested on this outcome
               if (pendingNonTXPageCounter != null) {
                  pendingNonTXPageCounter.add(pendingCountEncoding);
               }
               break;
            }
            default:
            {
               throw new IllegalStateException("Invalid record type " + recordType);
            }
         }
         // This will free up memory sooner. The record is not needed any more
         // and its byte array would consume memory during the load process even though it's not necessary any longer
         // what would delay processing time during load
         records.set(reccount, null);
      }
      // Release the memory as soon as not needed any longer
      records.clear();
      records = null;
      journalLoader.handleAddMessage(queueMap);
      loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
      for (PageSubscription sub : pageSubscriptions.values()) {
         sub.getCounter().processReload();
      }
      // Large messages that ended up with no references are scheduled for deletion.
      for (LargeServerMessage msg : largeMessages) {
         if (msg.getRefCount() == 0) {
            ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
            msg.decrementDelayDeletionCount();
         }
      }
      journalLoader.handleNoMessageReferences(messages);
      // To recover positions on Iterators
      if (pagingManager != null) {
         // it could be null on certain tests that are not dealing with paging
         // This could also be the case in certain embedded conditions
         pagingManager.processReload();
      }
      journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
      checkInvalidPageTransactions(pagingManager, invalidPageTransactions);
      journalLoaded = true;
      return info;
   } finally {
      readUnLock();
   }
}
Use of org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc in the activemq-artemis project by Apache.
Class AbstractJournalStorageManager, method loadPreparedTransactions:
/**
 * Recovers prepared (XA two-phase-commit) transactions found during the journal load.
 * For each prepared transaction a new {@link TransactionImpl} is rebuilt from its
 * extra data (the XID) and every record inside the transaction is replayed: sends,
 * acknowledgements, page transactions, duplicate IDs, cursor ACKs and page-counter
 * increments. Records marked for deletion within the transaction are processed last.
 * The rebuilt transaction is finally handed to
 * {@code journalLoader.handlePreparedTransaction} for registration with the resource
 * manager.
 *
 * @param postOffice           used to resolve per-address duplicate-ID caches
 * @param pagingManager        page-transaction registry for reloads/updates
 * @param resourceManager      target registry for the recovered prepared transactions
 * @param queueInfos           queue-binding info used to locate page subscriptions
 * @param preparedTransactions the prepared transactions returned by the journal load
 * @param duplicateIDMap       duplicate-ID map being rebuilt (shared with the caller)
 * @param pageSubscriptions    cache of page subscriptions already located
 * @param pendingLargeMessages pending large-message pairs; matching entries are removed
 * @param journalLoader        callback that applies reloaded records to runtime state
 * @throws Exception if a record cannot be decoded or replayed
 */
private void loadPreparedTransactions(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, final Map<Long, QueueBindingInfo> queueInfos, final List<PreparedTransactionInfo> preparedTransactions, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Map<Long, PageSubscription> pageSubscriptions, final Set<Pair<Long, Long>> pendingLargeMessages, JournalLoader journalLoader) throws Exception {
   // recover prepared transactions
   for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
      // The XID is serialized in the transaction's extra data.
      XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());
      Xid xid = encodingXid.xid;
      Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);
      List<MessageReference> referencesToAck = new ArrayList<>();
      // Messages added inside this transaction, keyed by record (message) id.
      Map<Long, Message> messages = new HashMap<>();
      // first get any sent messages for this tx and recreate
      for (RecordInfo record : preparedTransaction.getRecords()) {
         byte[] data = record.data;
         ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
         byte recordType = record.getUserRecordType();
         switch(recordType) {
            case JournalRecordIds.ADD_LARGE_MESSAGE:
            {
               messages.put(record.id, parseLargeMessage(messages, buff));
               break;
            }
            case JournalRecordIds.ADD_MESSAGE:
            {
               // Legacy record type: intentionally ignored during prepared-TX recovery.
               break;
            }
            case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
            {
               Message message = MessagePersister.getInstance().decode(buff, null);
               messages.put(record.id, message);
               break;
            }
            case JournalRecordIds.ADD_REF:
            {
               // A reference must point at a message added earlier in this same TX.
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               Message message = messages.get(messageID);
               if (message == null) {
                  throw new IllegalStateException("Cannot find message with id " + messageID);
               }
               journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);
               break;
            }
            case JournalRecordIds.ACKNOWLEDGE_REF:
            {
               long messageID = record.id;
               RefEncoding encoding = new RefEncoding();
               encoding.decode(buff);
               journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);
               break;
            }
            case JournalRecordIds.PAGE_TRANSACTION:
            {
               PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
               pageTransactionInfo.decode(buff);
               if (record.isUpdate) {
                  // Update: apply the delta to the already-registered page transaction.
                  PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
                  pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
               } else {
                  // New page TX: kept uncommitted until the prepared TX is resolved.
                  pageTransactionInfo.setCommitted(false);
                  tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
                  pagingManager.addTransaction(pageTransactionInfo);
                  tx.addOperation(new FinishPageMessageOperation());
               }
               break;
            }
            case SET_SCHEDULED_DELIVERY_TIME:
            {
               // Scheduled-delivery updates inside a prepared TX require no replay action.
               break;
            }
            case DUPLICATE_ID:
            {
               // We need load the duplicate ids at prepare time too
               DuplicateIDEncoding encoding = new DuplicateIDEncoding();
               encoding.decode(buff);
               DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
               cache.load(tx, encoding.duplID);
               break;
            }
            case ACKNOWLEDGE_CURSOR:
            {
               CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
               encoding.decode(buff);
               encoding.position.setRecordID(record.id);
               PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  sub.reloadPreparedACK(tx, encoding.position);
                  referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
               }
               break;
            }
            case PAGE_CURSOR_COUNTER_VALUE:
            {
               // A counter *value* record is never expected inside a prepared TX.
               ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();
               break;
            }
            case PAGE_CURSOR_COUNTER_INC:
            {
               PageCountRecordInc encoding = new PageCountRecordInc();
               encoding.decode(buff);
               PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
               if (sub != null) {
                  // The increment is applied under the TX so it commits/rolls back with it.
                  sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue(), encoding.getPersistentSize());
                  sub.notEmpty();
               } else {
                  ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
               }
               break;
            }
            default:
            {
               // Unknown types are logged but do not abort recovery.
               ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
            }
         }
      }
      for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
         byte[] data = recordDeleted.data;
         if (data.length > 0) {
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            byte b = buff.readByte();
            switch(b) {
               case ADD_LARGE_MESSAGE_PENDING:
               {
                  // Deleting a pending large-message record confirms the large message.
                  long messageID = buff.readLong();
                  if (!pendingLargeMessages.remove(new Pair<>(recordDeleted.id, messageID))) {
                     ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
                  }
                  installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
                  break;
               }
               default:
                  ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
            }
         }
      }
      journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
   }
}
Use of org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc in the activemq-artemis project by Apache.
Class DescribeJournal, method newObjectEncoding:
/**
 * Decodes a raw journal record into a human-readable encoding object for journal
 * inspection. The user record type selects the codec; the decoded encoding (or a
 * describe wrapper around it) is returned. Unknown record types yield {@code null}.
 *
 * @param info           the raw record read from the journal
 * @param storageManager used only to materialize large messages
 * @return the decoded encoding/describe object, or {@code null} for unknown types
 */
public static Object newObjectEncoding(RecordInfo info, JournalStorageManager storageManager) {
   ActiveMQBuffer wrapped = ActiveMQBuffers.wrappedBuffer(info.data);
   long recordId = info.id;
   int userType = info.getUserRecordType();
   if (userType == ADD_LARGE_MESSAGE_PENDING) {
      PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
      pending.decode(wrapped);
      return pending;
   } else if (userType == ADD_LARGE_MESSAGE) {
      LargeServerMessage largeMessage = new LargeServerMessageImpl(storageManager);
      LargeMessagePersister.getInstance().decode(wrapped, largeMessage);
      return new MessageDescribe(largeMessage);
   } else if (userType == ADD_MESSAGE) {
      // Legacy record type: no decoder available any longer.
      return "ADD-MESSAGE is not supported any longer, use export/import";
   } else if (userType == ADD_MESSAGE_PROTOCOL) {
      Message decodedMessage = MessagePersister.getInstance().decode(wrapped, null);
      return new MessageDescribe(decodedMessage);
   } else if (userType == ADD_REF) {
      RefEncoding refEncoding = new RefEncoding();
      refEncoding.decode(wrapped);
      return new ReferenceDescribe(refEncoding);
   } else if (userType == ACKNOWLEDGE_REF) {
      RefEncoding ackEncoding = new RefEncoding();
      ackEncoding.decode(wrapped);
      return new AckDescribe(ackEncoding);
   } else if (userType == UPDATE_DELIVERY_COUNT) {
      DeliveryCountUpdateEncoding countUpdate = new DeliveryCountUpdateEncoding();
      countUpdate.decode(wrapped);
      return countUpdate;
   } else if (userType == PAGE_TRANSACTION) {
      // Updates and initial page-TX records use different codecs.
      if (info.isUpdate) {
         PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
         pageUpdate.decode(wrapped);
         return pageUpdate;
      }
      PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
      pageTransactionInfo.decode(wrapped);
      pageTransactionInfo.setRecordID(info.id);
      return pageTransactionInfo;
   } else if (userType == SET_SCHEDULED_DELIVERY_TIME) {
      ScheduledDeliveryEncoding scheduled = new ScheduledDeliveryEncoding();
      scheduled.decode(wrapped);
      return scheduled;
   } else if (userType == DUPLICATE_ID) {
      DuplicateIDEncoding duplicate = new DuplicateIDEncoding();
      duplicate.decode(wrapped);
      return duplicate;
   } else if (userType == HEURISTIC_COMPLETION) {
      HeuristicCompletionEncoding heuristic = new HeuristicCompletionEncoding();
      heuristic.decode(wrapped);
      return heuristic;
   } else if (userType == ACKNOWLEDGE_CURSOR) {
      CursorAckRecordEncoding cursorAck = new CursorAckRecordEncoding();
      cursorAck.decode(wrapped);
      return cursorAck;
   } else if (userType == PAGE_CURSOR_COUNTER_VALUE) {
      PageCountRecord pageCount = new PageCountRecord();
      pageCount.decode(wrapped);
      return pageCount;
   } else if (userType == PAGE_CURSOR_COMPLETE) {
      CursorAckRecordEncoding pageComplete = new PageCompleteCursorAckRecordEncoding();
      pageComplete.decode(wrapped);
      return pageComplete;
   } else if (userType == PAGE_CURSOR_COUNTER_INC) {
      PageCountRecordInc counterInc = new PageCountRecordInc();
      counterInc.decode(wrapped);
      return counterInc;
   } else if (userType == PAGE_CURSOR_PENDING_COUNTER) {
      PageCountPendingImpl pendingCounter = new PageCountPendingImpl();
      pendingCounter.decode(wrapped);
      pendingCounter.setID(info.id);
      return pendingCounter;
   } else if (userType == QUEUE_STATUS_RECORD) {
      return AbstractJournalStorageManager.newQueueStatusEncoding(recordId, wrapped);
   } else if (userType == QUEUE_BINDING_RECORD) {
      return AbstractJournalStorageManager.newQueueBindingEncoding(recordId, wrapped);
   } else if (userType == ID_COUNTER_RECORD) {
      EncodingSupport idCounter = new IDCounterEncoding();
      idCounter.decode(wrapped);
      return idCounter;
   } else if (userType == JournalRecordIds.GROUP_RECORD) {
      return AbstractJournalStorageManager.newGroupEncoding(recordId, wrapped);
   } else if (userType == ADDRESS_SETTING_RECORD) {
      return AbstractJournalStorageManager.newAddressEncoding(recordId, wrapped);
   } else if (userType == SECURITY_RECORD) {
      return AbstractJournalStorageManager.newSecurityRecord(recordId, wrapped);
   } else if (userType == ADDRESS_BINDING_RECORD) {
      return AbstractJournalStorageManager.newAddressBindingEncoding(recordId, wrapped);
   }
   // Unknown user record type: caller is expected to handle null.
   return null;
}
Use of org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc in the activemq-artemis project by Apache.
Class JournalPageCountSizeTest, method testPageCursorCounterRecordSize:
/**
 * Verifies that a page-cursor counter increment record round-trips its persistent
 * size through the journal: an increment of size 1000 is stored, the storage manager
 * is stopped, and the raw record is reloaded from the message journal and decoded
 * with {@link PageCountRecordInc}.
 */
@Test
public void testPageCursorCounterRecordSize() throws Exception {
   // Persist a single counter increment, then stop so the journal can be reopened raw.
   server.getStorageManager().storePageCounterInc(1, 1, 1000);
   server.getStorageManager().stop();
   JournalStorageManager storage = (JournalStorageManager) server.getStorageManager();
   List<RecordInfo> loadedRecords = new LinkedList<>();
   List<PreparedTransactionInfo> loadedPrepared = new LinkedList<>();
   try {
      storage.getMessageJournal().start();
      storage.getMessageJournal().load(loadedRecords, loadedPrepared, transactionFailure);
      // Decode the first (and only) committed record and check the persisted size.
      PageCountRecordInc decoded = new PageCountRecordInc();
      decoded.decode(ActiveMQBuffers.wrappedBuffer(loadedRecords.get(0).data));
      Assert.assertEquals(1000, decoded.getPersistentSize());
   } finally {
      storage.getMessageJournal().stop();
   }
}
Use of org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc in the activemq-artemis project by Apache.
Class DescribeJournal, method printSurvivingRecords:
/**
 * Loads a journal and prints a summary of every surviving (committed) record,
 * reconstructed page counters, prepared transactions, failed transactions and
 * message/reference counts.
 * <p>
 * Fixes over the previous version: the journal is now stopped in a {@code finally}
 * block so a failure during load or record processing no longer leaves the journal
 * started, and the magic literals {@code 31}/{@code 32} in the prepared-TX loop are
 * replaced by the {@code JournalRecordIds.ADD_MESSAGE}/{@code ADD_REF} constants
 * already used symbolically in the main record loop.
 *
 * @param journal the journal to start, load and describe; always stopped on exit
 * @param out     destination stream for the report
 * @param safe    when true, record descriptions omit potentially sensitive payloads
 * @return a {@link DescribeJournal} holding the loaded records and prepared transactions
 * @throws Exception if the journal cannot be started or loaded
 */
public static DescribeJournal printSurvivingRecords(Journal journal, PrintStream out, boolean safe) throws Exception {
   final Map<Long, PageSubscriptionCounterImpl> counters = new HashMap<>();
   out.println("### Surviving Records Summary ###");
   List<RecordInfo> records = new LinkedList<>();
   List<PreparedTransactionInfo> preparedTransactions = new LinkedList<>();
   journal.start();
   try {
      // Collects a description of every transaction missing its commit/prepare/rollback.
      final StringBuffer bufferFailingTransactions = new StringBuffer();
      int messageCount = 0;
      Map<Long, Integer> messageRefCounts = new HashMap<>();
      int preparedMessageCount = 0;
      Map<Long, Integer> preparedMessageRefCount = new HashMap<>();
      journal.load(records, preparedTransactions, new TransactionFailureCallback() {
         @Override
         public void failedTransaction(long transactionID, List<RecordInfo> records1, List<RecordInfo> recordsToDelete) {
            bufferFailingTransactions.append("Transaction " + transactionID + " failed with these records:\n");
            for (RecordInfo info : records1) {
               bufferFailingTransactions.append("- " + describeRecord(info, safe) + "\n");
            }
            for (RecordInfo info : recordsToDelete) {
               bufferFailingTransactions.append("- " + describeRecord(info, safe) + " <marked to delete>\n");
            }
         }
      }, false);
      for (RecordInfo info : records) {
         PageSubscriptionCounterImpl subsCounter = null;
         long queueIDForCounter = 0;
         Object o = newObjectEncoding(info);
         if (info.getUserRecordType() == JournalRecordIds.ADD_MESSAGE) {
            messageCount++;
         } else if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
            // A reference increments the per-queue count (absent -> starts at 1).
            ReferenceDescribe ref = (ReferenceDescribe) o;
            Integer count = messageRefCounts.get(ref.refEncoding.queueID);
            if (count == null) {
               count = 1;
               messageRefCounts.put(ref.refEncoding.queueID, count);
            } else {
               messageRefCounts.put(ref.refEncoding.queueID, count + 1);
            }
         } else if (info.getUserRecordType() == JournalRecordIds.ACKNOWLEDGE_REF) {
            // An ACK decrements the per-queue count (absent -> recorded as 0).
            AckDescribe ref = (AckDescribe) o;
            Integer count = messageRefCounts.get(ref.refEncoding.queueID);
            if (count == null) {
               messageRefCounts.put(ref.refEncoding.queueID, 0);
            } else {
               messageRefCounts.put(ref.refEncoding.queueID, count - 1);
            }
         } else if (info.getUserRecordType() == JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE) {
            PageCountRecord encoding = (PageCountRecord) o;
            queueIDForCounter = encoding.getQueueID();
            subsCounter = lookupCounter(counters, queueIDForCounter);
            subsCounter.loadValue(info.id, encoding.getValue(), encoding.getPersistentSize());
            subsCounter.processReload();
         } else if (info.getUserRecordType() == JournalRecordIds.PAGE_CURSOR_COUNTER_INC) {
            PageCountRecordInc encoding = (PageCountRecordInc) o;
            queueIDForCounter = encoding.getQueueID();
            subsCounter = lookupCounter(counters, queueIDForCounter);
            subsCounter.loadInc(info.id, encoding.getValue(), encoding.getPersistentSize());
            subsCounter.processReload();
         }
         out.println(describeRecord(info, o, safe));
         if (subsCounter != null) {
            out.println("##SubsCounter for queue=" + queueIDForCounter + ", value=" + subsCounter.getValue());
            out.println();
         }
      }
      if (counters.size() > 0) {
         out.println("### Page Counters");
         printCounters(out, counters);
      }
      out.println();
      out.println("### Prepared TX ###");
      for (PreparedTransactionInfo tx : preparedTransactions) {
         out.println(tx.getId());
         for (RecordInfo info : tx.getRecords()) {
            Object o = newObjectEncoding(info);
            out.println("- " + describeRecord(info, o, safe));
            // Was the magic literal 31; same constant the main loop uses symbolically.
            if (info.getUserRecordType() == JournalRecordIds.ADD_MESSAGE) {
               preparedMessageCount++;
            } else if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
               // Was the magic literal 32.
               ReferenceDescribe ref = (ReferenceDescribe) o;
               Integer count = preparedMessageRefCount.get(ref.refEncoding.queueID);
               if (count == null) {
                  count = 1;
                  preparedMessageRefCount.put(ref.refEncoding.queueID, count);
               } else {
                  preparedMessageRefCount.put(ref.refEncoding.queueID, count + 1);
               }
            }
         }
         for (RecordInfo info : tx.getRecordsToDelete()) {
            out.println("- " + describeRecord(info, safe) + " <marked to delete>");
         }
      }
      String missingTX = bufferFailingTransactions.toString();
      if (missingTX.length() > 0) {
         out.println();
         out.println("### Failed Transactions (Missing commit/prepare/rollback record) ###");
      }
      out.println(bufferFailingTransactions.toString());
      out.println("### Message Counts ###");
      out.println("message count=" + messageCount);
      out.println("message reference count");
      for (Map.Entry<Long, Integer> longIntegerEntry : messageRefCounts.entrySet()) {
         out.println("queue id " + longIntegerEntry.getKey() + ",count=" + longIntegerEntry.getValue());
      }
      out.println("prepared message count=" + preparedMessageCount);
      for (Map.Entry<Long, Integer> longIntegerEntry : preparedMessageRefCount.entrySet()) {
         out.println("queue id " + longIntegerEntry.getKey() + ",count=" + longIntegerEntry.getValue());
      }
      return new DescribeJournal(records, preparedTransactions);
   } finally {
      // Always release the journal, even if load/describe fails part-way.
      journal.stop();
   }
}
Aggregations