Use of org.apache.activemq.artemis.core.journal.JournalLoadInformation in project activemq-artemis by apache: class AbstractJournalStorageManager, method loadMessageJournal.
@Override
public JournalLoadInformation loadMessageJournal(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, Map<Long, QueueBindingInfo> queueInfos, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Set<Pair<Long, Long>> pendingLargeMessages, List<PageCountPending> pendingNonTXPageCounter, final JournalLoader journalLoader) throws Exception {
List<RecordInfo> records = new ArrayList<>();
List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
Set<PageTransactionInfo> invalidPageTransactions = null;
Map<Long, Message> messages = new HashMap<>();
readLock();
try {
JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
ArrayList<LargeServerMessage> largeMessages = new ArrayList<>();
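// queueMap: queueID -> (messageID -> AddMessageRecord); rebuilt from ADD_REF records and handed to journalLoader.handleAddMessage once the scan completes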
Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<>();
Map<Long, PageSubscription> pageSubscriptions = new HashMap<>();
final int totalSize = records.size();
for (int reccount = 0; reccount < totalSize; reccount++) {
// Progress is logged only for large journals (more than 1 million records)
if (reccount > 0 && reccount % 1000000 == 0) {
long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
ActiveMQServerLogger.LOGGER.percentLoaded(percent);
}
RecordInfo record = records.get(reccount);
byte[] data = record.data;
ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
byte recordType = record.getUserRecordType();
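// Dispatch on the user record type; each case decodes the payload with its matching encoding class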
switch(recordType) {
case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING:
{
PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
pending.decode(buff);
if (pendingLargeMessages != null) {
// it can be null in tests, and nothing is needed in that case
pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
}
break;
}
case JournalRecordIds.ADD_LARGE_MESSAGE:
{
LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
messages.put(record.id, largeMessage);
largeMessages.add(largeMessage);
break;
}
case JournalRecordIds.ADD_MESSAGE:
{
throw new IllegalStateException("This is using old journal data, export your data and import at the correct version");
}
case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
{
Message message = MessagePersister.getInstance().decode(buff, null);
messages.put(record.id, message);
break;
}
case JournalRecordIds.ADD_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
queueMessages = new LinkedHashMap<>();
queueMap.put(encoding.queueID, queueMessages);
}
Message message = messages.get(messageID);
if (message == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
} else {
queueMessages.put(messageID, new AddMessageRecord(message));
}
break;
}
case JournalRecordIds.ACKNOWLEDGE_REF:
{
long messageID = record.id;
RefEncoding encoding = new RefEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
} else {
AddMessageRecord rec = queueMessages.remove(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
}
}
break;
}
case JournalRecordIds.UPDATE_DELIVERY_COUNT:
{
long messageID = record.id;
DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
} else {
AddMessageRecord rec = queueMessages.get(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
} else {
rec.setDeliveryCount(encoding.count);
}
}
break;
}
case JournalRecordIds.PAGE_TRANSACTION:
{
PageTransactionInfo invalidPGTx = null;
if (record.isUpdate) {
PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
pageUpdate.decode(buff);
PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
if (pageTX == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindPageTX(pageUpdate.pageTX);
} else {
if (!pageTX.onUpdate(pageUpdate.recods, null, null)) {
invalidPGTx = pageTX;
}
}
} else {
PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
pageTransactionInfo.decode(buff);
pageTransactionInfo.setRecordID(record.id);
pagingManager.addTransaction(pageTransactionInfo);
if (!pageTransactionInfo.checkSize(null, null)) {
invalidPGTx = pageTransactionInfo;
}
}
if (invalidPGTx != null) {
if (invalidPageTransactions == null) {
invalidPageTransactions = new HashSet<>();
}
invalidPageTransactions.add(invalidPGTx);
}
break;
}
case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME:
{
long messageID = record.id;
ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
encoding.decode(buff);
Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
if (queueMessages == null) {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
} else {
AddMessageRecord rec = queueMessages.get(messageID);
if (rec == null) {
ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
} else {
rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
}
}
break;
}
case JournalRecordIds.DUPLICATE_ID:
{
DuplicateIDEncoding encoding = new DuplicateIDEncoding();
encoding.decode(buff);
List<Pair<byte[], Long>> ids = duplicateIDMap.get(encoding.address);
if (ids == null) {
ids = new ArrayList<>();
duplicateIDMap.put(encoding.address, ids);
}
ids.add(new Pair<>(encoding.duplID, record.id));
break;
}
case JournalRecordIds.HEURISTIC_COMPLETION:
{
HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
encoding.decode(buff);
resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
break;
}
case JournalRecordIds.ACKNOWLEDGE_CURSOR:
{
CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
encoding.decode(buff);
encoding.position.setRecordID(record.id);
PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.reloadACK(encoding.position);
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE:
{
PageCountRecord encoding = new PageCountRecord();
encoding.decode(buff);
PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.getCounter().loadValue(record.id, encoding.getValue(), encoding.getPersistentSize());
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COUNTER_INC:
{
PageCountRecordInc encoding = new PageCountRecordInc();
encoding.decode(buff);
PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
sub.getCounter().loadInc(record.id, encoding.getValue(), encoding.getPersistentSize());
} else {
ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_COMPLETE:
{
CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
encoding.decode(buff);
encoding.position.setRecordID(record.id);
PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
if (sub != null) {
if (!sub.reloadPageCompletion(encoding.position)) {
if (logger.isDebugEnabled()) {
logger.debug("Complete page " + encoding.position.getPageNr() + " doesn't exist on page manager " + sub.getPagingStore().getAddress());
}
messageJournal.appendDeleteRecord(record.id, false);
}
} else {
ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
messageJournal.appendDeleteRecord(record.id, false);
}
break;
}
case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER:
{
PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
pendingCountEncoding.decode(buff);
pendingCountEncoding.setID(record.id);
// This can be null in test cases not interested in this outcome
if (pendingNonTXPageCounter != null) {
pendingNonTXPageCounter.add(pendingCountEncoding);
}
break;
}
default:
{
throw new IllegalStateException("Invalid record type " + recordType);
}
}
// Free the memory sooner: the record is no longer needed, and keeping its
// byte array alive for the rest of the load would only add memory pressure
// and slow the load down
records.set(reccount, null);
}
// Release the memory as soon as it is no longer needed
records.clear();
records = null;
journalLoader.handleAddMessage(queueMap);
loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
for (PageSubscription sub : pageSubscriptions.values()) {
sub.getCounter().processReload();
}
for (LargeServerMessage msg : largeMessages) {
if (msg.getRefCount() == 0) {
ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
msg.decrementDelayDeletionCount();
}
}
journalLoader.handleNoMessageReferences(messages);
// Recover cursor/iterator positions on the paging manager
if (pagingManager != null) {
// it could be null on certain tests that are not dealing with paging
// This could also be the case in certain embedded conditions
pagingManager.processReload();
}
journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
checkInvalidPageTransactions(pagingManager, invalidPageTransactions);
journalLoaded = true;
return info;
} finally {
readUnLock();
}
}
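The method above shows the general consumption pattern: load once, then dispatch on each record's user record type. Below is a minimal sketch of the same pattern for standalone tooling; the class name and output format are invented, while the load(records, preparedTransactions, callback) signature and the RecordInfo members are the ones used above.

import java.util.ArrayList;
import java.util.List;
import org.apache.activemq.artemis.core.journal.Journal;
import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;

// Hypothetical helper: dump the surviving records of an already-started journal
public final class JournalDump {
    public static JournalLoadInformation dump(Journal journal) throws Exception {
        List<RecordInfo> records = new ArrayList<>();
        List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
        // null failure callback, as loadBindingJournal passes further down
        JournalLoadInformation info = journal.load(records, preparedTransactions, null);
        for (RecordInfo record : records) {
            // RecordInfo exposes id, data, isUpdate and the user record type byte
            System.out.printf("id=%d type=%d update=%b size=%d%n",
                record.id, record.getUserRecordType(), record.isUpdate, record.data.length);
        }
        System.out.println("prepared transactions: " + preparedTransactions.size());
        return info;
    }
}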
Use of org.apache.activemq.artemis.core.journal.JournalLoadInformation in project activemq-artemis by apache: class JournalImpl, method load.
/**
 * @param loadManager the callback that receives each surviving record as it is read
 * @param changeData whether the load may modify the journal (free empty files, append rollback records)
 * @param replicationSync the journal state used during replication; when it is {@code JournalState.SYNCING} the load returns early with an empty {@code JournalLoadInformation}
 * @return summary of the load: the number of records and the highest record ID seen
 * @throws Exception
 */
private synchronized JournalLoadInformation load(final LoaderCallback loadManager, final boolean changeData, final JournalState replicationSync) throws Exception {
if (state == JournalState.STOPPED || state == JournalState.LOADED) {
throw new IllegalStateException("Journal " + this + " must be in " + JournalState.STARTED + " state, was " + state);
}
if (state == replicationSync) {
throw new IllegalStateException("Journal cannot be in state " + JournalState.STARTED);
}
checkControlFile();
records.clear();
filesRepository.clear();
transactions.clear();
currentFile = null;
final Map<Long, TransactionHolder> loadTransactions = new LinkedHashMap<>();
final List<JournalFile> orderedFiles = orderFiles();
filesRepository.calculateNextfileID(orderedFiles);
int lastDataPos = JournalImpl.SIZE_HEADER;
// AtomicLong is used only as a reference, not as an Atomic value
final AtomicLong maxID = new AtomicLong(-1);
for (final JournalFile file : orderedFiles) {
logger.trace("Loading file " + file.getFile().getFileName());
final AtomicBoolean hasData = new AtomicBoolean(false);
int resultLastPost = JournalImpl.readJournalFile(fileFactory, file, new JournalReaderCallback() {
private void checkID(final long id) {
if (id > maxID.longValue()) {
maxID.set(id);
}
}
@Override
public void onReadAddRecord(final RecordInfo info) throws Exception {
checkID(info.id);
hasData.set(true);
loadManager.addRecord(info);
records.put(info.id, new JournalRecord(file, info.data.length + JournalImpl.SIZE_ADD_RECORD + 1));
}
@Override
public void onReadUpdateRecord(final RecordInfo info) throws Exception {
checkID(info.id);
hasData.set(true);
loadManager.updateRecord(info);
JournalRecord posFiles = records.get(info.id);
if (posFiles != null) {
// It's legal for this to be null: the file(s) holding the record's add may
// have been deleted, leaving only some updates in this file
// +1 = compact count
posFiles.addUpdateFile(file, info.data.length + JournalImpl.SIZE_ADD_RECORD + 1);
}
}
@Override
public void onReadDeleteRecord(final long recordID) throws Exception {
hasData.set(true);
loadManager.deleteRecord(recordID);
JournalRecord posFiles = records.remove(recordID);
if (posFiles != null) {
posFiles.delete(file);
}
}
@Override
public void onReadUpdateRecordTX(final long transactionID, final RecordInfo info) throws Exception {
onReadAddRecordTX(transactionID, info);
}
@Override
public void onReadAddRecordTX(final long transactionID, final RecordInfo info) throws Exception {
checkID(info.id);
hasData.set(true);
TransactionHolder tx = loadTransactions.get(transactionID);
if (tx == null) {
tx = new TransactionHolder(transactionID);
loadTransactions.put(transactionID, tx);
}
tx.recordInfos.add(info);
JournalTransaction tnp = transactions.get(transactionID);
if (tnp == null) {
tnp = new JournalTransaction(transactionID, JournalImpl.this);
transactions.put(transactionID, tnp);
}
// +1 = compact count
tnp.addPositive(file, info.id, info.data.length + JournalImpl.SIZE_ADD_RECORD_TX + 1);
}
@Override
public void onReadDeleteRecordTX(final long transactionID, final RecordInfo info) throws Exception {
hasData.set(true);
TransactionHolder tx = loadTransactions.get(transactionID);
if (tx == null) {
tx = new TransactionHolder(transactionID);
loadTransactions.put(transactionID, tx);
}
tx.recordsToDelete.add(info);
JournalTransaction tnp = transactions.get(transactionID);
if (tnp == null) {
tnp = new JournalTransaction(transactionID, JournalImpl.this);
transactions.put(transactionID, tnp);
}
tnp.addNegative(file, info.id);
}
@Override
public void onReadPrepareRecord(final long transactionID, final byte[] extraData, final int numberOfRecords) throws Exception {
hasData.set(true);
TransactionHolder tx = loadTransactions.get(transactionID);
if (tx == null) {
// The user could choose to prepare empty transactions
tx = new TransactionHolder(transactionID);
loadTransactions.put(transactionID, tx);
}
tx.prepared = true;
tx.extraData = extraData;
JournalTransaction journalTransaction = transactions.get(transactionID);
if (journalTransaction == null) {
journalTransaction = new JournalTransaction(transactionID, JournalImpl.this);
transactions.put(transactionID, journalTransaction);
}
boolean healthy = checkTransactionHealth(file, journalTransaction, orderedFiles, numberOfRecords);
if (healthy) {
journalTransaction.prepare(file);
} else {
ActiveMQJournalLogger.LOGGER.preparedTXIncomplete(transactionID);
tx.invalid = true;
}
}
@Override
public void onReadCommitRecord(final long transactionID, final int numberOfRecords) throws Exception {
TransactionHolder tx = loadTransactions.remove(transactionID);
// The commit record can outlive the rest of the transaction (the tx body
// may have been reclaimed), so not finding the tx here is legal: assume
// it was reclaimed and ignore the commit
if (tx != null) {
JournalTransaction journalTransaction = transactions.remove(transactionID);
if (journalTransaction == null) {
throw new IllegalStateException("Cannot find tx " + transactionID);
}
boolean healthy = checkTransactionHealth(file, journalTransaction, orderedFiles, numberOfRecords);
if (healthy) {
for (RecordInfo txRecord : tx.recordInfos) {
if (txRecord.isUpdate) {
loadManager.updateRecord(txRecord);
} else {
loadManager.addRecord(txRecord);
}
}
for (RecordInfo deleteValue : tx.recordsToDelete) {
loadManager.deleteRecord(deleteValue.id);
}
journalTransaction.commit(file);
} else {
ActiveMQJournalLogger.LOGGER.txMissingElements(transactionID);
journalTransaction.forget();
}
hasData.set(true);
}
}
@Override
public void onReadRollbackRecord(final long transactionID) throws Exception {
TransactionHolder tx = loadTransactions.remove(transactionID);
// As with commits, it is legal to not find the transaction at this point:
// the rollback record may have outlived the reclaimed transaction body
if (tx != null) {
JournalTransaction tnp = transactions.remove(transactionID);
if (tnp == null) {
throw new IllegalStateException("Cannot find tx " + transactionID);
}
// There is no need to validate summaries/holes on rollbacks;
// the data will be ignored anyway
tnp.rollback(file);
hasData.set(true);
}
}
@Override
public void markAsDataFile(final JournalFile file) {
hasData.set(true);
}
});
if (hasData.get()) {
lastDataPos = resultLastPost;
filesRepository.addDataFileOnBottom(file);
} else {
if (changeData) {
// Data file with no usable data; recycle it as a free file
filesRepository.addFreeFile(file, false, false);
}
}
}
if (replicationSync == JournalState.SYNCING) {
assert filesRepository.getDataFiles().isEmpty();
setJournalState(JournalState.SYNCING);
return new JournalLoadInformation(0, -1);
}
setUpCurrentFile(lastDataPos);
setJournalState(JournalState.LOADED);
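// Any transaction still in loadTransactions has no commit or rollback record:
// replay it as a prepared transaction if it prepared cleanly, otherwise roll it back below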
for (TransactionHolder transaction : loadTransactions.values()) {
if ((!transaction.prepared || transaction.invalid) && replicationSync != JournalState.SYNCING_UP_TO_DATE) {
ActiveMQJournalLogger.LOGGER.uncomittedTxFound(transaction.transactionID);
if (changeData) {
// Append a rollback record here, otherwise compacting would log errors about unknown transactions
this.appendRollbackRecord(transaction.transactionID, false);
}
loadManager.failedTransaction(transaction.transactionID, transaction.recordInfos, transaction.recordsToDelete);
} else {
for (RecordInfo info : transaction.recordInfos) {
if (info.id > maxID.get()) {
maxID.set(info.id);
}
}
PreparedTransactionInfo info = new PreparedTransactionInfo(transaction.transactionID, transaction.extraData);
info.getRecords().addAll(transaction.recordInfos);
info.getRecordsToDelete().addAll(transaction.recordsToDelete);
loadManager.addPreparedTransaction(info);
}
}
checkReclaimStatus();
return new JournalLoadInformation(records.size(), maxID.longValue());
}
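All five LoaderCallback methods driven by load() above (addRecord, updateRecord, deleteRecord, addPreparedTransaction, failedTransaction) can be satisfied by a small collecting implementation. A hedged sketch, assuming the LoaderCallback interface declares exactly the methods invoked above:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.activemq.artemis.core.journal.LoaderCallback;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;

// Hypothetical sketch: keeps the effective (post-replay) record set in memory
public class CollectingLoaderCallback implements LoaderCallback {

    private final Map<Long, RecordInfo> live = new HashMap<>();
    private final List<PreparedTransactionInfo> prepared = new ArrayList<>();

    @Override
    public void addRecord(RecordInfo info) {
        live.put(info.id, info);
    }

    @Override
    public void updateRecord(RecordInfo info) {
        // during load, updates always follow the matching add
        live.put(info.id, info);
    }

    @Override
    public void deleteRecord(long id) {
        live.remove(id);
    }

    @Override
    public void addPreparedTransaction(PreparedTransactionInfo preparedTransaction) {
        prepared.add(preparedTransaction);
    }

    @Override
    public void failedTransaction(long transactionID, List<RecordInfo> records, List<RecordInfo> recordsToDelete) {
        // an uncommitted transaction was found during load; its records were not applied
    }
}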
Use of org.apache.activemq.artemis.core.journal.JournalLoadInformation in project activemq-artemis by apache: class ActiveMQServerImpl, method loadJournals.
private JournalLoadInformation[] loadJournals() throws Exception {
JournalLoader journalLoader = activation.createJournalLoader(postOffice, pagingManager, storageManager, queueFactory, nodeManager, managementService, groupingHandler, configuration, parentServer);
JournalLoadInformation[] journalInfo = new JournalLoadInformation[2];
List<QueueBindingInfo> queueBindingInfos = new ArrayList<>();
List<GroupingInfo> groupingInfos = new ArrayList<>();
List<AddressBindingInfo> addressBindingInfos = new ArrayList<>();
journalInfo[0] = storageManager.loadBindingJournal(queueBindingInfos, groupingInfos, addressBindingInfos);
recoverStoredConfigs();
Map<Long, AddressBindingInfo> addressBindingInfosMap = new HashMap<>();
journalLoader.initAddresses(addressBindingInfosMap, addressBindingInfos);
Map<Long, QueueBindingInfo> queueBindingInfosMap = new HashMap<>();
journalLoader.initQueues(queueBindingInfosMap, queueBindingInfos);
journalLoader.handleGroupingBindings(groupingInfos);
Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap = new HashMap<>();
HashSet<Pair<Long, Long>> pendingLargeMessages = new HashSet<>();
List<PageCountPending> pendingNonTXPageCounter = new LinkedList<>();
journalInfo[1] = storageManager.loadMessageJournal(postOffice, pagingManager, resourceManager, queueBindingInfosMap, duplicateIDMap, pendingLargeMessages, pendingNonTXPageCounter, journalLoader);
journalLoader.handleDuplicateIds(duplicateIDMap);
for (Pair<Long, Long> msgToDelete : pendingLargeMessages) {
ActiveMQServerLogger.LOGGER.deletingPendingMessage(msgToDelete);
LargeServerMessage msg = storageManager.createLargeMessage();
msg.setMessageID(msgToDelete.getB());
msg.setPendingRecordID(msgToDelete.getA());
msg.setDurable(true);
msg.deleteFile();
}
if (pendingNonTXPageCounter.size() != 0) {
try {
journalLoader.recoverPendingPageCounters(pendingNonTXPageCounter);
} catch (Throwable e) {
ActiveMQServerLogger.LOGGER.errorRecoveringPageCounter(e);
}
}
journalLoader.cleanUp();
return journalInfo;
}
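A caller sees the bindings-journal result at index 0 and the message-journal result at index 1. A small hedged sketch of consuming the array: getMaxID() is confirmed by the HornetqJournalStore usage further down, while getNumberOfRecords() is an assumed accessor matching the record count passed to the constructor in JournalImpl.load above.

import org.apache.activemq.artemis.core.journal.JournalLoadInformation;

// Hypothetical helper: summarize the two journals returned by loadJournals()
public final class JournalSummary {
    static void log(JournalLoadInformation[] journalInfo) {
        JournalLoadInformation bindings = journalInfo[0];
        JournalLoadInformation messages = journalInfo[1];
        System.out.println("bindings: records=" + bindings.getNumberOfRecords() + ", maxID=" + bindings.getMaxID());
        System.out.println("messages: records=" + messages.getNumberOfRecords() + ", maxID=" + messages.getMaxID());
    }
}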
Use of org.apache.activemq.artemis.core.journal.JournalLoadInformation in project narayana by jbosstm: class HornetqJournalStore, method start.
public void start() throws Exception {
journal.start();
List<RecordInfo> committedRecords = new LinkedList<RecordInfo>();
List<PreparedTransactionInfo> preparedTransactions = new LinkedList<PreparedTransactionInfo>();
TransactionFailureCallback failureCallback = new TransactionFailureCallback() {
public void failedTransaction(long l, List<RecordInfo> recordInfos, List<RecordInfo> recordInfos1) {
tsLogger.i18NLogger.warn_journal_load_error();
}
};
JournalLoadInformation journalLoadInformation = journal.load(committedRecords, preparedTransactions, failureCallback);
maxID.set(journalLoadInformation.getMaxID());
if (!preparedTransactions.isEmpty()) {
tsLogger.i18NLogger.warn_journal_load_error();
}
for (RecordInfo record : committedRecords) {
InputBuffer inputBuffer = new InputBuffer(record.data);
Uid uid = UidHelper.unpackFrom(inputBuffer);
String typeName = inputBuffer.unpackString();
getContentForType(typeName).put(uid, record);
// don't unpack the rest yet, we may never need it. read_committed does it on demand.
}
}
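For context, here is a self-contained sketch of opening, loading, and stopping a file journal in the way HornetqJournalStore does. The directory, prefix/extension, and sizing values are illustrative, and the NIOSequentialFileFactory(File, int) and nine-argument JournalImpl constructors are assumptions about this Artemis version rather than confirmed signatures.

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.journal.Journal;
import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.TransactionFailureCallback;
import org.apache.activemq.artemis.core.journal.impl.JournalImpl;

public class JournalLoadExample {
    public static void main(String[] args) throws Exception {
        // Illustrative values; tune fileSize/minFiles/pool/compaction for real use
        NIOSequentialFileFactory fileFactory = new NIOSequentialFileFactory(new File("target/example-journal"), 1);
        Journal journal = new JournalImpl(10 * 1024 * 1024, 2, 10, 0, 0, fileFactory, "example", "journal", 1);
        journal.start();

        List<RecordInfo> committedRecords = new ArrayList<>();
        List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
        // Same load(...) call as in HornetqJournalStore.start() above
        JournalLoadInformation info = journal.load(committedRecords, preparedTransactions,
            new TransactionFailureCallback() {
                public void failedTransaction(long transactionID, List<RecordInfo> records, List<RecordInfo> recordsToDelete) {
                    System.err.println("incomplete transaction " + transactionID);
                }
            });
        System.out.println("records=" + committedRecords.size() + " maxID=" + info.getMaxID());
        journal.stop();
    }
}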
Use of org.apache.activemq.artemis.core.journal.JournalLoadInformation in project activemq-artemis by apache: class AbstractJournalStorageManager, method loadBindingJournal.
@Override
public JournalLoadInformation loadBindingJournal(final List<QueueBindingInfo> queueBindingInfos, final List<GroupingInfo> groupingInfos, final List<AddressBindingInfo> addressBindingInfos) throws Exception {
List<RecordInfo> records = new ArrayList<>();
List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
JournalLoadInformation bindingsInfo = bindingsJournal.load(records, preparedTransactions, null);
HashMap<Long, PersistentQueueBindingEncoding> mapBindings = new HashMap<>();
for (RecordInfo record : records) {
long id = record.id;
ActiveMQBuffer buffer = ActiveMQBuffers.wrappedBuffer(record.data);
byte rec = record.getUserRecordType();
if (rec == JournalRecordIds.QUEUE_BINDING_RECORD) {
PersistentQueueBindingEncoding bindingEncoding = newQueueBindingEncoding(id, buffer);
mapBindings.put(bindingEncoding.getId(), bindingEncoding);
} else if (rec == JournalRecordIds.ID_COUNTER_RECORD) {
idGenerator.loadState(record.id, buffer);
} else if (rec == JournalRecordIds.ADDRESS_BINDING_RECORD) {
PersistentAddressBindingEncoding bindingEncoding = newAddressBindingEncoding(id, buffer);
addressBindingInfos.add(bindingEncoding);
} else if (rec == JournalRecordIds.GROUP_RECORD) {
GroupingEncoding encoding = newGroupEncoding(id, buffer);
groupingInfos.add(encoding);
} else if (rec == JournalRecordIds.ADDRESS_SETTING_RECORD) {
PersistedAddressSetting setting = newAddressEncoding(id, buffer);
mapPersistedAddressSettings.put(setting.getAddressMatch(), setting);
} else if (rec == JournalRecordIds.SECURITY_RECORD) {
PersistedRoles roles = newSecurityRecord(id, buffer);
mapPersistedRoles.put(roles.getAddressMatch(), roles);
} else if (rec == JournalRecordIds.QUEUE_STATUS_RECORD) {
QueueStatusEncoding statusEncoding = newQueueStatusEncoding(id, buffer);
PersistentQueueBindingEncoding queueBindingEncoding = mapBindings.get(statusEncoding.queueID);
if (queueBindingEncoding != null) {
queueBindingEncoding.addQueueStatusEncoding(statusEncoding);
} else {
// unlikely to happen, so no dedicated logger method was added
ActiveMQServerLogger.LOGGER.infoNoQueueWithID(statusEncoding.queueID, statusEncoding.getId());
this.deleteQueueStatus(statusEncoding.getId());
}
} else {
// unlikely to happen
ActiveMQServerLogger.LOGGER.invalidRecordType(rec, new Exception("invalid record type " + rec));
}
}
for (PersistentQueueBindingEncoding queue : mapBindings.values()) {
queueBindingInfos.add(queue);
}
// just to give a hand to GC
mapBindings.clear();
// This will instruct the IDGenerator to clean up old records
idGenerator.cleanup();
return bindingsInfo;
}
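The if/else chain over getUserRecordType() could equally be table-driven. A hypothetical refactoring sketch showing the design alternative; the dispatcher class and handler interface are invented, not part of Artemis:

import java.util.HashMap;
import java.util.Map;
import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.core.journal.RecordInfo;

// Hypothetical refactoring sketch: table-driven dispatch instead of the if/else chain
public class BindingRecordDispatcher {

    // Handler for one user record type
    interface RecordHandler {
        void handle(long id, ActiveMQBuffer buffer) throws Exception;
    }

    private final Map<Byte, RecordHandler> handlers = new HashMap<>();

    void register(byte userRecordType, RecordHandler handler) {
        handlers.put(userRecordType, handler);
    }

    void dispatch(RecordInfo record, ActiveMQBuffer buffer) throws Exception {
        RecordHandler handler = handlers.get(record.getUserRecordType());
        if (handler == null) {
            // mirrors the invalidRecordType branch above
            throw new IllegalStateException("invalid record type " + record.getUserRecordType());
        }
        handler.handle(record.id, buffer);
    }
}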