
Example 1 with RecordInfo

Use of org.apache.activemq.artemis.core.journal.RecordInfo in project narayana by jbosstm.

The class HornetqJournalStore, method start().

public void start() throws Exception {
    journal.start();
    List<RecordInfo> committedRecords = new LinkedList<RecordInfo>();
    List<PreparedTransactionInfo> preparedTransactions = new LinkedList<PreparedTransactionInfo>();
    TransactionFailureCallback failureCallback = new TransactionFailureCallback() {

        public void failedTransaction(long l, List<RecordInfo> recordInfos, List<RecordInfo> recordInfos1) {
            tsLogger.i18NLogger.warn_journal_load_error();
        }
    };
    JournalLoadInformation journalLoadInformation = journal.load(committedRecords, preparedTransactions, failureCallback);
    maxID.set(journalLoadInformation.getMaxID());
    if (!preparedTransactions.isEmpty()) {
        tsLogger.i18NLogger.warn_journal_load_error();
    }
    for (RecordInfo record : committedRecords) {
        InputBuffer inputBuffer = new InputBuffer(record.data);
        Uid uid = UidHelper.unpackFrom(inputBuffer);
        String typeName = inputBuffer.unpackString();
        getContentForType(typeName).put(uid, record);
    // Don't unpack the rest yet; we may never need it. read_committed does it on demand.
    }
}
Also used: PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo), JournalLoadInformation (org.apache.activemq.artemis.core.journal.JournalLoadInformation), Uid (com.arjuna.ats.arjuna.common.Uid), RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo), InputBuffer (com.arjuna.ats.arjuna.state.InputBuffer), LinkedList (java.util.LinkedList), List (java.util.List), TransactionFailureCallback (org.apache.activemq.artemis.core.journal.TransactionFailureCallback)
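For reference, the load-and-index pattern above is not specific to HornetqJournalStore; it works against any org.apache.activemq.artemis.core.journal.Journal. Below is a minimal sketch, assuming a Journal instance has already been constructed and configured elsewhere; the class and field names are hypothetical, and the callback collects failed transaction ids instead of only logging a warning.

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

import org.apache.activemq.artemis.core.journal.Journal;
import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.TransactionFailureCallback;

public final class SimpleJournalLoadExample {

    private final Journal journal; // assumed to be built and configured by the caller
    private final List<Long> failedTransactionIds = new ArrayList<>();
    private long maxID;

    public SimpleJournalLoadExample(Journal journal) {
        this.journal = journal;
    }

    public List<RecordInfo> startAndLoad() throws Exception {
        journal.start();

        List<RecordInfo> committedRecords = new LinkedList<>();
        List<PreparedTransactionInfo> preparedTransactions = new LinkedList<>();

        // Same callback shape as in HornetqJournalStore.start(), but the failed
        // transaction ids are remembered instead of only being logged.
        TransactionFailureCallback failureCallback = new TransactionFailureCallback() {
            @Override
            public void failedTransaction(long transactionID, List<RecordInfo> records, List<RecordInfo> recordsToDelete) {
                failedTransactionIds.add(transactionID);
            }
        };

        JournalLoadInformation info = journal.load(committedRecords, preparedTransactions, failureCallback);
        maxID = info.getMaxID(); // highest record id seen; a store would seed its id generator from this

        return committedRecords;
    }

    public long getMaxID() {
        return maxID;
    }

    public List<Long> getFailedTransactionIds() {
        return failedTransactionIds;
    }
}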

Example 2 with RecordInfo

Use of org.apache.activemq.artemis.core.journal.RecordInfo in project narayana by jbosstm.

The class HornetqJournalStore, method write_committed().

/**
 * Write a new copy of the object's committed state.
 *
 * @param uid    The object to work on.
 * @param typeName   The type of the object to work on.
 * @param txData The state to write.
 * @return <code>true</code> if no errors occurred, <code>false</code>
 *         otherwise.
 * @throws ObjectStoreException if an error occurs while packing or writing the state.
 */
public boolean write_committed(Uid uid, String typeName, OutputObjectState txData) throws ObjectStoreException {
    RecordInfo previousRecord = null;
    try {
        OutputBuffer outputBuffer = new OutputBuffer();
        UidHelper.packInto(uid, outputBuffer);
        outputBuffer.packString(typeName);
        outputBuffer.packBytes(txData.buffer());
        byte[] data = outputBuffer.buffer();
        RecordInfo record = new RecordInfo(getId(uid, typeName), RECORD_TYPE, data, false, (short) 0);
        previousRecord = getContentForType(typeName).putIfAbsent(uid, record);
        if (previousRecord != null) {
            // the packed data may have changed, so update the map with the latest data
            getContentForType(typeName).replace(uid, record);
            journal.appendUpdateRecord(previousRecord.id, RECORD_TYPE, data, syncWrites);
        } else {
            journal.appendAddRecord(record.id, RECORD_TYPE, data, syncWrites);
        }
    } catch (Exception e) {
        if (previousRecord == null) {
            // if appendAddRecord() fails, remove record from map. Leave it there if appendUpdateRecord() fails.
            getContentForType(typeName).remove(uid);
        }
        throw new ObjectStoreException(e);
    }
    return true;
}
Also used: ObjectStoreException (com.arjuna.ats.arjuna.exceptions.ObjectStoreException), RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo), OutputBuffer (com.arjuna.ats.arjuna.state.OutputBuffer), IOException (java.io.IOException)
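To complete the picture, here is a hedged sketch of the matching read path: it unpacks a RecordInfo written by write_committed() in the same order the data was packed (Uid, type name, state bytes). It assumes InputBuffer.unpackBytes() mirrors OutputBuffer.packBytes() and that UidHelper lives in its usual narayana package; the real read_committed() (not shown in this listing) additionally wraps the state in an InputObjectState.

import com.arjuna.ats.arjuna.common.Uid;
import com.arjuna.ats.arjuna.state.InputBuffer;
import com.arjuna.ats.internal.arjuna.common.UidHelper; // package assumed; adjust to your narayana version

import org.apache.activemq.artemis.core.journal.RecordInfo;

public final class CommittedRecordReader {

    /**
     * Unpacks the payload produced by write_committed(): Uid, then type name,
     * then the raw committed state.
     */
    public static byte[] readState(RecordInfo record, Uid expectedUid, String expectedTypeName) throws Exception {
        InputBuffer inputBuffer = new InputBuffer(record.data);

        Uid uid = UidHelper.unpackFrom(inputBuffer);
        String typeName = inputBuffer.unpackString();

        if (!uid.equals(expectedUid) || !typeName.equals(expectedTypeName)) {
            throw new IllegalStateException("journal record does not match " + expectedUid + "/" + expectedTypeName);
        }

        // unpackBytes() is assumed to be the inverse of OutputBuffer.packBytes()
        return inputBuffer.unpackBytes();
    }
}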

Example 3 with RecordInfo

Use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

The class AbstractJournalStorageManager, method loadMessageJournal().

@Override
public JournalLoadInformation loadMessageJournal(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, Map<Long, QueueBindingInfo> queueInfos, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Set<Pair<Long, Long>> pendingLargeMessages, List<PageCountPending> pendingNonTXPageCounter, final JournalLoader journalLoader) throws Exception {
    List<RecordInfo> records = new ArrayList<>();
    List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
    Set<PageTransactionInfo> invalidPageTransactions = null;
    Map<Long, Message> messages = new HashMap<>();
    readLock();
    try {
        JournalLoadInformation info = messageJournal.load(records, preparedTransactions, new LargeMessageTXFailureCallback(this, messages));
        ArrayList<LargeServerMessage> largeMessages = new ArrayList<>();
        Map<Long, Map<Long, AddMessageRecord>> queueMap = new HashMap<>();
        Map<Long, PageSubscription> pageSubscriptions = new HashMap<>();
        final int totalSize = records.size();
        for (int reccount = 0; reccount < totalSize; reccount++) {
            // Progress is logged only for large journals (a message every 1,000,000 records)
            if (reccount > 0 && reccount % 1000000 == 0) {
                long percent = (long) ((((double) reccount) / ((double) totalSize)) * 100f);
                ActiveMQServerLogger.LOGGER.percentLoaded(percent);
            }
            RecordInfo record = records.get(reccount);
            byte[] data = record.data;
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            byte recordType = record.getUserRecordType();
            switch(recordType) {
                case JournalRecordIds.ADD_LARGE_MESSAGE_PENDING:
                    {
                        PendingLargeMessageEncoding pending = new PendingLargeMessageEncoding();
                        pending.decode(buff);
                        if (pendingLargeMessages != null) {
                            // it can be null in tests, and we don't need to do anything in that case
                            pendingLargeMessages.add(new Pair<>(record.id, pending.largeMessageID));
                        }
                        break;
                    }
                case JournalRecordIds.ADD_LARGE_MESSAGE:
                    {
                        LargeServerMessage largeMessage = parseLargeMessage(messages, buff);
                        messages.put(record.id, largeMessage);
                        largeMessages.add(largeMessage);
                        break;
                    }
                case JournalRecordIds.ADD_MESSAGE:
                    {
                        throw new IllegalStateException("This is using old journal data, export your data and import at the correct version");
                    }
                case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
                    {
                        Message message = MessagePersister.getInstance().decode(buff, null);
                        messages.put(record.id, message);
                        break;
                    }
                case JournalRecordIds.ADD_REF:
                    {
                        long messageID = record.id;
                        RefEncoding encoding = new RefEncoding();
                        encoding.decode(buff);
                        Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
                        if (queueMessages == null) {
                            queueMessages = new LinkedHashMap<>();
                            queueMap.put(encoding.queueID, queueMessages);
                        }
                        Message message = messages.get(messageID);
                        if (message == null) {
                            ActiveMQServerLogger.LOGGER.cannotFindMessage(record.id);
                        } else {
                            queueMessages.put(messageID, new AddMessageRecord(message));
                        }
                        break;
                    }
                case JournalRecordIds.ACKNOWLEDGE_REF:
                    {
                        long messageID = record.id;
                        RefEncoding encoding = new RefEncoding();
                        encoding.decode(buff);
                        Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
                        if (queueMessages == null) {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueue(encoding.queueID, messageID);
                        } else {
                            AddMessageRecord rec = queueMessages.remove(messageID);
                            if (rec == null) {
                                ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                            }
                        }
                        break;
                    }
                case JournalRecordIds.UPDATE_DELIVERY_COUNT:
                    {
                        long messageID = record.id;
                        DeliveryCountUpdateEncoding encoding = new DeliveryCountUpdateEncoding();
                        encoding.decode(buff);
                        Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
                        if (queueMessages == null) {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueDelCount(encoding.queueID);
                        } else {
                            AddMessageRecord rec = queueMessages.get(messageID);
                            if (rec == null) {
                                ActiveMQServerLogger.LOGGER.journalCannotFindMessageDelCount(messageID);
                            } else {
                                rec.setDeliveryCount(encoding.count);
                            }
                        }
                        break;
                    }
                case JournalRecordIds.PAGE_TRANSACTION:
                    {
                        PageTransactionInfo invalidPGTx = null;
                        if (record.isUpdate) {
                            PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
                            pageUpdate.decode(buff);
                            PageTransactionInfo pageTX = pagingManager.getTransaction(pageUpdate.pageTX);
                            if (pageTX == null) {
                                ActiveMQServerLogger.LOGGER.journalCannotFindPageTX(pageUpdate.pageTX);
                            } else {
                                if (!pageTX.onUpdate(pageUpdate.recods, null, null)) {
                                    invalidPGTx = pageTX;
                                }
                            }
                        } else {
                            PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
                            pageTransactionInfo.decode(buff);
                            pageTransactionInfo.setRecordID(record.id);
                            pagingManager.addTransaction(pageTransactionInfo);
                            if (!pageTransactionInfo.checkSize(null, null)) {
                                invalidPGTx = pageTransactionInfo;
                            }
                        }
                        if (invalidPGTx != null) {
                            if (invalidPageTransactions == null) {
                                invalidPageTransactions = new HashSet<>();
                            }
                            invalidPageTransactions.add(invalidPGTx);
                        }
                        break;
                    }
                case JournalRecordIds.SET_SCHEDULED_DELIVERY_TIME:
                    {
                        long messageID = record.id;
                        ScheduledDeliveryEncoding encoding = new ScheduledDeliveryEncoding();
                        encoding.decode(buff);
                        Map<Long, AddMessageRecord> queueMessages = queueMap.get(encoding.queueID);
                        if (queueMessages == null) {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueScheduled(encoding.queueID, messageID);
                        } else {
                            AddMessageRecord rec = queueMessages.get(messageID);
                            if (rec == null) {
                                ActiveMQServerLogger.LOGGER.cannotFindMessage(messageID);
                            } else {
                                rec.setScheduledDeliveryTime(encoding.scheduledDeliveryTime);
                            }
                        }
                        break;
                    }
                case JournalRecordIds.DUPLICATE_ID:
                    {
                        DuplicateIDEncoding encoding = new DuplicateIDEncoding();
                        encoding.decode(buff);
                        List<Pair<byte[], Long>> ids = duplicateIDMap.get(encoding.address);
                        if (ids == null) {
                            ids = new ArrayList<>();
                            duplicateIDMap.put(encoding.address, ids);
                        }
                        ids.add(new Pair<>(encoding.duplID, record.id));
                        break;
                    }
                case JournalRecordIds.HEURISTIC_COMPLETION:
                    {
                        HeuristicCompletionEncoding encoding = new HeuristicCompletionEncoding();
                        encoding.decode(buff);
                        resourceManager.putHeuristicCompletion(record.id, encoding.xid, encoding.isCommit);
                        break;
                    }
                case JournalRecordIds.ACKNOWLEDGE_CURSOR:
                    {
                        CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
                        encoding.decode(buff);
                        encoding.position.setRecordID(record.id);
                        PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            sub.reloadACK(encoding.position);
                        } else {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloading(encoding.queueID);
                            messageJournal.appendDeleteRecord(record.id, false);
                        }
                        break;
                    }
                case JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE:
                    {
                        PageCountRecord encoding = new PageCountRecord();
                        encoding.decode(buff);
                        PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            sub.getCounter().loadValue(record.id, encoding.getValue(), encoding.getPersistentSize());
                        } else {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPage(encoding.getQueueID());
                            messageJournal.appendDeleteRecord(record.id, false);
                        }
                        break;
                    }
                case JournalRecordIds.PAGE_CURSOR_COUNTER_INC:
                    {
                        PageCountRecordInc encoding = new PageCountRecordInc();
                        encoding.decode(buff);
                        PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            sub.getCounter().loadInc(record.id, encoding.getValue(), encoding.getPersistentSize());
                        } else {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingPageCursor(encoding.getQueueID());
                            messageJournal.appendDeleteRecord(record.id, false);
                        }
                        break;
                    }
                case JournalRecordIds.PAGE_CURSOR_COMPLETE:
                    {
                        CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
                        encoding.decode(buff);
                        encoding.position.setRecordID(record.id);
                        PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            if (!sub.reloadPageCompletion(encoding.position)) {
                                if (logger.isDebugEnabled()) {
                                    logger.debug("Complete page " + encoding.position.getPageNr() + " doesn't exist on page manager " + sub.getPagingStore().getAddress());
                                }
                                messageJournal.appendDeleteRecord(record.id, false);
                            }
                        } else {
                            ActiveMQServerLogger.LOGGER.cantFindQueueOnPageComplete(encoding.queueID);
                            messageJournal.appendDeleteRecord(record.id, false);
                        }
                        break;
                    }
                case JournalRecordIds.PAGE_CURSOR_PENDING_COUNTER:
                    {
                        PageCountPendingImpl pendingCountEncoding = new PageCountPendingImpl();
                        pendingCountEncoding.decode(buff);
                        pendingCountEncoding.setID(record.id);
                        // This can be null in test cases that are not interested in this outcome
                        if (pendingNonTXPageCounter != null) {
                            pendingNonTXPageCounter.add(pendingCountEncoding);
                        }
                        break;
                    }
                default:
                    {
                        throw new IllegalStateException("Invalid record type " + recordType);
                    }
            }
            // Null out the entry to free memory sooner: the record is no longer needed,
            // and keeping its byte array alive for the rest of the load would only
            // increase memory pressure and slow the load down
            records.set(reccount, null);
        }
        // Release the memory as soon as it is no longer needed
        records.clear();
        records = null;
        journalLoader.handleAddMessage(queueMap);
        loadPreparedTransactions(postOffice, pagingManager, resourceManager, queueInfos, preparedTransactions, duplicateIDMap, pageSubscriptions, pendingLargeMessages, journalLoader);
        for (PageSubscription sub : pageSubscriptions.values()) {
            sub.getCounter().processReload();
        }
        for (LargeServerMessage msg : largeMessages) {
            if (msg.getRefCount() == 0) {
                ActiveMQServerLogger.LOGGER.largeMessageWithNoRef(msg.getMessageID());
                msg.decrementDelayDeletionCount();
            }
        }
        journalLoader.handleNoMessageReferences(messages);
        // To recover positions on Iterators
        if (pagingManager != null) {
            // it can be null in certain tests that do not deal with paging;
            // this can also be the case in certain embedded setups
            pagingManager.processReload();
        }
        journalLoader.postLoad(messageJournal, resourceManager, duplicateIDMap);
        checkInvalidPageTransactions(pagingManager, invalidPageTransactions);
        journalLoaded = true;
        return info;
    } finally {
        readUnLock();
    }
}
Also used: PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo), PageTransactionInfo (org.apache.activemq.artemis.core.paging.PageTransactionInfo), LargeServerMessage (org.apache.activemq.artemis.core.server.LargeServerMessage), Message (org.apache.activemq.artemis.api.core.Message), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), PageCountPendingImpl (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountPendingImpl), PageTransactionInfoImpl (org.apache.activemq.artemis.core.paging.impl.PageTransactionInfoImpl), ArrayList (java.util.ArrayList), DeliveryCountUpdateEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.DeliveryCountUpdateEncoding), PageUpdateTXEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageUpdateTXEncoding), ScheduledDeliveryEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.ScheduledDeliveryEncoding), DuplicateIDEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.DuplicateIDEncoding), List (java.util.List), RouteContextList (org.apache.activemq.artemis.core.server.RouteContextList), ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer), Pair (org.apache.activemq.artemis.api.core.Pair), HashSet (java.util.HashSet), PendingLargeMessageEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PendingLargeMessageEncoding), RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo), PageSubscription (org.apache.activemq.artemis.core.paging.cursor.PageSubscription), PageCountRecord (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecord), JournalLoadInformation (org.apache.activemq.artemis.core.journal.JournalLoadInformation), CursorAckRecordEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.CursorAckRecordEncoding), PageCountRecordInc (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc), HeuristicCompletionEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.HeuristicCompletionEncoding), Map (java.util.Map), RefEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.RefEncoding)
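Most of loadMessageJournal() is the dispatch on RecordInfo.getUserRecordType(), with one codec per record type. The sketch below strips that pattern down to a single case; the class and counting logic are hypothetical, and the JournalRecordIds import path is assumed to match the one used by the surrounding storage manager.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.activemq.artemis.api.core.ActiveMQBuffer;
import org.apache.activemq.artemis.api.core.ActiveMQBuffers;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.persistence.impl.journal.JournalRecordIds;
import org.apache.activemq.artemis.core.persistence.impl.journal.codec.RefEncoding;

public final class RecordTypeSurvey {

    /** Counts how many ADD_REF records point at each queue; other record types are skipped. */
    public static Map<Long, Integer> refsPerQueue(List<RecordInfo> records) {
        Map<Long, Integer> refsPerQueue = new HashMap<>();

        for (RecordInfo record : records) {
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(record.data);

            switch (record.getUserRecordType()) {
                case JournalRecordIds.ADD_REF: {
                    RefEncoding encoding = new RefEncoding();
                    encoding.decode(buff); // the payload identifies the queue the reference belongs to
                    refsPerQueue.merge(encoding.queueID, 1, Integer::sum);
                    break;
                }
                default:
                    // every other record type has its own codec; see the full switch above
                    break;
            }
        }
        return refsPerQueue;
    }
}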

Example 4 with RecordInfo

Use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

The class AbstractJournalStorageManager, method loadPreparedTransactions().

private void loadPreparedTransactions(final PostOffice postOffice, final PagingManager pagingManager, final ResourceManager resourceManager, final Map<Long, QueueBindingInfo> queueInfos, final List<PreparedTransactionInfo> preparedTransactions, final Map<SimpleString, List<Pair<byte[], Long>>> duplicateIDMap, final Map<Long, PageSubscription> pageSubscriptions, final Set<Pair<Long, Long>> pendingLargeMessages, JournalLoader journalLoader) throws Exception {
    // recover prepared transactions
    for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
        XidEncoding encodingXid = new XidEncoding(preparedTransaction.getExtraData());
        Xid xid = encodingXid.xid;
        Transaction tx = new TransactionImpl(preparedTransaction.getId(), xid, this);
        List<MessageReference> referencesToAck = new ArrayList<>();
        Map<Long, Message> messages = new HashMap<>();
        // first get any sent messages for this tx and recreate
        for (RecordInfo record : preparedTransaction.getRecords()) {
            byte[] data = record.data;
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            byte recordType = record.getUserRecordType();
            switch(recordType) {
                case JournalRecordIds.ADD_LARGE_MESSAGE:
                    {
                        messages.put(record.id, parseLargeMessage(messages, buff));
                        break;
                    }
                case JournalRecordIds.ADD_MESSAGE:
                    {
                        break;
                    }
                case JournalRecordIds.ADD_MESSAGE_PROTOCOL:
                    {
                        Message message = MessagePersister.getInstance().decode(buff, null);
                        messages.put(record.id, message);
                        break;
                    }
                case JournalRecordIds.ADD_REF:
                    {
                        long messageID = record.id;
                        RefEncoding encoding = new RefEncoding();
                        encoding.decode(buff);
                        Message message = messages.get(messageID);
                        if (message == null) {
                            throw new IllegalStateException("Cannot find message with id " + messageID);
                        }
                        journalLoader.handlePreparedSendMessage(message, tx, encoding.queueID);
                        break;
                    }
                case JournalRecordIds.ACKNOWLEDGE_REF:
                    {
                        long messageID = record.id;
                        RefEncoding encoding = new RefEncoding();
                        encoding.decode(buff);
                        journalLoader.handlePreparedAcknowledge(messageID, referencesToAck, encoding.queueID);
                        break;
                    }
                case JournalRecordIds.PAGE_TRANSACTION:
                    {
                        PageTransactionInfo pageTransactionInfo = new PageTransactionInfoImpl();
                        pageTransactionInfo.decode(buff);
                        if (record.isUpdate) {
                            PageTransactionInfo pgTX = pagingManager.getTransaction(pageTransactionInfo.getTransactionID());
                            pgTX.reloadUpdate(this, pagingManager, tx, pageTransactionInfo.getNumberOfMessages());
                        } else {
                            pageTransactionInfo.setCommitted(false);
                            tx.putProperty(TransactionPropertyIndexes.PAGE_TRANSACTION, pageTransactionInfo);
                            pagingManager.addTransaction(pageTransactionInfo);
                            tx.addOperation(new FinishPageMessageOperation());
                        }
                        break;
                    }
                case SET_SCHEDULED_DELIVERY_TIME:
                    {
                        break;
                    }
                case DUPLICATE_ID:
                    {
                        // We need to load the duplicate ids at prepare time too
                        DuplicateIDEncoding encoding = new DuplicateIDEncoding();
                        encoding.decode(buff);
                        DuplicateIDCache cache = postOffice.getDuplicateIDCache(encoding.address);
                        cache.load(tx, encoding.duplID);
                        break;
                    }
                case ACKNOWLEDGE_CURSOR:
                    {
                        CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
                        encoding.decode(buff);
                        encoding.position.setRecordID(record.id);
                        PageSubscription sub = locateSubscription(encoding.queueID, pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            sub.reloadPreparedACK(tx, encoding.position);
                            referencesToAck.add(new PagedReferenceImpl(encoding.position, null, sub));
                        } else {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.queueID);
                        }
                        break;
                    }
                case PAGE_CURSOR_COUNTER_VALUE:
                    {
                        ActiveMQServerLogger.LOGGER.journalPAGEOnPrepared();
                        break;
                    }
                case PAGE_CURSOR_COUNTER_INC:
                    {
                        PageCountRecordInc encoding = new PageCountRecordInc();
                        encoding.decode(buff);
                        PageSubscription sub = locateSubscription(encoding.getQueueID(), pageSubscriptions, queueInfos, pagingManager);
                        if (sub != null) {
                            sub.getCounter().applyIncrementOnTX(tx, record.id, encoding.getValue(), encoding.getPersistentSize());
                            sub.notEmpty();
                        } else {
                            ActiveMQServerLogger.LOGGER.journalCannotFindQueueReloadingACK(encoding.getQueueID());
                        }
                        break;
                    }
                default:
                    {
                        ActiveMQServerLogger.LOGGER.journalInvalidRecordType(recordType);
                    }
            }
        }
        for (RecordInfo recordDeleted : preparedTransaction.getRecordsToDelete()) {
            byte[] data = recordDeleted.data;
            if (data.length > 0) {
                ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
                byte b = buff.readByte();
                switch(b) {
                    case ADD_LARGE_MESSAGE_PENDING:
                        {
                            long messageID = buff.readLong();
                            if (!pendingLargeMessages.remove(new Pair<>(recordDeleted.id, messageID))) {
                                ActiveMQServerLogger.LOGGER.largeMessageNotFound(recordDeleted.id);
                            }
                            installLargeMessageConfirmationOnTX(tx, recordDeleted.id);
                            break;
                        }
                    default:
                        ActiveMQServerLogger.LOGGER.journalInvalidRecordTypeOnPreparedTX(b);
                }
            }
        }
        journalLoader.handlePreparedTransaction(tx, referencesToAck, xid, resourceManager);
    }
}
Also used: PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo), PageTransactionInfo (org.apache.activemq.artemis.core.paging.PageTransactionInfo), DuplicateIDCache (org.apache.activemq.artemis.core.postoffice.DuplicateIDCache), LargeServerMessage (org.apache.activemq.artemis.core.server.LargeServerMessage), Message (org.apache.activemq.artemis.api.core.Message), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), PageTransactionInfoImpl (org.apache.activemq.artemis.core.paging.impl.PageTransactionInfoImpl), ArrayList (java.util.ArrayList), DuplicateIDEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.DuplicateIDEncoding), ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer), RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo), PagedReferenceImpl (org.apache.activemq.artemis.core.paging.cursor.PagedReferenceImpl), TransactionImpl (org.apache.activemq.artemis.core.transaction.impl.TransactionImpl), PageSubscription (org.apache.activemq.artemis.core.paging.cursor.PageSubscription), MessageReference (org.apache.activemq.artemis.core.server.MessageReference), Xid (javax.transaction.xa.Xid), Transaction (org.apache.activemq.artemis.core.transaction.Transaction), FinishPageMessageOperation (org.apache.activemq.artemis.core.persistence.impl.journal.codec.FinishPageMessageOperation), CursorAckRecordEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.CursorAckRecordEncoding), XidEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.XidEncoding), PageCountRecordInc (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc), RefEncoding (org.apache.activemq.artemis.core.persistence.impl.journal.codec.RefEncoding)
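As a quick orientation aid, the sketch below (a hypothetical helper, using only calls that appear in loadPreparedTransactions() above) extracts the XA Xid from each prepared transaction's extra data and reports how many records and deletions it carries.

import java.util.List;

import javax.transaction.xa.Xid;

import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.persistence.impl.journal.codec.XidEncoding;

public final class PreparedTxSummary {

    /** Prints one line per recovered prepared transaction: journal id, XA Xid, and record counts. */
    public static void dump(List<PreparedTransactionInfo> preparedTransactions) {
        for (PreparedTransactionInfo preparedTransaction : preparedTransactions) {
            // The Xid is stored in the transaction's extra data, exactly as in loadPreparedTransactions()
            Xid xid = new XidEncoding(preparedTransaction.getExtraData()).xid;

            List<RecordInfo> records = preparedTransaction.getRecords();
            List<RecordInfo> recordsToDelete = preparedTransaction.getRecordsToDelete();

            System.out.printf("prepared tx id=%d xid=%s records=%d deletes=%d%n",
                    preparedTransaction.getId(), xid, records.size(), recordsToDelete.size());
        }
    }
}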

Example 5 with RecordInfo

Use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

The class LargeMessageTXFailureCallback, method failedTransaction().

@Override
public void failedTransaction(final long transactionID, final List<RecordInfo> records, final List<RecordInfo> recordsToDelete) {
    for (RecordInfo record : records) {
        if (record.userRecordType == ADD_LARGE_MESSAGE) {
            byte[] data = record.data;
            ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
            try {
                LargeServerMessage serverMessage = journalStorageManager.parseLargeMessage(messages, buff);
                serverMessage.decrementDelayDeletionCount();
            } catch (Exception e) {
                ActiveMQServerLogger.LOGGER.journalError(e);
            }
        }
    }
}
Also used: RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo), LargeServerMessage (org.apache.activemq.artemis.core.server.LargeServerMessage), ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer)
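This callback is handed to messageJournal.load(...) in loadMessageJournal() (Example 3). Below is a hedged sketch of an alternative callback, hypothetical and without any large-message cleanup, that merely reports which user record types were part of a failed transaction.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.TransactionFailureCallback;

public final class ReportingTXFailureCallback implements TransactionFailureCallback {

    @Override
    public void failedTransaction(final long transactionID, final List<RecordInfo> records, final List<RecordInfo> recordsToDelete) {
        Set<Byte> types = new HashSet<>();
        for (RecordInfo record : records) {
            types.add(record.userRecordType); // same field tested against ADD_LARGE_MESSAGE above
        }
        System.out.println("transaction " + transactionID + " failed to load; user record types seen: " + types);
    }
}

It can be passed to Journal.load(records, preparedTransactions, callback) in place of LargeMessageTXFailureCallback when no large-message handling is required.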

Aggregations

RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo): 65 uses
PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo): 33 uses
ArrayList (java.util.ArrayList): 22 uses
JournalImpl (org.apache.activemq.artemis.core.journal.impl.JournalImpl): 20 uses
Test (org.junit.Test): 20 uses
LinkedList (java.util.LinkedList): 15 uses
ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer): 15 uses
HashMap (java.util.HashMap): 10 uses
NIOSequentialFileFactory (org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory): 10 uses
TransactionFailureCallback (org.apache.activemq.artemis.core.journal.TransactionFailureCallback): 9 uses
File (java.io.File): 7 uses
Journal (org.apache.activemq.artemis.core.journal.Journal): 7 uses
SimpleEncoding (org.apache.activemq.artemis.tests.unit.core.journal.impl.fakes.SimpleEncoding): 7 uses
List (java.util.List): 6 uses
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 6 uses
SequentialFileFactory (org.apache.activemq.artemis.core.io.SequentialFileFactory): 6 uses
PageCountRecordInc (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc): 6 uses
IOException (java.io.IOException): 5 uses
HashSet (java.util.HashSet): 5 uses
LinkedHashMap (java.util.LinkedHashMap): 5 uses