Example 6 with RecordInfo

use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

the class JournalImpl method readJournalFile.

/**
 * This method is used internally only; however, tools may use it for maintenance.
 */
public static int readJournalFile(final SequentialFileFactory fileFactory, final JournalFile file, final JournalReaderCallback reader) throws Exception {
    file.getFile().open(1, false);
    ByteBuffer wholeFileBuffer = null;
    try {
        final int filesize = (int) file.getFile().size();
        if (filesize < JournalImpl.SIZE_HEADER) {
            // the file is damaged or the system crashed before the header could be written
            return -1;
        }
        wholeFileBuffer = fileFactory.newBuffer(filesize);
        final int journalFileSize = file.getFile().read(wholeFileBuffer);
        if (journalFileSize != filesize) {
            throw new RuntimeException("Invalid read! The system couldn't read the entire file into memory");
        }
            // The first long is the ordering timestamp; we just skip over it
        wholeFileBuffer.position(JournalImpl.SIZE_HEADER);
        int lastDataPos = JournalImpl.SIZE_HEADER;
        while (wholeFileBuffer.hasRemaining()) {
            final int pos = wholeFileBuffer.position();
            byte recordType = wholeFileBuffer.get();
            if (recordType < JournalImpl.ADD_RECORD || recordType > JournalImpl.ROLLBACK_RECORD) {
                // not a valid record type at this position; keep scanning byte by byte
                continue;
            }
            if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_INT)) {
                reader.markAsDataFile(file);
                wholeFileBuffer.position(pos + 1);
                // II - Ignore this record, let's keep looking
                continue;
            }
            // III - Every record carries the file-id; this is what allows a file to be
            // reused without zero-filling it first
            int readFileId = wholeFileBuffer.getInt();
            // IV - If the file-id doesn't match, the record is left over from a previous
            // use of the file (the file was reused) and we need to ignore this record
            if (readFileId != file.getRecordID()) {
                wholeFileBuffer.position(pos + 1);
                continue;
            }
            short compactCount = 0;
            if (file.getJournalVersion() >= 2) {
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_BYTE)) {
                    reader.markAsDataFile(file);
                    wholeFileBuffer.position(pos + 1);
                    continue;
                }
                compactCount = wholeFileBuffer.get();
            }
            long transactionID = 0;
            if (JournalImpl.isTransaction(recordType)) {
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_LONG)) {
                    wholeFileBuffer.position(pos + 1);
                    reader.markAsDataFile(file);
                    continue;
                }
                transactionID = wholeFileBuffer.getLong();
            }
            long recordID = 0;
            // Every record type except prepare and commit (complete transactions) carries a record ID
            if (!JournalImpl.isCompleteTransaction(recordType)) {
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_LONG)) {
                    wholeFileBuffer.position(pos + 1);
                    reader.markAsDataFile(file);
                    continue;
                }
                recordID = wholeFileBuffer.getLong();
            }
            // V - We use the size of the record to validate its health
            // The variable record portion used on Updates and Appends
            int variableSize = 0;
            // Used to hold extra data on transaction prepares
            int preparedTransactionExtraDataSize = 0;
            byte userRecordType = 0;
            byte[] record = null;
            if (JournalImpl.isContainsBody(recordType)) {
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_INT)) {
                    wholeFileBuffer.position(pos + 1);
                    reader.markAsDataFile(file);
                    continue;
                }
                variableSize = wholeFileBuffer.getInt();
                if (recordType != JournalImpl.DELETE_RECORD_TX) {
                    if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), 1)) {
                        wholeFileBuffer.position(pos + 1);
                        continue;
                    }
                    userRecordType = wholeFileBuffer.get();
                }
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), variableSize)) {
                    wholeFileBuffer.position(pos + 1);
                    continue;
                }
                record = new byte[variableSize];
                wholeFileBuffer.get(record);
            }
            // If this is a prepare or commit record, this holds the number of records the
            // transaction has written, which is used below to check the transaction's health
            int transactionCheckNumberOfRecords = 0;
            if (recordType == JournalImpl.PREPARE_RECORD || recordType == JournalImpl.COMMIT_RECORD) {
                if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_INT)) {
                    wholeFileBuffer.position(pos + 1);
                    continue;
                }
                transactionCheckNumberOfRecords = wholeFileBuffer.getInt();
                if (recordType == JournalImpl.PREPARE_RECORD) {
                    if (JournalImpl.isInvalidSize(journalFileSize, wholeFileBuffer.position(), DataConstants.SIZE_INT)) {
                        wholeFileBuffer.position(pos + 1);
                        continue;
                    }
                    // Add the variable size required for preparedTransactions
                    preparedTransactionExtraDataSize = wholeFileBuffer.getInt();
                }
                variableSize = 0;
            }
            int recordSize = JournalImpl.getRecordSize(recordType, file.getJournalVersion());
            // VI - We validate the full size at the end of the record, but first we guard
            // against buffer overflows caused by damaged data
            if (JournalImpl.isInvalidSize(journalFileSize, pos, recordSize + variableSize + preparedTransactionExtraDataSize)) {
                // Avoid a buffer overflow caused by damaged data... continue
                // scanning for more records...
                logger.trace("Record at position " + pos + " recordType = " + recordType + " file:" + file.getFile().getFileName() + " recordSize: " + recordSize + " variableSize: " + variableSize + " preparedTransactionExtraDataSize: " + preparedTransactionExtraDataSize + " is corrupted and it is being ignored (II)");
                // If a file has damaged records, we mark it as a data file, and the
                // next reclaim will fix it
                reader.markAsDataFile(file);
                wholeFileBuffer.position(pos + 1);
                continue;
            }
            int oldPos = wholeFileBuffer.position();
            wholeFileBuffer.position(pos + variableSize + recordSize + preparedTransactionExtraDataSize - DataConstants.SIZE_INT);
            int checkSize = wholeFileBuffer.getInt();
            // VII - The checkSize at the end has to match the size derived above; it works
            // like a hash for the record (it could be replaced by a real calculated hash)
            if (checkSize != variableSize + recordSize + preparedTransactionExtraDataSize) {
                logger.trace("Record at position " + pos + " recordType = " + recordType + " possible transactionID = " + transactionID + " possible recordID = " + recordID + " file:" + file.getFile().getFileName() + " is corrupted and it is being ignored (III)");
                // If a file has damaged records, we mark it as a data file, and the
                // next reclaim will fix it
                reader.markAsDataFile(file);
                wholeFileBuffer.position(pos + DataConstants.SIZE_BYTE);
                continue;
            }
            wholeFileBuffer.position(oldPos);
            if (logger.isTraceEnabled()) {
                logger.trace("reading " + recordID + ", userRecordType=" + userRecordType + ", compactCount=" + compactCount);
            }
            switch(recordType) {
                case ADD_RECORD:
                    {
                        reader.onReadAddRecord(new RecordInfo(recordID, userRecordType, record, false, compactCount));
                        break;
                    }
                case UPDATE_RECORD:
                    {
                        reader.onReadUpdateRecord(new RecordInfo(recordID, userRecordType, record, true, compactCount));
                        break;
                    }
                case DELETE_RECORD:
                    {
                        reader.onReadDeleteRecord(recordID);
                        break;
                    }
                case ADD_RECORD_TX:
                    {
                        reader.onReadAddRecordTX(transactionID, new RecordInfo(recordID, userRecordType, record, false, compactCount));
                        break;
                    }
                case UPDATE_RECORD_TX:
                    {
                        reader.onReadUpdateRecordTX(transactionID, new RecordInfo(recordID, userRecordType, record, true, compactCount));
                        break;
                    }
                case DELETE_RECORD_TX:
                    {
                        reader.onReadDeleteRecordTX(transactionID, new RecordInfo(recordID, (byte) 0, record, true, compactCount));
                        break;
                    }
                case PREPARE_RECORD:
                    {
                        byte[] extraData = new byte[preparedTransactionExtraDataSize];
                        wholeFileBuffer.get(extraData);
                        reader.onReadPrepareRecord(transactionID, extraData, transactionCheckNumberOfRecords);
                        break;
                    }
                case COMMIT_RECORD:
                    {
                        reader.onReadCommitRecord(transactionID, transactionCheckNumberOfRecords);
                        break;
                    }
                case ROLLBACK_RECORD:
                    {
                        reader.onReadRollbackRecord(transactionID);
                        break;
                    }
                default:
                    {
                        throw new IllegalStateException("Journal " + file.getFile().getFileName() + " is corrupt, invalid record type " + recordType);
                    }
            }
            checkSize = wholeFileBuffer.getInt();
            // Sanity check on the loading code itself: if this checkSize doesn't match,
            // the reading logic above did not consume the record as it was supposed to
            if (checkSize != variableSize + recordSize + preparedTransactionExtraDataSize) {
                throw new IllegalStateException("Internal error on loading file. Position doesn't match with checkSize, file = " + file.getFile() + ", pos = " + pos);
            }
            lastDataPos = wholeFileBuffer.position();
        }
        return lastDataPos;
    } catch (Throwable e) {
        ActiveMQJournalLogger.LOGGER.errorReadingFile(e);
        throw new Exception(e.getMessage(), e);
    } finally {
        if (wholeFileBuffer != null) {
            fileFactory.releaseBuffer(wholeFileBuffer);
        }
        try {
            file.getFile().close();
        } catch (Throwable ignored) {
        }
    }
}
Also used : RecordInfo(org.apache.activemq.artemis.core.journal.RecordInfo) ByteBuffer(java.nio.ByteBuffer) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) IOException(java.io.IOException)
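
Since the Javadoc above notes that tools may call readJournalFile for maintenance, here is a minimal, hypothetical sketch of such a read-only dump tool. The directory, file name, file ID and journal version are placeholders (the file ID must match the id stored in the file's own header, otherwise every record is skipped as belonging to a previous use of the file), and the NIOSequentialFileFactory and JournalFileImpl constructor arguments are assumptions about this codebase rather than a documented recipe.

import java.io.File;

import org.apache.activemq.artemis.core.io.SequentialFileFactory;
import org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.impl.JournalFile;
import org.apache.activemq.artemis.core.journal.impl.JournalFileImpl;
import org.apache.activemq.artemis.core.journal.impl.JournalImpl;
import org.apache.activemq.artemis.core.journal.impl.JournalReaderCallback;

public class JournalFileDumper {

    public static void main(String[] args) throws Exception {
        // Placeholder directory, file name, file ID (1) and journal version (2)
        SequentialFileFactory factory = new NIOSequentialFileFactory(new File("./journal-dir"), 1);
        factory.start();
        JournalFile journalFile = new JournalFileImpl(factory.createSequentialFile("activemq-data-1.amq"), 1, 2);

        int lastDataPos = JournalImpl.readJournalFile(factory, journalFile, new JournalReaderCallback() {

            @Override
            public void onReadAddRecord(RecordInfo info) throws Exception {
                System.out.println("ADD id=" + info.id + " userType=" + info.userRecordType + " bytes=" + info.data.length);
            }

            @Override
            public void onReadUpdateRecord(RecordInfo info) throws Exception {
                System.out.println("UPDATE id=" + info.id);
            }

            @Override
            public void onReadDeleteRecord(long recordID) throws Exception {
                System.out.println("DELETE id=" + recordID);
            }

            @Override
            public void onReadAddRecordTX(long transactionID, RecordInfo info) throws Exception {
                System.out.println("ADD_TX tx=" + transactionID + " id=" + info.id);
            }

            @Override
            public void onReadUpdateRecordTX(long transactionID, RecordInfo info) throws Exception {
                System.out.println("UPDATE_TX tx=" + transactionID + " id=" + info.id);
            }

            @Override
            public void onReadDeleteRecordTX(long transactionID, RecordInfo info) throws Exception {
                System.out.println("DELETE_TX tx=" + transactionID + " id=" + info.id);
            }

            @Override
            public void onReadPrepareRecord(long transactionID, byte[] extraData, int numberOfRecords) throws Exception {
                System.out.println("PREPARE tx=" + transactionID + " records=" + numberOfRecords);
            }

            @Override
            public void onReadCommitRecord(long transactionID, int numberOfRecords) throws Exception {
                System.out.println("COMMIT tx=" + transactionID + " records=" + numberOfRecords);
            }

            @Override
            public void onReadRollbackRecord(long transactionID) throws Exception {
                System.out.println("ROLLBACK tx=" + transactionID);
            }

            @Override
            public void markAsDataFile(JournalFile file) {
                // invoked when a damaged record is found; nothing to do for a read-only dump
            }
        });

        System.out.println("last valid data position: " + lastDataPos);
        factory.stop();
    }
}

The callback methods overridden here are exactly the ones readJournalFile dispatches to in the switch above; a real tool would also read the file's header to obtain the correct file ID and version instead of hard-coding them.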

Example 7 with RecordInfo

use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

the class JournalImpl method load.

/**
 * @param loadManager callback that receives the committed records, deletes and prepared transactions
 * @param changeData when {@code true} the load may modify the journal (release empty files, append rollback records for incomplete transactions)
 * @param replicationSync the {@code JournalState} associated with a replication sync ({@code SYNCING} or {@code SYNCING_UP_TO_DATE}); the journal must not already be in that state
 * @return the resulting {@code JournalLoadInformation} (number of records and maximum record ID)
 * @throws Exception
 */
private synchronized JournalLoadInformation load(final LoaderCallback loadManager, final boolean changeData, final JournalState replicationSync) throws Exception {
    if (state == JournalState.STOPPED || state == JournalState.LOADED) {
        throw new IllegalStateException("Journal " + this + " must be in " + JournalState.STARTED + " state, was " + state);
    }
    if (state == replicationSync) {
        throw new IllegalStateException("Journal cannot be in state " + JournalState.STARTED);
    }
    checkControlFile();
    records.clear();
    filesRepository.clear();
    transactions.clear();
    currentFile = null;
    final Map<Long, TransactionHolder> loadTransactions = new LinkedHashMap<>();
    final List<JournalFile> orderedFiles = orderFiles();
    filesRepository.calculateNextfileID(orderedFiles);
    int lastDataPos = JournalImpl.SIZE_HEADER;
    // AtomicLong is used only as a reference, not as an Atomic value
    final AtomicLong maxID = new AtomicLong(-1);
    for (final JournalFile file : orderedFiles) {
        logger.trace("Loading file " + file.getFile().getFileName());
        final AtomicBoolean hasData = new AtomicBoolean(false);
        int resultLastPost = JournalImpl.readJournalFile(fileFactory, file, new JournalReaderCallback() {

            private void checkID(final long id) {
                if (id > maxID.longValue()) {
                    maxID.set(id);
                }
            }

            @Override
            public void onReadAddRecord(final RecordInfo info) throws Exception {
                checkID(info.id);
                hasData.set(true);
                loadManager.addRecord(info);
                records.put(info.id, new JournalRecord(file, info.data.length + JournalImpl.SIZE_ADD_RECORD + 1));
            }

            @Override
            public void onReadUpdateRecord(final RecordInfo info) throws Exception {
                checkID(info.id);
                hasData.set(true);
                loadManager.updateRecord(info);
                JournalRecord posFiles = records.get(info.id);
                if (posFiles != null) {
                    // It's legal for this to be null: the file(s) containing the original add
                    // record may have been deleted, leaving only some updates in this file
                    // +1 = compact count
                    posFiles.addUpdateFile(file, info.data.length + JournalImpl.SIZE_ADD_RECORD + 1);
                }
            }

            @Override
            public void onReadDeleteRecord(final long recordID) throws Exception {
                hasData.set(true);
                loadManager.deleteRecord(recordID);
                JournalRecord posFiles = records.remove(recordID);
                if (posFiles != null) {
                    posFiles.delete(file);
                }
            }

            @Override
            public void onReadUpdateRecordTX(final long transactionID, final RecordInfo info) throws Exception {
                onReadAddRecordTX(transactionID, info);
            }

            @Override
            public void onReadAddRecordTX(final long transactionID, final RecordInfo info) throws Exception {
                checkID(info.id);
                hasData.set(true);
                TransactionHolder tx = loadTransactions.get(transactionID);
                if (tx == null) {
                    tx = new TransactionHolder(transactionID);
                    loadTransactions.put(transactionID, tx);
                }
                tx.recordInfos.add(info);
                JournalTransaction tnp = transactions.get(transactionID);
                if (tnp == null) {
                    tnp = new JournalTransaction(transactionID, JournalImpl.this);
                    transactions.put(transactionID, tnp);
                }
                // +1 = compact count
                tnp.addPositive(file, info.id, info.data.length + JournalImpl.SIZE_ADD_RECORD_TX + 1);
            }

            @Override
            public void onReadDeleteRecordTX(final long transactionID, final RecordInfo info) throws Exception {
                hasData.set(true);
                TransactionHolder tx = loadTransactions.get(transactionID);
                if (tx == null) {
                    tx = new TransactionHolder(transactionID);
                    loadTransactions.put(transactionID, tx);
                }
                tx.recordsToDelete.add(info);
                JournalTransaction tnp = transactions.get(transactionID);
                if (tnp == null) {
                    tnp = new JournalTransaction(transactionID, JournalImpl.this);
                    transactions.put(transactionID, tnp);
                }
                tnp.addNegative(file, info.id);
            }

            @Override
            public void onReadPrepareRecord(final long transactionID, final byte[] extraData, final int numberOfRecords) throws Exception {
                hasData.set(true);
                TransactionHolder tx = loadTransactions.get(transactionID);
                if (tx == null) {
                    // The user could choose to prepare empty transactions
                    tx = new TransactionHolder(transactionID);
                    loadTransactions.put(transactionID, tx);
                }
                tx.prepared = true;
                tx.extraData = extraData;
                JournalTransaction journalTransaction = transactions.get(transactionID);
                if (journalTransaction == null) {
                    journalTransaction = new JournalTransaction(transactionID, JournalImpl.this);
                    transactions.put(transactionID, journalTransaction);
                }
                boolean healthy = checkTransactionHealth(file, journalTransaction, orderedFiles, numberOfRecords);
                if (healthy) {
                    journalTransaction.prepare(file);
                } else {
                    ActiveMQJournalLogger.LOGGER.preparedTXIncomplete(transactionID);
                    tx.invalid = true;
                }
            }

            @Override
            public void onReadCommitRecord(final long transactionID, final int numberOfRecords) throws Exception {
                TransactionHolder tx = loadTransactions.remove(transactionID);
                // The transaction body may have been reclaimed while only the commit
                // record remains, so it is legal not to find the transaction here; in
                // that case we simply ignore this commit
                if (tx != null) {
                    JournalTransaction journalTransaction = transactions.remove(transactionID);
                    if (journalTransaction == null) {
                        throw new IllegalStateException("Cannot find tx " + transactionID);
                    }
                    boolean healthy = checkTransactionHealth(file, journalTransaction, orderedFiles, numberOfRecords);
                    if (healthy) {
                        for (RecordInfo txRecord : tx.recordInfos) {
                            if (txRecord.isUpdate) {
                                loadManager.updateRecord(txRecord);
                            } else {
                                loadManager.addRecord(txRecord);
                            }
                        }
                        for (RecordInfo deleteValue : tx.recordsToDelete) {
                            loadManager.deleteRecord(deleteValue.id);
                        }
                        journalTransaction.commit(file);
                    } else {
                        ActiveMQJournalLogger.LOGGER.txMissingElements(transactionID);
                        journalTransaction.forget();
                    }
                    hasData.set(true);
                }
            }

            @Override
            public void onReadRollbackRecord(final long transactionID) throws Exception {
                TransactionHolder tx = loadTransactions.remove(transactionID);
                // It is legal not to find the transaction at this point; its body may
                // already have been reclaimed
                if (tx != null) {
                    JournalTransaction tnp = transactions.remove(transactionID);
                    if (tnp == null) {
                        throw new IllegalStateException("Cannot find tx " + transactionID);
                    }
                    // There is no need to validate summaries/holes on rollbacks;
                    // we will ignore the data anyway.
                    tnp.rollback(file);
                    hasData.set(true);
                }
            }

            @Override
            public void markAsDataFile(final JournalFile file) {
                hasData.set(true);
            }
        });
        if (hasData.get()) {
            lastDataPos = resultLastPost;
            filesRepository.addDataFileOnBottom(file);
        } else {
            if (changeData) {
                // Files with no remaining data are released back as free files
                filesRepository.addFreeFile(file, false, false);
            }
        }
    }
    if (replicationSync == JournalState.SYNCING) {
        assert filesRepository.getDataFiles().isEmpty();
        setJournalState(JournalState.SYNCING);
        return new JournalLoadInformation(0, -1);
    }
    setUpCurrentFile(lastDataPos);
    setJournalState(JournalState.LOADED);
    for (TransactionHolder transaction : loadTransactions.values()) {
        if ((!transaction.prepared || transaction.invalid) && replicationSync != JournalState.SYNCING_UP_TO_DATE) {
            ActiveMQJournalLogger.LOGGER.uncomittedTxFound(transaction.transactionID);
            if (changeData) {
                // Append a rollback record here; otherwise compacting would complain about unknown transactions
                this.appendRollbackRecord(transaction.transactionID, false);
            }
            loadManager.failedTransaction(transaction.transactionID, transaction.recordInfos, transaction.recordsToDelete);
        } else {
            for (RecordInfo info : transaction.recordInfos) {
                if (info.id > maxID.get()) {
                    maxID.set(info.id);
                }
            }
            PreparedTransactionInfo info = new PreparedTransactionInfo(transaction.transactionID, transaction.extraData);
            info.getRecords().addAll(transaction.recordInfos);
            info.getRecordsToDelete().addAll(transaction.recordsToDelete);
            loadManager.addPreparedTransaction(info);
        }
    }
    checkReclaimStatus();
    return new JournalLoadInformation(records.size(), maxID.longValue());
}
Also used : PreparedTransactionInfo(org.apache.activemq.artemis.core.journal.PreparedTransactionInfo) RecordInfo(org.apache.activemq.artemis.core.journal.RecordInfo) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) JournalLoadInformation(org.apache.activemq.artemis.core.journal.JournalLoadInformation) AtomicLong(java.util.concurrent.atomic.AtomicLong)
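
The private load(...) above backs the public load overloads on the Journal interface that collect committed records and prepared transactions into caller-supplied lists. A rough sketch of that external usage follows; the journal configuration values are placeholders, and the JournalImpl constructor arguments (file size, min files, pool size, compact min files, compact percentage, factory, prefix, extension, max IO) are an assumption based on how this project's tests construct journals.

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory;
import org.apache.activemq.artemis.core.journal.JournalLoadInformation;
import org.apache.activemq.artemis.core.journal.PreparedTransactionInfo;
import org.apache.activemq.artemis.core.journal.RecordInfo;
import org.apache.activemq.artemis.core.journal.impl.JournalImpl;

public class JournalLoadSketch {

    public static void main(String[] args) throws Exception {
        NIOSequentialFileFactory fileFactory = new NIOSequentialFileFactory(new File("./journal-dir"), 1);

        // Placeholder configuration: 10 KiB files, 2 minimum files, pool of 10,
        // compaction disabled, "activemq-data"/"amq" naming, 1 concurrent IO
        JournalImpl journal = new JournalImpl(10 * 1024, 2, 10, 0, 0, fileFactory, "activemq-data", "amq", 1);

        List<RecordInfo> committedRecords = new ArrayList<>();
        List<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();

        journal.start();

        // Committed records and prepared transactions are collected into the lists;
        // incomplete transactions are reported through the failure callback
        JournalLoadInformation info = journal.load(committedRecords, preparedTransactions,
            (transactionID, records, recordsToDelete) -> System.out.println("incomplete tx " + transactionID));

        System.out.println("records=" + committedRecords.size()
            + ", preparedTX=" + preparedTransactions.size()
            + ", maxID=" + info.getMaxID());

        journal.stop();
    }
}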

Example 8 with RecordInfo

use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

the class JDBCJournalImpl method addTxRecord.

private synchronized void addTxRecord(JDBCJournalRecord record) {
    if (logger.isTraceEnabled()) {
        logger.trace("addTxRecord " + record + ", started=" + started + ", failed=" + failed);
    }
    checkStatus();
    TransactionHolder txHolder = transactions.get(record.getTxId());
    if (txHolder == null) {
        txHolder = new TransactionHolder(record.getTxId());
        transactions.put(record.getTxId(), txHolder);
    }
    // We actually only need the record ID in this instance.
    if (record.isTransactional()) {
        RecordInfo info = new RecordInfo(record.getId(), record.getRecordType(), new byte[0], record.isUpdate(), record.getCompactCount());
        if (record.getRecordType() == JDBCJournalRecord.DELETE_RECORD_TX) {
            txHolder.recordsToDelete.add(info);
        } else {
            txHolder.recordInfos.add(info);
        }
    } else {
        txHolder.prepared = true;
    }
}
Also used : RecordInfo(org.apache.activemq.artemis.core.journal.RecordInfo)
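
For reference, the RecordInfo constructor used here (and in the test further below) takes the record id, the user record type, the record body, an isUpdate flag and the compact count. Two standalone examples with made-up values:

// An add record for id 42, user record type 0, with a small body
RecordInfo add = new RecordInfo(42L, (byte) 0, new byte[] {1, 2, 3}, false, (short) 0);

// An update of the same record carries isUpdate = true
RecordInfo update = new RecordInfo(42L, (byte) 0, new byte[0], true, (short) 0);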

Example 9 with RecordInfo

use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

the class JDBCJournalImpl method sync.

public synchronized int sync() {
    List<JDBCJournalRecord> recordRef;
    synchronized (records) {
        if (records.isEmpty()) {
            return 0;
        }
        recordRef = new ArrayList<>(records);
        records.clear();
    }
    if (!started || failed.get()) {
        executeCallbacks(recordRef, false);
        return 0;
    }
    // We keep a list of deleted records and committed tx (used for cleaning up old transaction data).
    List<Long> deletedRecords = new ArrayList<>();
    List<Long> committedTransactions = new ArrayList<>();
    TransactionHolder holder;
    try {
        connection.setAutoCommit(false);
        for (JDBCJournalRecord record : recordRef) {
            if (logger.isTraceEnabled()) {
                logger.trace("sync::preparing JDBC statement for " + record);
            }
            switch(record.getRecordType()) {
                case JDBCJournalRecord.DELETE_RECORD:
                    // Standard SQL delete record (non-transactional delete)
                    deletedRecords.add(record.getId());
                    record.writeDeleteRecord(deleteJournalRecords);
                    break;
                case JDBCJournalRecord.ROLLBACK_RECORD:
                    // Rollback: we remove all records associated with this TX ID. This query is always performed last.
                    deleteJournalTxRecords.setLong(1, record.getTxId());
                    deleteJournalTxRecords.addBatch();
                    break;
                case JDBCJournalRecord.COMMIT_RECORD:
                    // We perform all the deletes and add the commit record in the same Database TX
                    holder = transactions.get(record.getTxId());
                    for (RecordInfo info : holder.recordsToDelete) {
                        deletedRecords.add(record.getId());
                        deleteJournalRecords.setLong(1, info.id);
                        deleteJournalRecords.addBatch();
                    }
                    record.writeRecord(insertJournalRecords);
                    committedTransactions.add(record.getTxId());
                    break;
                default:
                    // Default: we add a new record to the DB
                    record.writeRecord(insertJournalRecords);
                    break;
            }
        }
        insertJournalRecords.executeBatch();
        deleteJournalRecords.executeBatch();
        deleteJournalTxRecords.executeBatch();
        connection.commit();
        if (logger.isTraceEnabled()) {
            logger.trace("JDBC commit worked");
        }
        cleanupTxRecords(deletedRecords, committedTransactions);
        executeCallbacks(recordRef, true);
        return recordRef.size();
    } catch (Exception e) {
        handleException(recordRef, e);
        return 0;
    }
}
Also used : RecordInfo(org.apache.activemq.artemis.core.journal.RecordInfo) AtomicLong(java.util.concurrent.atomic.AtomicLong) ArrayList(java.util.ArrayList) SQLException(java.sql.SQLException)
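
The commit branch above relies on plain JDBC batching: the deletes belonging to the transaction and the commit record itself are queued with addBatch and flushed with a single executeBatch inside one database transaction. A stripped-down illustration of that pattern is shown below; the in-memory H2 URL and the JOURNAL_RECORDS table are hypothetical stand-ins, not the schema this journal actually uses.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BatchedDeleteSketch {

    public static void main(String[] args) throws Exception {
        // Hypothetical connection and table; replace with the real data source and schema
        try (Connection connection = DriverManager.getConnection("jdbc:h2:mem:journal")) {
            connection.setAutoCommit(false);

            try (PreparedStatement deleteRecords =
                     connection.prepareStatement("DELETE FROM JOURNAL_RECORDS WHERE ID = ?")) {

                long[] idsToDelete = {1L, 2L, 3L};
                for (long id : idsToDelete) {
                    deleteRecords.setLong(1, id);
                    deleteRecords.addBatch();   // queue each delete
                }
                deleteRecords.executeBatch();   // flush all queued deletes at once
            }

            connection.commit();                // one database transaction for the whole batch
        }
    }
}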

Example 10 with RecordInfo

use of org.apache.activemq.artemis.core.journal.RecordInfo in project activemq-artemis by apache.

the class JDBCJournalLoaderCallbackTest method testAddDeleteRecord.

@Test
public void testAddDeleteRecord() throws Exception {
    ArrayList<RecordInfo> committedRecords = new ArrayList<>();
    ArrayList<PreparedTransactionInfo> preparedTransactions = new ArrayList<>();
    TransactionFailureCallback failureCallback = null;
    boolean fixBadTX = false;
    JDBCJournalLoaderCallback cb = new JDBCJournalLoaderCallback(committedRecords, preparedTransactions, failureCallback, fixBadTX);
    RecordInfo record = new RecordInfo(42, (byte) 0, null, false, (short) 0);
    cb.addRecord(record);
    assertEquals(1, committedRecords.size());
    assertTrue(committedRecords.contains(record));
    cb.deleteRecord(record.id);
    assertTrue(committedRecords.isEmpty());
}
Also used : PreparedTransactionInfo(org.apache.activemq.artemis.core.journal.PreparedTransactionInfo) RecordInfo(org.apache.activemq.artemis.core.journal.RecordInfo) ArrayList(java.util.ArrayList) TransactionFailureCallback(org.apache.activemq.artemis.core.journal.TransactionFailureCallback) Test(org.junit.Test)

Aggregations

RecordInfo (org.apache.activemq.artemis.core.journal.RecordInfo): 65 usages
PreparedTransactionInfo (org.apache.activemq.artemis.core.journal.PreparedTransactionInfo): 33 usages
ArrayList (java.util.ArrayList): 22 usages
JournalImpl (org.apache.activemq.artemis.core.journal.impl.JournalImpl): 20 usages
Test (org.junit.Test): 20 usages
LinkedList (java.util.LinkedList): 15 usages
ActiveMQBuffer (org.apache.activemq.artemis.api.core.ActiveMQBuffer): 15 usages
HashMap (java.util.HashMap): 10 usages
NIOSequentialFileFactory (org.apache.activemq.artemis.core.io.nio.NIOSequentialFileFactory): 10 usages
TransactionFailureCallback (org.apache.activemq.artemis.core.journal.TransactionFailureCallback): 9 usages
File (java.io.File): 7 usages
Journal (org.apache.activemq.artemis.core.journal.Journal): 7 usages
SimpleEncoding (org.apache.activemq.artemis.tests.unit.core.journal.impl.fakes.SimpleEncoding): 7 usages
List (java.util.List): 6 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 6 usages
SequentialFileFactory (org.apache.activemq.artemis.core.io.SequentialFileFactory): 6 usages
PageCountRecordInc (org.apache.activemq.artemis.core.persistence.impl.journal.codec.PageCountRecordInc): 6 usages
IOException (java.io.IOException): 5 usages
HashSet (java.util.HashSet): 5 usages
LinkedHashMap (java.util.LinkedHashMap): 5 usages