Usage example of org.apache.activemq.artemis.core.journal.impl.JournalFile in the apache/activemq-artemis project: the describeJournal method of the DescribeJournal class.
/**
 * Reads each file of the given journal in order and prints a one-line, human-readable
 * description of every record found: adds, updates, deletes, prepares, commits and
 * rollbacks, including their transactional variants.
 * <p>
 * Page-cursor counter records ({@code PAGE_CURSOR_COUNTER_VALUE} and
 * {@code PAGE_CURSOR_COUNTER_INC}) are additionally replayed into an in-memory map of
 * {@link PageSubscriptionCounterImpl} so that conflicting counter replacements and
 * negative counter totals can be flagged in the output.
 *
 * @param fileFactory factory used by the journal reader to open the sequential files
 * @param journal the journal whose files are listed (via {@code orderFiles()}) and decoded
 * @param path the journal directory; printed for reference only
 * @param out stream that receives the textual description
 * @param safe forwarded to {@code describeRecord} — presumably suppresses verbose or
 *             sensitive payload detail when {@code true}; confirm against describeRecord
 * @throws Exception if a journal file cannot be read or a record cannot be decoded
 */
private static DescribeJournal describeJournal(SequentialFileFactory fileFactory, JournalImpl journal, final File path, PrintStream out, boolean safe) throws Exception {
List<JournalFile> files = journal.orderFiles();
// Page-subscription counters rebuilt while scanning, keyed by queue ID.
final Map<Long, PageSubscriptionCounterImpl> counters = new HashMap<>();
out.println("Journal path: " + path);
for (JournalFile file : files) {
out.println("#" + file + " (size=" + file.getFile().size() + ")");
// Decode every record in this file; each callback below prints one line per record.
JournalImpl.readJournalFile(fileFactory, file, new JournalReaderCallback() {
@Override
public void onReadUpdateRecordTX(final long transactionID, final RecordInfo recordInfo) throws Exception {
out.println("operation@UpdateTX;txID=" + transactionID + "," + describeRecord(recordInfo, safe));
checkRecordCounter(recordInfo);
}
@Override
public void onReadUpdateRecord(final RecordInfo recordInfo) throws Exception {
out.println("operation@Update;" + describeRecord(recordInfo, safe));
checkRecordCounter(recordInfo);
}
@Override
public void onReadRollbackRecord(final long transactionID) throws Exception {
out.println("operation@Rollback;txID=" + transactionID);
}
@Override
public void onReadPrepareRecord(final long transactionID, final byte[] extraData, final int numberOfRecords) throws Exception {
out.println("operation@Prepare,txID=" + transactionID + ",numberOfRecords=" + numberOfRecords + ",extraData=" + encode(extraData) + ", xid=" + toXid(extraData));
}
@Override
public void onReadDeleteRecordTX(final long transactionID, final RecordInfo recordInfo) throws Exception {
out.println("operation@DeleteRecordTX;txID=" + transactionID + "," + describeRecord(recordInfo, safe));
}
@Override
public void onReadDeleteRecord(final long recordID) throws Exception {
out.println("operation@DeleteRecord;recordID=" + recordID);
}
@Override
public void onReadCommitRecord(final long transactionID, final int numberOfRecords) throws Exception {
out.println("operation@Commit;txID=" + transactionID + ",numberOfRecords=" + numberOfRecords);
}
@Override
public void onReadAddRecordTX(final long transactionID, final RecordInfo recordInfo) throws Exception {
out.println("operation@AddRecordTX;txID=" + transactionID + "," + describeRecord(recordInfo, safe));
}
@Override
public void onReadAddRecord(final RecordInfo recordInfo) throws Exception {
out.println("operation@AddRecord;" + describeRecord(recordInfo, safe));
}
@Override
// Intentionally empty: marking data files is irrelevant when only describing content.
public void markAsDataFile(final JournalFile file1) {
}
// Replays page-counter records (absolute values and increments) into the shared
// counters map, printing the resulting state and flagging anomalies.
public void checkRecordCounter(RecordInfo info) {
if (info.getUserRecordType() == JournalRecordIds.PAGE_CURSOR_COUNTER_VALUE) {
// Absolute counter snapshot: a pre-existing non-zero value that differs means
// the counter was replaced with a conflicting snapshot — report it.
PageCountRecord encoding = (PageCountRecord) newObjectEncoding(info);
long queueIDForCounter = encoding.getQueueID();
PageSubscriptionCounterImpl subsCounter = lookupCounter(counters, queueIDForCounter);
if (subsCounter.getValue() != 0 && subsCounter.getValue() != encoding.getValue()) {
out.println("####### Counter replace wrongly on queue " + queueIDForCounter + " oldValue=" + subsCounter.getValue() + " newValue=" + encoding.getValue());
}
subsCounter.loadValue(info.id, encoding.getValue(), encoding.getPersistentSize());
subsCounter.processReload();
out.print("#Counter queue " + queueIDForCounter + " value=" + subsCounter.getValue() + " persistentSize=" + subsCounter.getPersistentSize() + ", result=" + subsCounter.getValue());
// A negative running total indicates more decrements than the records justify.
if (subsCounter.getValue() < 0) {
out.println(" #NegativeCounter!!!!");
} else {
out.println();
}
out.println();
} else if (info.getUserRecordType() == JournalRecordIds.PAGE_CURSOR_COUNTER_INC) {
// Incremental counter record: applied on top of the current running value.
PageCountRecordInc encoding = (PageCountRecordInc) newObjectEncoding(info);
long queueIDForCounter = encoding.getQueueID();
PageSubscriptionCounterImpl subsCounter = lookupCounter(counters, queueIDForCounter);
subsCounter.loadInc(info.id, encoding.getValue(), encoding.getPersistentSize());
subsCounter.processReload();
out.print("#Counter queue " + queueIDForCounter + " value=" + subsCounter.getValue() + " persistentSize=" + subsCounter.getPersistentSize() + " increased by " + encoding.getValue());
if (subsCounter.getValue() < 0) {
out.println(" #NegativeCounter!!!!");
} else {
out.println();
}
out.println();
}
}
});
}
out.println();
// After the full scan, dump any counters that were rebuilt during the load phase.
if (counters.size() != 0) {
out.println("#Counters during initial load:");
printCounters(out, counters);
}
return printSurvivingRecords(journal, out, safe);
}
Usage example of org.apache.activemq.artemis.core.journal.impl.JournalFile in the apache/activemq-artemis project: the startReplication method of the JournalStorageManager class.
/**
 * Starts synchronizing this live server's persistent state to a backup through the given
 * {@link ReplicationManager}: both journals are compacted, briefly frozen to capture a
 * consistent file list, swapped for {@link ReplicatedJournal} wrappers (so new appends are
 * mirrored), and then journal files, pending large messages and page files are streamed
 * to the backup outside the lock.
 *
 * @param replicationManager channel to the backup; must not be null (asserted)
 * @param pagingManager used to snapshot page files; its cleanup is disabled during capture
 * @param nodeID node id announced to the backup for this synchronization
 * @param autoFailBack forwarded to {@code prepareJournalForCopy} — TODO confirm exact effect
 * @param initialReplicationSyncTimeout timeout for the final synchronization-done exchange
 * @throws IllegalStateException if this storage manager has not been started
 * @throws ActiveMQIllegalStateException if replication is already in progress
 * @throws Exception if a journal is busy or any synchronization step fails; replication is
 *                   stopped via {@code stopReplication()} before the exception is rethrown
 */
@Override
public void startReplication(ReplicationManager replicationManager, PagingManager pagingManager, String nodeID, final boolean autoFailBack, long initialReplicationSyncTimeout) throws Exception {
if (!started) {
throw new IllegalStateException("JournalStorageManager must be started...");
}
assert replicationManager != null;
if (!(messageJournal instanceof JournalImpl) || !(bindingsJournal instanceof JournalImpl)) {
throw ActiveMQMessageBundle.BUNDLE.notJournalImpl();
}
// We first do a compact without any locks, to avoid copying unnecessary data over the network.
// We do this without holding the storageManager lock, so the journal stays open while compact is being done
originalMessageJournal.scheduleCompactAndBlock(-1);
originalBindingsJournal.scheduleCompactAndBlock(-1);
JournalFile[] messageFiles = null;
JournalFile[] bindingsFiles = null;
// We get a picture of the current situation on the large messages
// and we send the current messages while more state is coming
Map<Long, Pair<String, Long>> pendingLargeMessages = null;
try {
Map<SimpleString, Collection<Integer>> pageFilesToSync;
// Block all journal writers while we capture a consistent snapshot and swap in
// the replicated journal wrappers.
storageManagerLock.writeLock().lock();
try {
if (isReplicated())
throw new ActiveMQIllegalStateException("already replicating");
replicator = replicationManager;
// Drain any appends still queued on the journals' executors before freezing state.
if (!((JournalImpl) originalMessageJournal).flushAppendExecutor(10, TimeUnit.SECONDS)) {
throw new Exception("Live message journal is busy");
}
if (!((JournalImpl) originalBindingsJournal).flushAppendExecutor(10, TimeUnit.SECONDS)) {
throw new Exception("Live bindings journal is busy");
}
// Establishes lock
originalMessageJournal.synchronizationLock();
originalBindingsJournal.synchronizationLock();
try {
// Keep old files around until replicationSyncFinished() so the captured file
// lists stay valid while they are streamed to the backup.
originalBindingsJournal.replicationSyncPreserveOldFiles();
originalMessageJournal.replicationSyncPreserveOldFiles();
pagingManager.lock();
try {
pagingManager.disableCleanup();
messageFiles = prepareJournalForCopy(originalMessageJournal, JournalContent.MESSAGES, nodeID, autoFailBack);
bindingsFiles = prepareJournalForCopy(originalBindingsJournal, JournalContent.BINDINGS, nodeID, autoFailBack);
pageFilesToSync = getPageInformationForSync(pagingManager);
pendingLargeMessages = recoverPendingLargeMessages();
} finally {
pagingManager.unlock();
}
} finally {
originalMessageJournal.synchronizationUnlock();
originalBindingsJournal.synchronizationUnlock();
}
// From here on every append goes through the replicated wrappers, i.e. it is
// mirrored to the backup as well as written locally.
bindingsJournal = new ReplicatedJournal(((byte) 0), originalBindingsJournal, replicator);
messageJournal = new ReplicatedJournal((byte) 1, originalMessageJournal, replicator);
// We need to send the list while locking otherwise part of the body might get sent too soon
// it will send a list of IDs that we are allocating
replicator.sendLargeMessageIdListMessage(pendingLargeMessages);
} finally {
storageManagerLock.writeLock().unlock();
}
// Stream the captured snapshot outside the lock; regular (replicated) appends continue.
sendJournalFile(messageFiles, JournalContent.MESSAGES);
sendJournalFile(bindingsFiles, JournalContent.BINDINGS);
sendLargeMessageFiles(pendingLargeMessages);
sendPagesToBackup(pageFilesToSync, pagingManager);
storageManagerLock.writeLock().lock();
try {
// replicator may have been cleared by a concurrent stopReplication().
if (replicator != null) {
replicator.sendSynchronizationDone(nodeID, initialReplicationSyncTimeout);
performCachedLargeMessageDeletes();
}
} finally {
storageManagerLock.writeLock().unlock();
}
} catch (Exception e) {
ActiveMQServerLogger.LOGGER.unableToStartReplication(e);
stopReplication();
throw e;
} finally {
// Re-enable compact and reclaim of journal files
originalBindingsJournal.replicationSyncFinished();
originalMessageJournal.replicationSyncFinished();
pagingManager.resumeCleanup();
}
}
Usage example of org.apache.activemq.artemis.core.journal.impl.JournalFile in the apache/activemq-artemis project: the getFileIds method of the BackupSyncJournalTest class.
/**
 * Collects an identifying (id, counter) pair for the journal's current file and for
 * every data file, as produced by {@code getPair}.
 *
 * @param journal the journal whose files are identified
 * @return the set of pairs for the current file plus all data files
 */
private Set<Pair<Long, Integer>> getFileIds(JournalImpl journal) {
   final Set<Pair<Long, Integer>> ids = new HashSet<>();
   // The file currently being appended to counts as well as the completed data files.
   ids.add(getPair(journal.getCurrentFile()));
   for (JournalFile dataFile : journal.getDataFiles()) {
      ids.add(getPair(dataFile));
   }
   return ids;
}
Usage example of org.apache.activemq.artemis.core.journal.impl.JournalFile in the apache/activemq-artemis project: the exportJournal method of the EncodeJournal class.
/**
 * Exports every file of the journal in the given directory to the supplied stream:
 * each file is announced with a {@code #File,} header line and then dumped via
 * {@code exportJournalFile}.
 *
 * @param directory directory containing the journal files
 * @param journalPrefix file-name prefix identifying journal files
 * @param journalSuffix file-name suffix identifying journal files
 * @param minFiles minimum number of journal files (also used as the pool size)
 * @param fileSize size of each journal file in bytes
 * @param out destination stream for the export
 * @throws Exception if the journal files cannot be listed or read
 */
public static void exportJournal(final String directory, final String journalPrefix, final String journalSuffix, final int minFiles, final int fileSize, final PrintStream out) throws Exception {
   final NIOSequentialFileFactory fileFactory = new NIOSequentialFileFactory(new File(directory), null, 1);
   final JournalImpl journal = new JournalImpl(fileSize, minFiles, minFiles, 0, 0, fileFactory, journalPrefix, journalSuffix, 1);
   // orderFiles() returns the files in journal order; export them one by one.
   for (final JournalFile journalFile : journal.orderFiles()) {
      out.println("#File," + journalFile);
      exportJournalFile(out, fileFactory, journalFile);
   }
}
Usage example of org.apache.activemq.artemis.core.journal.impl.JournalFile in the apache/activemq-artemis project: the testControlFile method of the NIOJournalCompactTest class.
// General tests
// =============
/**
 * Verifies that a journal control file written by
 * {@code AbstractJournalUpdateTask.writeControlFile} round-trips through
 * {@code JournalCompactor.readControlFile}: the data-file names, new-file names and
 * rename pairs must be read back complete and in the same order they were written.
 */
@Test
public void testControlFile() throws Exception {
   // Five pretend data files...
   ArrayList<JournalFile> dataFiles = new ArrayList<>();
   for (int i = 0; i < 5; i++) {
      SequentialFile file = fileFactory.createSequentialFile("file-" + i + ".tst");
      dataFiles.add(new JournalFileImpl(file, 0, JournalImpl.FORMAT_VERSION));
   }
   // ...three pretend "new" files...
   ArrayList<JournalFile> newFiles = new ArrayList<>();
   for (int i = 0; i < 3; i++) {
      SequentialFile file = fileFactory.createSequentialFile("file-" + i + ".tst.new");
      newFiles.add(new JournalFileImpl(file, 0, JournalImpl.FORMAT_VERSION));
   }
   // ...and two rename pairs.
   ArrayList<Pair<String, String>> renames = new ArrayList<>();
   renames.add(new Pair<>("a", "b"));
   renames.add(new Pair<>("c", "d"));
   AbstractJournalUpdateTask.writeControlFile(fileFactory, dataFiles, newFiles, renames);
   ArrayList<String> strDataFiles = new ArrayList<>();
   ArrayList<String> strNewFiles = new ArrayList<>();
   ArrayList<Pair<String, String>> renamesRead = new ArrayList<>();
   Assert.assertNotNull(JournalCompactor.readControlFile(fileFactory, strDataFiles, strNewFiles, renamesRead));
   Assert.assertEquals(dataFiles.size(), strDataFiles.size());
   Assert.assertEquals(newFiles.size(), strNewFiles.size());
   Assert.assertEquals(renames.size(), renamesRead.size());
   // Data-file names must round-trip in order.
   Iterator<String> iterDataFiles = strDataFiles.iterator();
   for (JournalFile file : dataFiles) {
      Assert.assertEquals(file.getFile().getFileName(), iterDataFiles.next());
   }
   Assert.assertFalse(iterDataFiles.hasNext());
   // New-file names must round-trip in order.
   Iterator<String> iterNewFiles = strNewFiles.iterator();
   for (JournalFile file : newFiles) {
      Assert.assertEquals(file.getFile().getFileName(), iterNewFiles.next());
   }
   Assert.assertFalse(iterNewFiles.hasNext());
   // Rename pairs must round-trip in order.
   Iterator<Pair<String, String>> iterRename = renames.iterator();
   for (Pair<String, String> rename : renamesRead) {
      Pair<String, String> original = iterRename.next();
      Assert.assertEquals(original.getA(), rename.getA());
      Assert.assertEquals(original.getB(), rename.getB());
   }
   // Bug fix: the original re-asserted iterNewFiles.hasNext() here a second time,
   // leaving the renames iterator unchecked; verify it was fully consumed.
   Assert.assertFalse(iterRename.hasNext());
}
Aggregations