Usage of org.apache.activemq.artemis.core.journal.JournalLoadInformation in the activemq-artemis project by Apache: the class JDBCJournalImpl, method load.
@Override
public synchronized JournalLoadInformation load(LoaderCallback reloadManager) {
   // Replays every persisted journal record from the JDBC store, dispatching
   // each one to a JDBCJournalReaderCallback wrapped around the supplied
   // LoaderCallback, and returns summary information about what was loaded.
   JournalLoadInformation loadInfo = new JournalLoadInformation();
   JDBCJournalReaderCallback readerCallback = new JDBCJournalReaderCallback(reloadManager);
   try (ResultSet resultSet = selectJournalRecords.executeQuery()) {
      int recordCount = 0;
      while (resultSet.next()) {
         JDBCJournalRecord journalRecord = JDBCJournalRecord.readRecord(resultSet);
         // Dispatch on the persisted record type; anything unrecognised is a
         // corrupt or incompatible journal and aborts the load.
         switch (journalRecord.getRecordType()) {
            case JDBCJournalRecord.ADD_RECORD:
               readerCallback.onReadAddRecord(journalRecord.toRecordInfo());
               break;
            case JDBCJournalRecord.UPDATE_RECORD:
               readerCallback.onReadUpdateRecord(journalRecord.toRecordInfo());
               break;
            case JDBCJournalRecord.DELETE_RECORD:
               readerCallback.onReadDeleteRecord(journalRecord.getId());
               break;
            case JDBCJournalRecord.ADD_RECORD_TX:
               readerCallback.onReadAddRecordTX(journalRecord.getTxId(), journalRecord.toRecordInfo());
               break;
            case JDBCJournalRecord.UPDATE_RECORD_TX:
               readerCallback.onReadUpdateRecordTX(journalRecord.getTxId(), journalRecord.toRecordInfo());
               break;
            case JDBCJournalRecord.DELETE_RECORD_TX:
               readerCallback.onReadDeleteRecordTX(journalRecord.getTxId(), journalRecord.toRecordInfo());
               break;
            case JDBCJournalRecord.PREPARE_RECORD:
               readerCallback.onReadPrepareRecord(journalRecord.getTxId(), journalRecord.getTxDataAsByteArray(), journalRecord.getTxCheckNoRecords());
               break;
            case JDBCJournalRecord.COMMIT_RECORD:
               readerCallback.onReadCommitRecord(journalRecord.getTxId(), journalRecord.getTxCheckNoRecords());
               break;
            case JDBCJournalRecord.ROLLBACK_RECORD:
               readerCallback.onReadRollbackRecord(journalRecord.getTxId());
               break;
            default:
               throw new Exception("Error Reading Journal, Unknown Record Type: " + journalRecord.getRecordType());
         }
         recordCount++;
         // Keep the journal's sequence counter at the highest sequence seen so
         // far, so new records continue after the loaded ones.
         if (journalRecord.getSeq() > seq.longValue()) {
            seq.set(journalRecord.getSeq());
         }
      }
      readerCallback.checkPreparedTx();
      // NOTE(review): assumes the caller always passes a JDBCJournalLoaderCallback
      // here — the unchecked cast would otherwise throw ClassCastException.
      loadInfo.setMaxID(((JDBCJournalLoaderCallback) reloadManager).getMaxId());
      loadInfo.setNumberOfRecords(recordCount);
      transactions = readerCallback.getTransactions();
   } catch (Throwable t) {
      handleException(null, t);
   }
   return loadInfo;
}
Usage of org.apache.activemq.artemis.core.journal.JournalLoadInformation in the activemq-artemis project by Apache: the class JournalImpl, method load.
/**
 * Loads the journal, collecting surviving (non-deleted) records into
 * {@code committedRecords} and prepared transactions into
 * {@code preparedTransactions}.
 *
 * @see JournalImpl#load(LoaderCallback)
 */
@Override
public synchronized JournalLoadInformation load(final List<RecordInfo> committedRecords, final List<PreparedTransactionInfo> preparedTransactions, final TransactionFailureCallback failureCallback, final boolean fixBadTX) throws Exception {
   final Set<Long> pendingDeletes = new HashSet<>();
   // LinkedList here: ArrayList was taking too long to delete elements on checkDeleteSize
   final List<RecordInfo> loadedRecords = new LinkedList<>();
   final int DELETE_FLUSH = 20000;
   JournalLoadInformation info = load(new LoaderCallback() {
      Runtime runtime = Runtime.getRuntime();

      // HORNETQ-482 - Flush deletes only if memory is critical.
      // Cleaning up when the list grows too large is what makes loading very
      // large sets of files possible at all (JBMESSAGING-1678).
      private void checkDeleteSize() {
         if (pendingDeletes.size() > DELETE_FLUSH && runtime.freeMemory() < runtime.maxMemory() * 0.2) {
            ActiveMQJournalLogger.LOGGER.debug("Flushing deletes during loading, deleteCount = " + pendingDeletes.size());
            loadedRecords.removeIf(record -> pendingDeletes.contains(record.id));
            pendingDeletes.clear();
            ActiveMQJournalLogger.LOGGER.debug("flush delete done");
         }
      }

      @Override
      public void addPreparedTransaction(final PreparedTransactionInfo preparedTransaction) {
         preparedTransactions.add(preparedTransaction);
         checkDeleteSize();
      }

      @Override
      public void addRecord(final RecordInfo info) {
         loadedRecords.add(info);
         checkDeleteSize();
      }

      @Override
      public void updateRecord(final RecordInfo info) {
         loadedRecords.add(info);
         checkDeleteSize();
      }

      @Override
      public void deleteRecord(final long id) {
         pendingDeletes.add(id);
         checkDeleteSize();
      }

      @Override
      public void failedTransaction(final long transactionID, final List<RecordInfo> records, final List<RecordInfo> recordsToDelete) {
         // Delegate transaction failures to the caller's callback, if any.
         if (failureCallback != null) {
            failureCallback.failedTransaction(transactionID, records, recordsToDelete);
         }
      }
   }, fixBadTX, null);
   // Whatever survives the pending deletes is what the caller sees as committed.
   for (RecordInfo loaded : loadedRecords) {
      if (!pendingDeletes.contains(loaded.id)) {
         committedRecords.add(loaded);
      }
   }
   return info;
}
Usage of org.apache.activemq.artemis.core.journal.JournalLoadInformation in the activemq-artemis project by Apache: the class ActiveMQServerImpl, method initialisePart2.
/*
 * Load the data, and start remoting service so clients can connect.
 *
 * The order of operations below matters: addresses/queues must be deployed
 * after the journals are loaded and before clustering is fully activated,
 * and the expiry scanner only starts once everything else is running.
 */
synchronized void initialisePart2(boolean scalingDown) throws Exception {
   // Bail out if the server was stopped while we were waiting for the lock.
   if (state == SERVER_STATE.STOPPED || state == SERVER_STATE.STOPPING) {
      return;
   }
   pagingManager.reloadStores();
   // Load the journals for their side effects; the returned
   // JournalLoadInformation[] was previously assigned to an unused local.
   loadJournals();
   final ServerInfo dumper = new ServerInfo(this, pagingManager);
   long dumpInfoInterval = configuration.getServerDumpInterval();
   if (dumpInfoInterval > 0) {
      // Periodically log a full server diagnostics dump.
      scheduledPool.scheduleWithFixedDelay(new Runnable() {
         @Override
         public void run() {
            ActiveMQServerLogger.LOGGER.dumpServerInfo(dumper.dump());
         }
      }, 0, dumpInfoInterval, TimeUnit.MILLISECONDS);
   }
   // Deploy the rest of the stuff
   // Deploy predefined addresses
   deployAddressesFromConfiguration();
   // Deploy any predefined queues
   deployQueuesFromConfiguration();
   // Undeploy any addresses and queues not in config
   undeployAddressesAndQueueNotInConfiguration();
   // We need to call this here, this gives any dependent server a chance to deploy its own addresses
   // this needs to be done before clustering is fully activated
   callActivateCallbacks();
   checkForPotentialOOMEInAddressConfiguration();
   if (!scalingDown) {
      // Deploy any pre-defined diverts
      deployDiverts();
      if (groupingHandler != null) {
         groupingHandler.start();
      }
      // instanceof is null-safe, so the former "groupingHandler != null &&"
      // guard was redundant. A local grouping handler must see cluster
      // bindings before remoting accepts clients; otherwise start remoting
      // first so clients can connect while the cluster forms.
      if (groupingHandler instanceof LocalGroupingHandler) {
         clusterManager.start();
         groupingHandler.awaitBindings();
         remotingService.start();
      } else {
         remotingService.start();
         clusterManager.start();
      }
      if (nodeManager.getNodeId() == null) {
         throw ActiveMQMessageBundle.BUNDLE.nodeIdNull();
      }
      // We can only do this after everything is started otherwise we may get nasty races with expired messages
      postOffice.startExpiryScanner();
   }
   // -1 disables disk-usage monitoring entirely.
   if (configuration.getMaxDiskUsage() != -1) {
      try {
         injectMonitor(new FileStoreMonitor(getScheduledPool(), executorFactory.getExecutor(), configuration.getDiskScanPeriod(), TimeUnit.MILLISECONDS, configuration.getMaxDiskUsage() / 100f, shutdownOnCriticalIO));
      } catch (Exception e) {
         // Monitoring is best-effort: log the failure but keep the server up.
         ActiveMQServerLogger.LOGGER.unableToInjectMonitor(e);
      }
   }
}
Aggregations