Use of org.apache.activemq.artemis.core.persistence.impl.journal.DescribeJournal.ReferenceDescribe in project activemq-artemis by apache.
The class XmlDataExporter, method removeAcked.
/**
 * Go back through the messages and message refs we found in the journal and remove the ones that have been acked.
 *
 * @param acks the list of ack records we got from the journal
 */
private void removeAcked(ArrayList<RecordInfo> acks) {
   for (RecordInfo info : acks) {
      AckDescribe ack = (AckDescribe) DescribeJournal.newObjectEncoding(info, null);
      HashMap<Long, ReferenceDescribe> referenceDescribeHashMap = messageRefs.get(info.id);
      referenceDescribeHashMap.remove(ack.refEncoding.queueID);
      if (referenceDescribeHashMap.size() == 0) {
         messages.remove(info.id);
         messageRefs.remove(info.id);
      }
   }
}
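For orientation, the following stand-alone sketch shows the bookkeeping that removeAcked prunes: messages and messageRefs are keyed by the message's journal record id, and a message survives the export only while at least one queue still references it. The class name, the simplified value types, and the ack helper are illustrative assumptions, not the exporter's real API:

import java.util.HashMap;
import java.util.Map;

// Sketch only: a message is kept for export while at least one queue still references it.
public class JournalBookkeepingSketch {

   // journal record id -> decoded message (filled from ADD_MESSAGE records)
   private final Map<Long, Object> messages = new HashMap<>();

   // journal record id -> (queueID -> reference) (filled from ADD_REF records)
   private final Map<Long, HashMap<Long, Object>> messageRefs = new HashMap<>();

   // Hypothetical helper mirroring what removeAcked does for a single ack record.
   void ack(long messageID, long queueID) {
      HashMap<Long, Object> refs = messageRefs.get(messageID);
      if (refs == null) {
         return; // defensive: no references were recorded for this message
      }
      refs.remove(queueID);
      if (refs.isEmpty()) {
         // the last queue acked this message, so it must not appear in the export
         messages.remove(messageID);
         messageRefs.remove(messageID);
      }
   }
}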
Use of org.apache.activemq.artemis.core.persistence.impl.journal.DescribeJournal.ReferenceDescribe in project activemq-artemis by apache.
The class PagingTest, method testDeleteQueueRestart.
@Test
public void testDeleteQueueRestart() throws Exception {
   clearDataRecreateServerDirs();
   // disable compact
   Configuration config = createDefaultInVMConfig().setJournalDirectory(getJournalDir()).setJournalSyncNonTransactional(false).setJournalCompactMinFiles(0);
   ActiveMQServer server = createServer(true, config, PagingTest.PAGE_SIZE, PagingTest.PAGE_MAX);
   server.start();
   final int numberOfMessages = 5000;
   locator = createInVMNonHALocator().setConsumerWindowSize(10 * 1024 * 1024).setBlockOnNonDurableSend(true).setBlockOnDurableSend(true).setBlockOnAcknowledge(true);
   SimpleString QUEUE2 = ADDRESS.concat("-2");
   ClientSessionFactory sf = locator.createSessionFactory();
   ClientSession session = sf.createSession(false, false, false);
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   session.createQueue(PagingTest.ADDRESS, QUEUE2, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   // This is just to hold some messages as being delivered
   ClientConsumerInternal cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);
   ClientConsumerInternal cons2 = (ClientConsumerInternal) session.createConsumer(QUEUE2);
   ClientMessage message = null;
   byte[] body = new byte[MESSAGE_SIZE];
   ByteBuffer bb = ByteBuffer.wrap(body);
   for (int j = 1; j <= MESSAGE_SIZE; j++) {
      bb.put(getSamplebyte(j));
   }
   for (int i = 0; i < numberOfMessages; i++) {
      message = session.createMessage(true);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(body);
      producer.send(message);
      if (i % 1000 == 0) {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.start();
   long timeout = System.currentTimeMillis() + 30000;
   // I want the buffer full to make sure there are pending messages on the server's side
   while (System.currentTimeMillis() < timeout && (cons.getBufferSize() < 1000 || cons2.getBufferSize() < 1000)) {
      System.out.println("cons1 buffer = " + cons.getBufferSize() + ", cons2 buffer = " + cons2.getBufferSize());
      Thread.sleep(100);
   }
   assertTrue(cons.getBufferSize() >= 1000);
   assertTrue(cons2.getBufferSize() >= 1000);
   session.close();
   Queue queue = server.locateQueue(QUEUE2);
   long deletedQueueID = queue.getID();
   server.destroyQueue(QUEUE2);
   sf.close();
   locator.close();
   locator = null;
   sf = null;
   server.stop();
   final HashMap<Integer, AtomicInteger> recordsType = countJournal(config);
   for (Map.Entry<Integer, AtomicInteger> entry : recordsType.entrySet()) {
      System.out.println(entry.getKey() + "=" + entry.getValue());
   }
assertNull("The system is acking page records instead of just delete data", recordsType.get(new Integer(JournalRecordIds.ACKNOWLEDGE_CURSOR)));
   Pair<List<RecordInfo>, List<PreparedTransactionInfo>> journalData = loadMessageJournal(config);
   HashSet<Long> deletedQueueReferences = new HashSet<>();
   for (RecordInfo info : journalData.getA()) {
      if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
         DescribeJournal.ReferenceDescribe ref = (ReferenceDescribe) DescribeJournal.newObjectEncoding(info);
         if (ref.refEncoding.queueID == deletedQueueID) {
            deletedQueueReferences.add(Long.valueOf(info.id));
         }
      } else if (info.getUserRecordType() == JournalRecordIds.ACKNOWLEDGE_REF) {
         AckDescribe ref = (AckDescribe) DescribeJournal.newObjectEncoding(info);
         if (ref.refEncoding.queueID == deletedQueueID) {
            deletedQueueReferences.remove(Long.valueOf(info.id));
         }
      }
   }
   if (!deletedQueueReferences.isEmpty()) {
      for (Long value : deletedQueueReferences) {
         System.out.println("Deleted queue still has a reference: " + value);
      }
      fail("Deleted queue still has references");
   }
   server.start();
   locator = createInVMNonHALocator();
   locator.setConsumerWindowSize(10 * 1024 * 1024);
   sf = locator.createSessionFactory();
   session = sf.createSession(false, false, false);
   cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);
   session.start();
   for (int i = 0; i < numberOfMessages; i++) {
      message = cons.receive(5000);
      assertNotNull(message);
      message.acknowledge();
      if (i % 1000 == 0) {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.close();
   queue = server.locateQueue(PagingTest.ADDRESS);
   assertEquals(0, getMessageCount(queue));
   timeout = System.currentTimeMillis() + 10000;
   while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging()) {
      Thread.sleep(100);
   }
   assertFalse(queue.getPageSubscription().getPagingStore().isPaging());
   server.stop();
}
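The journal check at the heart of this test is a simple add/remove pairing: every ADD_REF to the deleted queue must have disappeared (acked, or cleaned up by the queue deletion) by the time the journal is reloaded. A self-contained sketch of that pairing, where the Rec record and the string record types are hypothetical stand-ins for RecordInfo and JournalRecordIds:

import java.util.HashSet;
import java.util.Set;

// Sketch only: verify that no references to a deleted queue survive in a record stream.
public class DanglingReferenceCheckSketch {

   // Hypothetical minimal record: id, type, and the queue it refers to.
   public record Rec(long id, String type, long queueID) {
   }

   // Returns the ids of references to deletedQueueID that were added but never acked.
   public static Set<Long> danglingRefs(Iterable<Rec> records, long deletedQueueID) {
      Set<Long> pending = new HashSet<>();
      for (Rec rec : records) {
         if (rec.queueID() != deletedQueueID) {
            continue;
         }
         if ("ADD_REF".equals(rec.type())) {
            pending.add(rec.id());
         } else if ("ACKNOWLEDGE_REF".equals(rec.type())) {
            pending.remove(rec.id());
         }
      }
      return pending; // empty means the journal holds no dangling references
   }
}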
Use of org.apache.activemq.artemis.core.persistence.impl.journal.DescribeJournal.ReferenceDescribe in project activemq-artemis by apache.
The class XmlDataExporter, method processMessageJournal.
/**
 * Read through the message journal and stuff all the events/data we care about into local data structures. We'll
 * use this data later to print all the right information.
 *
 * @throws Exception will be thrown if anything goes wrong reading the journal
 */
private void processMessageJournal() throws Exception {
   ArrayList<RecordInfo> acks = new ArrayList<>();
   List<RecordInfo> records = new LinkedList<>();
   // We load these, but don't use them.
   List<PreparedTransactionInfo> preparedTransactions = new LinkedList<>();
   Journal messageJournal = storageManager.getMessageJournal();
   ActiveMQServerLogger.LOGGER.debug("Reading journal from " + config.getJournalDirectory());
   messageJournal.start();
   // Just logging these, no action necessary
   TransactionFailureCallback transactionFailureCallback = new TransactionFailureCallback() {
      @Override
      public void failedTransaction(long transactionID, List<RecordInfo> records1, List<RecordInfo> recordsToDelete) {
         StringBuilder message = new StringBuilder();
         message.append("Encountered failed journal transaction: ").append(transactionID);
         for (int i = 0; i < records1.size(); i++) {
            if (i == 0) {
               message.append("; Records: ");
            }
            message.append(records1.get(i));
            if (i != (records1.size() - 1)) {
               message.append(", ");
            }
         }
         for (int i = 0; i < recordsToDelete.size(); i++) {
            if (i == 0) {
               message.append("; RecordsToDelete: ");
            }
            message.append(recordsToDelete.get(i));
            if (i != (recordsToDelete.size() - 1)) {
               message.append(", ");
            }
         }
         ActiveMQServerLogger.LOGGER.debug(message.toString());
      }
   };
   messageJournal.load(records, preparedTransactions, transactionFailureCallback, false);
   // Since we don't use these, nullify the reference so that the garbage collector can clean them up
   preparedTransactions = null;
   for (RecordInfo info : records) {
      byte[] data = info.data;
      ActiveMQBuffer buff = ActiveMQBuffers.wrappedBuffer(data);
      Object o = DescribeJournal.newObjectEncoding(info, storageManager);
      if (info.getUserRecordType() == JournalRecordIds.ADD_MESSAGE) {
         messages.put(info.id, ((MessageDescribe) o).getMsg().toCore());
      } else if (info.getUserRecordType() == JournalRecordIds.ADD_MESSAGE_PROTOCOL) {
         messages.put(info.id, ((MessageDescribe) o).getMsg().toCore());
      } else if (info.getUserRecordType() == JournalRecordIds.ADD_LARGE_MESSAGE) {
         messages.put(info.id, ((MessageDescribe) o).getMsg());
      } else if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
         ReferenceDescribe ref = (ReferenceDescribe) o;
         HashMap<Long, ReferenceDescribe> map = messageRefs.get(info.id);
         if (map == null) {
            HashMap<Long, ReferenceDescribe> newMap = new HashMap<>();
            newMap.put(ref.refEncoding.queueID, ref);
            messageRefs.put(info.id, newMap);
         } else {
            map.put(ref.refEncoding.queueID, ref);
         }
      } else if (info.getUserRecordType() == JournalRecordIds.ACKNOWLEDGE_REF) {
         acks.add(info);
      } else if (info.userRecordType == JournalRecordIds.ACKNOWLEDGE_CURSOR) {
         CursorAckRecordEncoding encoding = new CursorAckRecordEncoding();
         encoding.decode(buff);
         Set<PagePosition> set = cursorRecords.get(encoding.queueID);
         if (set == null) {
            set = new HashSet<>();
            cursorRecords.put(encoding.queueID, set);
         }
         set.add(encoding.position);
      } else if (info.userRecordType == JournalRecordIds.PAGE_TRANSACTION) {
         if (info.isUpdate) {
            PageUpdateTXEncoding pageUpdate = new PageUpdateTXEncoding();
            pageUpdate.decode(buff);
            pgTXs.add(pageUpdate.pageTX);
         } else {
            PageTransactionInfoImpl pageTransactionInfo = new PageTransactionInfoImpl();
            pageTransactionInfo.decode(buff);
            pageTransactionInfo.setRecordID(info.id);
            pgTXs.add(pageTransactionInfo.getTransactionID());
         }
      }
   }
   messageJournal.stop();
   removeAcked(acks);
}
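Two branches above (ADD_REF into messageRefs, ACKNOWLEDGE_CURSOR into cursorRecords) use the same get-or-create accumulation pattern. A minimal sketch of that pattern written with Map.computeIfAbsent, which folds away the explicit null check; the field and value types here are simplified stand-ins, not the exporter's actual fields:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch only: accumulate values into a per-key set, creating the set on first use.
public class GetOrCreateSketch {

   private final Map<Long, Set<Long>> cursorRecords = new HashMap<>();

   // Equivalent to the get/null-check/put branch above, in one call.
   void recordAck(long queueID, long position) {
      cursorRecords.computeIfAbsent(queueID, k -> new HashSet<>()).add(position);
   }

   public static void main(String[] args) {
      GetOrCreateSketch sketch = new GetOrCreateSketch();
      sketch.recordAck(1L, 10L);
      sketch.recordAck(1L, 11L);
      sketch.recordAck(2L, 10L);
      System.out.println(sketch.cursorRecords); // prints something like {1=[10, 11], 2=[10]}
   }
}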