Usage of org.apache.activemq.artemis.core.persistence.impl.journal.AckDescribe in the project activemq-artemis by Apache.
From the class XmlDataExporter, method removeAcked.
/**
 * Go back through the messages and message refs we found in the journal and remove the ones that have been acked.
 * <p>
 * A message may be referenced by several queues; each ack removes only the reference belonging to
 * the acking queue. Once a message has no remaining queue references it is fully consumed and is
 * dropped from both {@code messages} and {@code messageRefs} so it is excluded from the export.
 *
 * @param acks the list of ack records we got from the journal
 */
private void removeAcked(ArrayList<RecordInfo> acks) {
   for (RecordInfo info : acks) {
      // Decode the raw journal record into an ack so we can see which queue acknowledged it.
      AckDescribe ack = (AckDescribe) DescribeJournal.newObjectEncoding(info, null);
      // info.id is the message id; look up all outstanding queue references for that message.
      HashMap<Long, ReferenceDescribe> referenceDescribeHashMap = messageRefs.get(info.id);
      // Remove the reference held by the queue that produced this ack.
      referenceDescribeHashMap.remove(ack.refEncoding.queueID);
      // No queue references left: the message is fully consumed, purge it entirely.
      if (referenceDescribeHashMap.isEmpty()) {
         messages.remove(info.id);
         messageRefs.remove(info.id);
      }
   }
}
Usage of org.apache.activemq.artemis.core.persistence.impl.journal.AckDescribe in the project activemq-artemis by Apache.
From the class PagingTest, method testDeleteQueueRestart.
@Test
public void testDeleteQueueRestart() throws Exception {
   clearDataRecreateServerDirs();
   // Disable journal compaction so every record written during the test is still
   // visible when we inspect the journal after the server stops.
   Configuration config = createDefaultInVMConfig().setJournalDirectory(getJournalDir()).setJournalSyncNonTransactional(false).setJournalCompactMinFiles(0);
   ActiveMQServer server = createServer(true, config, PagingTest.PAGE_SIZE, PagingTest.PAGE_MAX);
   server.start();
   final int numberOfMessages = 5000;
   // Large consumer window so clients buffer aggressively; blocking sends/acks keep ordering deterministic.
   locator = createInVMNonHALocator().setConsumerWindowSize(10 * 1024 * 1024).setBlockOnNonDurableSend(true).setBlockOnDurableSend(true).setBlockOnAcknowledge(true);
   SimpleString QUEUE2 = ADDRESS.concat("-2");
   ClientSessionFactory sf = locator.createSessionFactory();
   ClientSession session = sf.createSession(false, false, false);
   // Two durable queues bound to the same address, so each message gets two references.
   session.createQueue(PagingTest.ADDRESS, PagingTest.ADDRESS, null, true);
   session.createQueue(PagingTest.ADDRESS, QUEUE2, null, true);
   ClientProducer producer = session.createProducer(PagingTest.ADDRESS);
   // This is just to hold some messages as being delivered
   ClientConsumerInternal cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);
   ClientConsumerInternal cons2 = (ClientConsumerInternal) session.createConsumer(QUEUE2);
   ClientMessage message = null;
   byte[] body = new byte[MESSAGE_SIZE];
   ByteBuffer bb = ByteBuffer.wrap(body);
   for (int j = 1; j <= MESSAGE_SIZE; j++) {
      bb.put(getSamplebyte(j));
   }
   // Send enough durable messages to force the address into paging; commit in batches of 1000.
   for (int i = 0; i < numberOfMessages; i++) {
      message = session.createMessage(true);
      ActiveMQBuffer bodyLocal = message.getBodyBuffer();
      bodyLocal.writeBytes(body);
      producer.send(message);
      if (i % 1000 == 0) {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.start();
   long timeout = System.currentTimeMillis() + 30000;
   // I want the buffer full to make sure there are pending messages on the server's side
   while (System.currentTimeMillis() < timeout && (cons.getBufferSize() < 1000 || cons2.getBufferSize() < 1000)) {
      System.out.println("cons1 buffer = " + cons.getBufferSize() + ", cons2 buffer = " + cons2.getBufferSize());
      Thread.sleep(100);
   }
   assertTrue(cons.getBufferSize() >= 1000);
   assertTrue(cons2.getBufferSize() >= 1000);
   session.close();
   // Destroy the second queue while it still holds undelivered/buffered messages,
   // then stop everything so we can examine what the journal recorded for it.
   Queue queue = server.locateQueue(QUEUE2);
   long deletedQueueID = queue.getID();
   server.destroyQueue(QUEUE2);
   sf.close();
   locator.close();
   locator = null;
   sf = null;
   server.stop();
   final HashMap<Integer, AtomicInteger> recordsType = countJournal(config);
   for (Map.Entry<Integer, AtomicInteger> entry : recordsType.entrySet()) {
      System.out.println(entry.getKey() + "=" + entry.getValue());
   }
   // Deleting a queue must delete its page-cursor data outright, not ack each record.
   // (Integer.valueOf instead of the deprecated new Integer(...) constructor.)
   assertNull("The system is acking page records instead of just delete data", recordsType.get(Integer.valueOf(JournalRecordIds.ACKNOWLEDGE_CURSOR)));
   Pair<List<RecordInfo>, List<PreparedTransactionInfo>> journalData = loadMessageJournal(config);
   // Collect every reference the journal added for the deleted queue, then remove those
   // that were acked; anything left over is a leaked reference.
   HashSet<Long> deletedQueueReferences = new HashSet<>();
   for (RecordInfo info : journalData.getA()) {
      if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
         DescribeJournal.ReferenceDescribe ref = (ReferenceDescribe) DescribeJournal.newObjectEncoding(info);
         if (ref.refEncoding.queueID == deletedQueueID) {
            deletedQueueReferences.add(Long.valueOf(info.id));
         }
      } else if (info.getUserRecordType() == JournalRecordIds.ACKNOWLEDGE_REF) {
         AckDescribe ref = (AckDescribe) DescribeJournal.newObjectEncoding(info);
         if (ref.refEncoding.queueID == deletedQueueID) {
            deletedQueueReferences.remove(Long.valueOf(info.id));
         }
      }
   }
   if (!deletedQueueReferences.isEmpty()) {
      for (Long value : deletedQueueReferences) {
         System.out.println("Deleted Queue still has a reference:" + value);
      }
      fail("Deleted queue still have references");
   }
   // Restart and fully drain the surviving queue to prove the journal replays cleanly.
   server.start();
   locator = createInVMNonHALocator();
   locator.setConsumerWindowSize(10 * 1024 * 1024);
   sf = locator.createSessionFactory();
   session = sf.createSession(false, false, false);
   cons = (ClientConsumerInternal) session.createConsumer(ADDRESS);
   session.start();
   for (int i = 0; i < numberOfMessages; i++) {
      message = cons.receive(5000);
      assertNotNull(message);
      message.acknowledge();
      if (i % 1000 == 0) {
         session.commit();
      }
   }
   session.commit();
   producer.close();
   session.close();
   queue = server.locateQueue(PagingTest.ADDRESS);
   assertEquals(0, getMessageCount(queue));
   // Paging is cleared asynchronously after the queue empties; poll briefly before asserting.
   timeout = System.currentTimeMillis() + 10000;
   while (timeout > System.currentTimeMillis() && queue.getPageSubscription().getPagingStore().isPaging()) {
      Thread.sleep(100);
   }
   assertFalse(queue.getPageSubscription().getPagingStore().isPaging());
   server.stop();
}
Aggregations