Use of org.apache.activemq.artemis.core.io.SequentialFileFactory in project activemq-artemis by apache.
From the class CleanBufferTest, method testCleanOnAIO.
@Test
public void testCleanOnAIO() {
   if (LibaioContext.isLoaded()) {
      SequentialFileFactory factory = new AIOSequentialFileFactory(new File("./target"), 50);
      testBuffer(factory);
   }
}
From the class CleanBufferTest, method testCleanOnFake.
@Test
public void testCleanOnFake() {
   SequentialFileFactory factory = new FakeSequentialFileFactory();
   testBuffer(factory);
}
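Both tests delegate to a shared testBuffer(...) helper that is not included in this aggregation. The sketch below is a hedged illustration of what such a helper could do with a SequentialFileFactory (the file name, buffer size, and write steps are assumptions, not the project's actual implementation; it uses SequentialFile and java.nio.ByteBuffer):
private void testBuffer(final SequentialFileFactory factory) throws Exception {
   factory.start();
   try {
      SequentialFile file = factory.createSequentialFile("clean-buffer.bin");
      file.open();
      try {
         // allocate the buffer through the factory so AIO alignment rules are respected
         ByteBuffer buffer = factory.newBuffer(100);
         while (buffer.hasRemaining()) {
            buffer.put((byte) 'b');
         }
         buffer.rewind();
         // synchronous write; the factory implementation decides how the buffer is recycled
         file.writeDirect(buffer, true);
      } finally {
         file.close();
         file.delete();
      }
   } finally {
      factory.stop();
   }
}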
From the class SequentialFileFactoryTestBase, method listFilesOnNonExistentFolder.
@Test
public void listFilesOnNonExistentFolder() throws Exception {
   SequentialFileFactory fileFactory = createFactory("./target/dontexist");
   List<String> list = fileFactory.listFiles("tmp");
   Assert.assertNotNull(list);
   Assert.assertEquals(0, list.size());
}
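createFactory(...) is the hook that SequentialFileFactoryTestBase leaves to its subclasses, so each I/O implementation runs the same assertions. A minimal sketch of an NIO-backed override, with the signature assumed from the call above:
@Override
protected SequentialFileFactory createFactory(String folder) {
   // maxIO of 1 mirrors the NIOSequentialFileFactory usage shown in loadQueues() below
   return new NIOSequentialFileFactory(new File(folder), 1);
}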
From the class SharedNothingReplicationTest, method testReplicateFromSlowLive.
@Test
public void testReplicateFromSlowLive() throws Exception {
   // start live
   Configuration liveConfiguration = createLiveConfiguration();
   ActiveMQServer liveServer = ActiveMQServers.newActiveMQServer(liveConfiguration);
   liveServer.start();
   Wait.waitFor(() -> liveServer.isStarted());
   CoreMessagePersister.theInstance = SlowMessagePersister._getInstance();
   final CountDownLatch replicated = new CountDownLatch(1);
   ServerLocator locator = ServerLocatorImpl.newLocator("tcp://localhost:61616");
   locator.setCallTimeout(60_000L);
   locator.setConnectionTTL(60_000L);
   locator.addClusterTopologyListener(new ClusterTopologyListener() {

      @Override
      public void nodeUP(TopologyMember member, boolean last) {
         logger.infof("nodeUP fired last=%s, live=%s, backup=%s", last, member.getLive(), member.getBackup());
         if (member.getBackup() != null) {
            replicated.countDown();
         }
      }

      @Override
      public void nodeDown(long eventUID, String nodeID) {
      }
   });
   final ClientSessionFactory csf = locator.createSessionFactory();
   ClientSession sess = csf.createSession();
   sess.createQueue("slow", RoutingType.ANYCAST, "slow", true);
   sess.close();
   Executor sendMessageExecutor = Executors.newCachedThreadPool();
   // let's write some messages
   int i = 0;
   final int j = 50;
   final CountDownLatch allMessageSent = new CountDownLatch(j);
   while (i < 5) {
      sendMessageExecutor.execute(() -> {
         try {
            ClientSession session = csf.createSession(true, true);
            ClientProducer producer = session.createProducer("slow");
            ClientMessage message = session.createMessage(true);
            // this will make journal's append executor busy
            message.putLongProperty("delay", 500L);
            logger.infof("try to send a message before replicated");
            producer.send(message);
            logger.info("send message done");
            producer.close();
            session.close();
            allMessageSent.countDown();
         } catch (ActiveMQException e) {
            logger.error("send message", e);
         }
      });
      i++;
   }
   // start backup
   Configuration backupConfiguration = createBackupConfiguration();
   ActiveMQServer backupServer = ActiveMQServers.newActiveMQServer(backupConfiguration);
   backupServer.start();
   Wait.waitFor(() -> backupServer.isStarted());
   Assert.assertTrue("can not replicate in 30 seconds", replicated.await(30, TimeUnit.SECONDS));
   while (i < j) {
      sendMessageExecutor.execute(() -> {
         try {
            ClientSession session = csf.createSession(true, true);
            ClientProducer producer = session.createProducer("slow");
            ClientMessage message = session.createMessage(true);
            message.putLongProperty("delay", 0L);
            logger.infof("try to send a message after replicated");
            producer.send(message);
            logger.info("send message done");
            producer.close();
            session.close();
            allMessageSent.countDown();
         } catch (ActiveMQException e) {
            logger.error("send message", e);
         }
      });
      i++;
   }
   Assert.assertTrue("all message sent", allMessageSent.await(30, TimeUnit.SECONDS));
   csf.close();
   locator.close();
   backupServer.stop(true);
   liveServer.stop(true);
   SequentialFileFactory fileFactory;
   File liveJournalDir = brokersFolder.getRoot().toPath().resolve("live").resolve("data").resolve("journal").toFile();
   fileFactory = new MappedSequentialFileFactory(liveConfiguration.getJournalLocation(), liveConfiguration.getJournalFileSize(), false, liveConfiguration.getJournalBufferSize_NIO(), liveConfiguration.getJournalBufferTimeout_NIO(), null);
   JournalImpl liveMessageJournal = new JournalImpl(liveConfiguration.getJournalFileSize(), liveConfiguration.getJournalMinFiles(), liveConfiguration.getJournalPoolFiles(), liveConfiguration.getJournalCompactMinFiles(), liveConfiguration.getJournalCompactPercentage(), fileFactory, "activemq-data", "amq", fileFactory.getMaxIO());
   liveMessageJournal.start();
   final AtomicInteger liveJournalCounter = new AtomicInteger();
   liveMessageJournal.load(new AddRecordLoaderCallback() {

      @Override
      public void addRecord(RecordInfo info) {
         if (info.userRecordType != JournalRecordIds.ADD_MESSAGE_PROTOCOL) {
            // ignore records that are not protocol message additions
            return;
         }
         logger.infof("got live message %d", info.id);
         liveJournalCounter.incrementAndGet();
      }
   });
   // read backup's journal
   File backupJournalDir = brokersFolder.getRoot().toPath().resolve("backup").resolve("data").resolve("journal").toFile();
   fileFactory = new MappedSequentialFileFactory(backupConfiguration.getJournalLocation(), backupConfiguration.getJournalFileSize(), false, backupConfiguration.getJournalBufferSize_NIO(), backupConfiguration.getJournalBufferTimeout_NIO(), null);
   JournalImpl backupMessageJournal = new JournalImpl(backupConfiguration.getJournalFileSize(), backupConfiguration.getJournalMinFiles(), backupConfiguration.getJournalPoolFiles(), backupConfiguration.getJournalCompactMinFiles(), backupConfiguration.getJournalCompactPercentage(), fileFactory, "activemq-data", "amq", fileFactory.getMaxIO());
   backupMessageJournal.start();
   final AtomicInteger replicationCounter = new AtomicInteger();
   backupMessageJournal.load(new AddRecordLoaderCallback() {

      @Override
      public void addRecord(RecordInfo info) {
         if (info.userRecordType != JournalRecordIds.ADD_MESSAGE_PROTOCOL) {
            // ignore records that are not protocol message additions
            return;
         }
         logger.infof("replicated message %d", info.id);
         replicationCounter.incrementAndGet();
      }
   });
logger.infof("expected %d messages, live=%d, backup=%d", j, liveJournalCounter.get(), replicationCounter.get());
Assert.assertEquals("Live lost journal record", j, liveJournalCounter.get());
Assert.assertEquals("Backup did not replicated all journal", j, replicationCounter.get());
// if this ever happens.. you need to make sure this persister is registered instead of the CoreMessagePersister
Assert.assertTrue("The test is not valid, slow persister stopped being used", SlowMessagePersister._getInstance().used);
}
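The live and backup journals above are read with nearly identical boilerplate. A hedged sketch of how that could be factored into a reusable helper (the method name is an assumption; it only reuses calls already shown in the test):
private int countMessageRecords(Configuration configuration) throws Exception {
   SequentialFileFactory fileFactory = new MappedSequentialFileFactory(configuration.getJournalLocation(), configuration.getJournalFileSize(), false, configuration.getJournalBufferSize_NIO(), configuration.getJournalBufferTimeout_NIO(), null);
   JournalImpl journal = new JournalImpl(configuration.getJournalFileSize(), configuration.getJournalMinFiles(), configuration.getJournalPoolFiles(), configuration.getJournalCompactMinFiles(), configuration.getJournalCompactPercentage(), fileFactory, "activemq-data", "amq", fileFactory.getMaxIO());
   journal.start();
   final AtomicInteger counter = new AtomicInteger();
   journal.load(new AddRecordLoaderCallback() {

      @Override
      public void addRecord(RecordInfo info) {
         // count only protocol message additions, as the assertions above do
         if (info.userRecordType == JournalRecordIds.ADD_MESSAGE_PROTOCOL) {
            counter.incrementAndGet();
         }
      }
   });
   journal.stop();
   return counter.get();
}
With such a helper, the two assertions reduce to comparing countMessageRecords(liveConfiguration) and countMessageRecords(backupConfiguration) against j.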
From the class BridgeTest, method loadQueues.
/**
 * Inspects the journal directly and determines which queues are referenced on it.
 *
 * @param serverToInvestigate the server whose journal will be read
 * @return a Map containing the reference counts per queue
 * @throws Exception
 */
protected Map<Long, AtomicInteger> loadQueues(ActiveMQServer serverToInvestigate) throws Exception {
SequentialFileFactory messagesFF = new NIOSequentialFileFactory(serverToInvestigate.getConfiguration().getJournalLocation(), 1);
JournalImpl messagesJournal = new JournalImpl(serverToInvestigate.getConfiguration().getJournalFileSize(), serverToInvestigate.getConfiguration().getJournalMinFiles(), serverToInvestigate.getConfiguration().getJournalPoolFiles(), 0, 0, messagesFF, "activemq-data", "amq", 1);
List<RecordInfo> records = new LinkedList<>();
List<PreparedTransactionInfo> preparedTransactions = new LinkedList<>();
messagesJournal.start();
messagesJournal.load(records, preparedTransactions, null);
   // AtomicInteger is used here as a mutable per-queue reference counter
Map<Long, AtomicInteger> messageRefCounts = new HashMap<>();
for (RecordInfo info : records) {
Object o = DescribeJournal.newObjectEncoding(info);
if (info.getUserRecordType() == JournalRecordIds.ADD_REF) {
DescribeJournal.ReferenceDescribe ref = (DescribeJournal.ReferenceDescribe) o;
AtomicInteger count = messageRefCounts.get(ref.refEncoding.queueID);
if (count == null) {
count = new AtomicInteger(1);
messageRefCounts.put(ref.refEncoding.queueID, count);
} else {
count.incrementAndGet();
}
}
}
messagesJournal.stop();
return messageRefCounts;
}
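A hedged usage sketch of this helper at the end of a bridge test (the server variable and the expectation of an empty map are illustrative assumptions):
Map<Long, AtomicInteger> refCounts = loadQueues(server0);
for (Map.Entry<Long, AtomicInteger> entry : refCounts.entrySet()) {
   logger.infof("queue %d still holds %d message references", entry.getKey(), entry.getValue().get());
}
// an empty map means no ADD_REF records were left behind in the journal
Assert.assertTrue(refCounts.isEmpty());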