Use of org.apache.activemq.artemis.core.io.IOCallback in project activemq-artemis by apache.
The class SyncCalculation, method syncTest.
/**
 * Performs {@code tries} write tests of {@code blockSize * blocks} bytes each and returns the lowest elapsed time among the tries.
 *
 * <p>
 * Please configure {@code blocks >= -XX:CompileThreshold} (10000 by default on most JVMs) to favour the best JIT/OSR (Just In Time/On Stack Replacement) compilation
 * if the test is running on a temporary file-system (e.g. tmpfs on Linux) or without {@code fsync}.
 * <p>
 * NOTE: the write latencies are reported only if {@code verbose && !(journalType == JournalType.ASYNCIO && !syncWrites)}, i.e. only when writes are effectively synchronous.
 *
 * @param datafolder  the folder where the journal files will be stored
 * @param blockSize   the size in bytes of each write on the journal
 * @param blocks      the number of {@code blockSize} writes performed on each try
 * @param tries       the number of tests
 * @param verbose     {@code true} to make the output verbose, {@code false} otherwise
 * @param fsync       if {@code true} the test performs fully durable writes, {@code false} otherwise
 * @param syncWrites  if {@code true} each write is issued only after the previous one has completed, {@code false} otherwise (i.e. each try waits only for the last write)
 * @param fileName    the name of the journal file used for the test
 * @param maxAIO      the max number of in-flight IO requests (if {@code journalType} supports it)
 * @param journalType the {@link JournalType} used for the tests
 * @return the lowest elapsed time (in {@link TimeUnit#MILLISECONDS}) to perform a try
 * @throws Exception if an error occurs while creating, writing or deleting the journal file
 */
public static long syncTest(File datafolder, int blockSize, int blocks, int tries, boolean verbose, boolean fsync, boolean syncWrites, String fileName, int maxAIO, JournalType journalType) throws Exception {
   SequentialFileFactory factory = newFactory(datafolder, fsync, journalType, blockSize * blocks, maxAIO);
   final boolean asyncWrites = journalType == JournalType.ASYNCIO && !syncWrites;
   // the write latencies could be taken only when writes are effectively synchronous
   final Histogram writeLatencies = (verbose && !asyncWrites) ? new Histogram(MAX_FLUSH_NANOS, 2) : null;
   if (journalType == JournalType.ASYNCIO && syncWrites) {
      System.out.println();
      System.out.println("*******************************************************************************************");
      System.out.println("*** Notice: The recommendation for AsyncIO journal is to not use --sync-writes ***");
      System.out.println("*** The measures here will be useful to understand your device ***");
      System.out.println("*** however the result here won't represent the best configuration option ***");
      System.out.println("*******************************************************************************************");
      System.out.println();
   }
   if (verbose) {
      System.out.println("Using " + factory.getClass().getName() + " to calculate sync times, alignment=" + factory.getAlignment());
      if (writeLatencies == null) {
         System.out.println("*** Use --sync-writes if you want to see a histogram for each write performed ***");
      }
   }
   SequentialFile file = factory.createSequentialFile(fileName);
   // to be sure that a process/thread crash won't leave the dataFolder with garbage files
   file.getJavaFile().deleteOnExit();
   try {
      final ByteBuffer bufferBlock = allocateAlignedBlock(blockSize, factory);
      // making sure the blockSize matches the device
      blockSize = bufferBlock.remaining();
      file.delete();
      file.open();
      file.fill(blockSize * blocks);
      file.close();
      long[] result = new long[tries];
      final ReusableLatch latch = new ReusableLatch(0);
      IOCallback callback = new IOCallback() {
         @Override
         public void done() {
            latch.countDown();
         }

         @Override
         public void onError(int errorCode, String errorMessage) {
         }
      };
      DecimalFormat dcformat = new DecimalFormat("###.##");
      for (int ntry = 0; ntry < tries; ntry++) {
         if (verbose) {
            System.out.println("**************************************************");
            System.out.println(ntry + " of " + tries + " calculation");
         }
         file.open();
         file.position(0);
         long start = System.currentTimeMillis();
         for (int i = 0; i < blocks; i++) {
            bufferBlock.position(0);
            latch.countUp();
            long startWrite = 0;
            if (writeLatencies != null) {
               startWrite = System.nanoTime();
            }
            file.writeDirect(bufferBlock, true, callback);
            if (syncWrites) {
               flushLatch(latch);
            }
            if (writeLatencies != null) {
               final long elapsedWriteNanos = System.nanoTime() - startWrite;
               writeLatencies.recordValue(elapsedWriteNanos);
            }
         }
         if (!syncWrites) {
            flushLatch(latch);
         }
         long end = System.currentTimeMillis();
         result[ntry] = (end - start);
         if (verbose) {
            double writesPerMillisecond = (double) blocks / (double) result[ntry];
            System.out.println("Time = " + result[ntry] + " milliseconds");
            System.out.println("Writes / millisecond = " + dcformat.format(writesPerMillisecond));
            System.out.println("bufferTimeout = " + toNanos(result[ntry], blocks, verbose));
            System.out.println("**************************************************");
         }
         file.close();
         if (ntry == 0 && writeLatencies != null) {
            // discarding the first one.. some warmup time
            writeLatencies.reset();
         }
      }
      factory.releaseDirectBuffer(bufferBlock);
      if (writeLatencies != null) {
         System.out.println("Write Latencies Percentile Distribution in microseconds");
         // print latencies in us -> (ns * 1000d)
         System.out.println("*****************************************************************");
         writeLatencies.outputPercentileDistribution(System.out, 1000d);
         System.out.println();
         System.out.println("*****************************************************************");
         System.out.println("*** this may be useful to generate charts if you like charts: ***");
         System.out.println("*** http://hdrhistogram.github.io/HdrHistogram/plotFiles.html ***");
         System.out.println("*****************************************************************");
         System.out.println();
         writeLatencies.reset();
      }
      long totalTime = Long.MAX_VALUE;
      for (int i = 0; i < tries; i++) {
         if (result[i] < totalTime) {
            totalTime = result[i];
         }
      }
      return totalTime;
   } finally {
      try {
         file.close();
      } catch (Exception e) {
      }
      try {
         file.delete();
      } catch (Exception e) {
      }
      try {
         factory.stop();
      } catch (Exception e) {
      }
   }
}
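For reference, a minimal sketch of how syncTest might be driven by a caller; the folder, block size, try counts and the NIO journal type below are illustrative assumptions rather than values taken from the Artemis CLI, and the import of SyncCalculation assumes the class shipped in the artemis-cli module.

import java.io.File;

import org.apache.activemq.artemis.cli.commands.util.SyncCalculation;
import org.apache.activemq.artemis.core.server.JournalType;

public class SyncTestExample {

   public static void main(String[] args) throws Exception {
      File dataFolder = new File("./sync-test-data"); // illustrative folder, created on demand
      dataFolder.mkdirs();
      // 4 KiB writes, 10000 blocks per try (>= the default -XX:CompileThreshold), 5 tries
      long lowestMillis = SyncCalculation.syncTest(dataFolder, 4096, 10_000, 5,
                                                   true,   // verbose
                                                   true,   // fsync: fully durable writes
                                                   true,   // syncWrites: wait for each write, enables the latency histogram
                                                   "sync-test.tmp",
                                                   4096,   // maxAIO (only relevant for ASYNCIO)
                                                   JournalType.NIO);
      System.out.println("Lowest elapsed time: " + lowestMillis + " ms");
   }
}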
Use of org.apache.activemq.artemis.core.io.IOCallback in project activemq-artemis by apache.
The class TimedSequentialFile, method invokeOnErrorOn.
private static void invokeOnErrorOn(final int errorCode, final String errorMessage, List<? extends IOCallback> callbacks) {
   final int size = callbacks.size();
   for (int i = 0; i < size; i++) {
      try {
         final IOCallback callback = callbacks.get(i);
         callback.onError(errorCode, errorMessage);
      } catch (Throwable e) {
         ActiveMQJournalLogger.LOGGER.errorCallingErrorCallback(e);
      }
   }
}
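The method above fans an error out to every registered IOCallback while isolating failures, so a misbehaving callback cannot prevent the others from being notified. Every IOCallback shown on this page leaves onError empty, but in real code the error usually has to be surfaced to a waiting thread. Below is a minimal sketch of a blocking callback that records the first reported error; it is a standalone illustration (loosely similar in spirit to the SimpleWaitIOCallback helper that ships with Artemis, not a copy of it).

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.activemq.artemis.core.io.IOCallback;

public class WaitingIOCallback implements IOCallback {

   private final CountDownLatch latch = new CountDownLatch(1);
   private volatile String errorMessage;
   private volatile int errorCode;

   @Override
   public void done() {
      latch.countDown();
   }

   @Override
   public void onError(int errorCode, String errorMessage) {
      this.errorCode = errorCode;
      this.errorMessage = errorMessage;
      latch.countDown();
   }

   /** Blocks until done() or onError() is invoked, then throws if an error was reported. */
   public void waitCompletion(long timeout, TimeUnit unit) throws Exception {
      if (!latch.await(timeout, unit)) {
         throw new IllegalStateException("IO operation did not complete within the timeout");
      }
      if (errorMessage != null) {
         throw new IllegalStateException("IO error " + errorCode + ": " + errorMessage);
      }
   }
}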
Use of org.apache.activemq.artemis.core.io.IOCallback in project activemq-artemis by apache.
The class CallbackOrderTest, method testCallbackOutOfOrder.
/**
 * This method will make sure callbacks come back in order even when they are completed out of order by libaio
 */
@Test
public void testCallbackOutOfOrder() throws Exception {
   AIOSequentialFileFactory factory = new AIOSequentialFileFactory(temporaryFolder.getRoot(), 100);
   AIOSequentialFile file = (AIOSequentialFile) factory.createSequentialFile("test.bin");
   final AtomicInteger count = new AtomicInteger(0);
   IOCallback callback = new IOCallback() {
      @Override
      public void done() {
         count.incrementAndGet();
      }

      @Override
      public void onError(int errorCode, String errorMessage) {
      }
   };
   ArrayList<AIOSequentialFileFactory.AIOSequentialCallback> list = new ArrayList<>();
   // to increase possibility of issues due to reuse of callbacks
   for (int n = 1; n < 100; n++) {
      int N = n;
      count.set(0);
      list.clear();
      for (int i = 0; i < N; i++) {
         list.add(file.getCallback(callback, null));
      }
      for (int i = N - 1; i >= 0; i--) {
         list.get(i).done();
      }
      Assert.assertEquals(N, count.get());
      Assert.assertEquals(0, file.pendingCallbackList.size());
      Assert.assertTrue(file.pendingCallbackList.isEmpty());
   }
   factory.stop();
}
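The test above relies on AIOSequentialFile delivering completions in submission order even when libaio finishes them out of order; that ordering logic lives inside AIOSequentialCallback. A simplified standalone sketch of the general idea (not the Artemis implementation) is a dispatcher that assigns each submission a sequence number and only fires callbacks once every earlier sequence has completed:

import java.util.HashMap;
import java.util.Map;

import org.apache.activemq.artemis.core.io.IOCallback;

/** Delivers IOCallback.done() in submission order, even if completions arrive out of order. */
public class OrderedCompletionDispatcher {

   private long nextSequence = 0;    // assigned on submit
   private long nextToDeliver = 0;   // the sequence allowed to be delivered next
   private final Map<Long, IOCallback> pending = new HashMap<>();
   private final Map<Long, IOCallback> completed = new HashMap<>();

   public synchronized long submit(IOCallback callback) {
      long seq = nextSequence++;
      pending.put(seq, callback);
      return seq;
   }

   /** Called by the IO layer when the operation with the given sequence finishes. */
   public synchronized void complete(long sequence) {
      completed.put(sequence, pending.remove(sequence));
      // flush every contiguous completion starting from nextToDeliver
      IOCallback ready;
      while ((ready = completed.remove(nextToDeliver)) != null) {
         ready.done();
         nextToDeliver++;
      }
   }
}

With this sketch, completing sequences 2, 1, 0 in that order still invokes the three done() callbacks in the order 0, 1, 2, which is the property the test asserts against the real AIO file.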
Use of org.apache.activemq.artemis.core.io.IOCallback in project activemq-artemis by apache.
The class DuplicateCacheTest, method testDuplicate.
@Test
public void testDuplicate() throws Exception {
   createStorage();
   DuplicateIDCache cache = new DuplicateIDCacheImpl(new SimpleString("test"), 2000, journal, true);
   TransactionImpl tx = new TransactionImpl(journal);
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, tx);
   }
   tx.commit();
   tx = new TransactionImpl(journal);
   for (int i = 0; i < 5000; i++) {
      byte[] bytes = RandomUtil.randomBytes();
      cache.addToCache(bytes, tx);
   }
   tx.commit();
   byte[] id = RandomUtil.randomBytes();
   Assert.assertFalse(cache.contains(id));
   cache.addToCache(id, null);
   Assert.assertTrue(cache.contains(id));
   cache.deleteFromCache(id);
   final CountDownLatch latch = new CountDownLatch(1);
   OperationContextImpl.getContext().executeOnCompletion(new IOCallback() {
      @Override
      public void done() {
         latch.countDown();
      }

      @Override
      public void onError(int errorCode, String errorMessage) {
      }
   }, true);
   Assert.assertTrue(latch.await(1, TimeUnit.MINUTES));
   Assert.assertFalse(cache.contains(id));
}
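The cache in this test is created with a fixed size of 2000 while 5000 IDs are added per transaction, so older entries are rotated out as the circular buffer wraps. Below is a minimal standalone sketch of that fixed-size, rotating duplicate-ID cache idea; it ignores the journal persistence and transactional add that DuplicateIDCacheImpl also handles, and the class name is only illustrative.

import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.Set;

/** A fixed-size duplicate-ID cache: the oldest entry is evicted once capacity is reached. */
public class RotatingDuplicateIdCache {

   private final ByteBuffer[] ids;                    // circular buffer of IDs in insertion order
   private final Set<ByteBuffer> index = new HashSet<>(); // fast contains() lookup
   private int position = 0;

   public RotatingDuplicateIdCache(int size) {
      this.ids = new ByteBuffer[size];
   }

   public synchronized boolean contains(byte[] id) {
      // ByteBuffer.wrap gives content-based equals/hashCode for the byte[] key
      return index.contains(ByteBuffer.wrap(id));
   }

   public synchronized void addToCache(byte[] id) {
      ByteBuffer key = ByteBuffer.wrap(id);
      ByteBuffer evicted = ids[position];
      if (evicted != null) {
         index.remove(evicted); // rotate: drop the oldest entry
      }
      ids[position] = key;
      index.add(key);
      position = (position + 1) % ids.length;
   }

   public synchronized void deleteFromCache(byte[] id) {
      index.remove(ByteBuffer.wrap(id));
   }
}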
Use of org.apache.activemq.artemis.core.io.IOCallback in project activemq-artemis by apache.
The class PostOfficeImpl, method processRoute.
@Override
public void processRoute(final Message message, final RoutingContext context, final boolean direct) throws Exception {
   final List<MessageReference> refs = new ArrayList<>();
   Transaction tx = context.getTransaction();
   Long deliveryTime = message.getScheduledDeliveryTime();
   for (Map.Entry<SimpleString, RouteContextList> entry : context.getContexListing().entrySet()) {
      PagingStore store = pagingManager.getPageStore(entry.getKey());
      if (storageManager.addToPage(store, message, context.getTransaction(), entry.getValue())) {
         if (message.isLargeMessage()) {
            confirmLargeMessageSend(tx, message);
         }
         // We need to kick delivery so the Queues may check for the cursors case they are empty
         schedulePageDelivery(tx, entry);
         continue;
      }
      for (Queue queue : entry.getValue().getNonDurableQueues()) {
         MessageReference reference = MessageReference.Factory.createReference(message, queue);
         if (deliveryTime != null) {
            reference.setScheduledDeliveryTime(deliveryTime);
         }
         refs.add(reference);
         message.incrementRefCount();
      }
      Iterator<Queue> iter = entry.getValue().getDurableQueues().iterator();
      while (iter.hasNext()) {
         Queue queue = iter.next();
         MessageReference reference = MessageReference.Factory.createReference(message, queue);
         if (context.isAlreadyAcked(context.getAddress(message), queue)) {
            reference.setAlreadyAcked();
            if (tx != null) {
               queue.acknowledge(tx, reference);
            }
         }
         if (deliveryTime != null) {
            reference.setScheduledDeliveryTime(deliveryTime);
         }
         refs.add(reference);
         if (message.isDurable()) {
            int durableRefCount = message.incrementDurableRefCount();
            if (durableRefCount == 1) {
               if (tx != null) {
                  storageManager.storeMessageTransactional(tx.getID(), message);
               } else {
                  storageManager.storeMessage(message);
               }
               if (message.isLargeMessage()) {
                  confirmLargeMessageSend(tx, message);
               }
            }
            if (tx != null) {
               storageManager.storeReferenceTransactional(tx.getID(), queue.getID(), message.getMessageID());
               tx.setContainsPersistent();
            } else {
               storageManager.storeReference(queue.getID(), message.getMessageID(), !iter.hasNext());
            }
            // guard against a null deliveryTime before unboxing it
            if (deliveryTime != null && deliveryTime > 0) {
               if (tx != null) {
                  storageManager.updateScheduledDeliveryTimeTransactional(tx.getID(), reference);
               } else {
                  storageManager.updateScheduledDeliveryTime(reference);
               }
            }
         }
         message.incrementRefCount();
      }
   }
   if (tx != null) {
      tx.addOperation(new AddOperation(refs));
   } else {
      // This will use the same thread if there are no pending operations
      // avoiding a context switch on this case
      storageManager.afterCompleteOperations(new IOCallback() {
         @Override
         public void onError(final int errorCode, final String errorMessage) {
            ActiveMQServerLogger.LOGGER.ioErrorAddingReferences(errorCode, errorMessage);
         }

         @Override
         public void done() {
            addReferences(refs, direct);
         }
      });
   }
}
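In the non-transactional branch, afterCompleteOperations registers an IOCallback that runs on the calling thread when no storage operations are pending, avoiding a context switch, and otherwise fires only after the pending IO completes. Below is a simplified standalone sketch of that pattern based on a pending-operation counter; it is not the Artemis OperationContext implementation, and error propagation plus strict ordering guarantees are omitted for brevity.

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.activemq.artemis.core.io.IOCallback;

/** Runs callbacks inline when nothing is pending, otherwise defers them until pending IO completes. */
public class CompletionTracker {

   private final AtomicInteger pending = new AtomicInteger();
   private final Queue<IOCallback> deferred = new ConcurrentLinkedQueue<>();

   /** Called by the IO layer before each operation is submitted. */
   public void beforeIO() {
      pending.incrementAndGet();
   }

   /** Called by the IO layer when one operation finishes. */
   public void afterIO() {
      if (pending.decrementAndGet() == 0) {
         flush();
      }
   }

   /** Same-thread execution if there is no pending IO, otherwise queue the callback for later. */
   public void afterCompleteOperations(IOCallback callback) {
      if (pending.get() == 0) {
         callback.done();
      } else {
         deferred.add(callback);
         if (pending.get() == 0) { // the pending IO may have drained while we were queueing
            flush();
         }
      }
   }

   private void flush() {
      IOCallback callback;
      while ((callback = deferred.poll()) != null) {
         callback.done();
      }
   }
}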