Use of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT.
From class BytesRingBufferTest, method testMultiThreadedWithIntValues:
// @Ignore("works in lang-bytes, appears to be a visibility issue that can be fixed by adding
// a" +
// " synchronized to ringbuffer.poll() and ringbuffer.offer()")
@Test
public void testMultiThreadedWithIntValues() {
    try (NativeBytesStore allocate = NativeBytesStore.nativeStoreWithFixedCapacity(1000)) {
        final BytesRingBuffer bytesRingBuffer = new BytesRingBuffer(allocate.bytes());
        // Every value written is added to the counter and every value read back is
        // subtracted, so the counter ends at zero only if each message round-trips
        // through the ring buffer exactly once.
        AtomicInteger counter = new AtomicInteger();
        int iterations = 20_000;
        CountDownLatch count = new CountDownLatch(iterations);
        ExecutorService writerService = Executors.newFixedThreadPool(2);
        ExecutorService readerService = Executors.newSingleThreadExecutor();
        try {
            // writer: two threads, each task spins on offer() until the ring accepts the value
            for (int i = 0; i < iterations; i++) {
                final int j = i;
                writerService.submit(() -> {
                    try (NativeBytesStore allocate2 = NativeBytesStore.nativeStoreWithFixedCapacity(iterations)) {
                        final Bytes out = allocate2.bytes();
                        out.clear();
                        out.writeInt(j);
                        counter.addAndGet(j);
                        out.flip();
                        boolean offer;
                        do {
                            offer = bytesRingBuffer.offer(out);
                        } while (!offer);
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                });
            }
            // reader: a single thread polls until a message is available, then counts it down
            for (int i = 0; i < iterations; i++) {
                readerService.submit(() -> {
                    try {
                        try (NativeBytesStore allocate3 = NativeBytesStore.nativeStoreWithFixedCapacity(25)) {
                            final Bytes bytes = allocate3.bytes();
                            Bytes result = null;
                            do {
                                try {
                                    result = bytesRingBuffer.poll(maxsize -> bytes);
                                } catch (InterruptedException e) {
                                    // fix: restore the interrupt flag instead of silently
                                    // swallowing the interruption
                                    Thread.currentThread().interrupt();
                                    return;
                                }
                            } while (result == null);
                            int value = result.readInt();
                            counter.addAndGet(-value);
                            count.countDown();
                        } catch (Error e) {
                            e.printStackTrace();
                        }
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                });
            }
            // NOTE(review): the timeout is 5000 *seconds* (~83 minutes); milliseconds were
            // presumably intended, but the generous value is kept to avoid flakiness.
            Assert.assertTrue(count.await(5000, TimeUnit.SECONDS));
            Assert.assertEquals(0, counter.get());
        } finally {
            // fix: the executors were never shut down, leaking their worker threads
            writerService.shutdownNow();
            readerService.shutdownNow();
        }
    }
}
Use of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT.
From class BytesRingBufferTest, method testSimpledSingleThreadedWriteRead:
@Test
public void testSimpledSingleThreadedWriteRead() {
    // Round-trips one message through the ring buffer on a single thread:
    // offer() the test payload, take() it back, and verify the decoded string.
    try (NativeBytesStore<Void> store = NativeBytesStore.nativeStoreWithFixedCapacity(150)) {
        final BytesRingBuffer ring = new BytesRingBuffer(store.bytes());
        ring.offer(data());
        final Bytes readBack = ring.take(maxSize -> input.clear());
        assertEquals(EXPECTED, readBack.readUTFΔ());
    }
}
Use of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT.
From class StatelessTailer, method wire:
/**
 * The wire associated with the current index; calling this method advances the index.
 *
 * @return the wire generated by the {@code wireFunction} and populated with the {@code bytes}
 */
@Override
public WireIn wire() {
    // Lazily initialise the index to the last written entry on first use.
    if (index == -1)
        index = statelessRawBytesTailer.lastWrittenIndex();
    // Read the current entry, then advance the index for the next call.
    final Bytes payload = statelessRawBytesTailer.readExcept(index++);
    return wireFunction.apply(payload);
}
Use of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT.
From class ZipBytesRingBufferTest, method testZipAndAppend:
@Test
public void testZipAndAppend() {
    File file = null;
    // fix: the NativeBytesStore instances were never released; manage them with
    // try-with-resources, consistent with the other tests in this file.
    try (NativeBytesStore allocate = NativeBytesStore.nativeStoreWithFixedCapacity(1024);
         NativeBytesStore msgBytes = NativeBytesStore.nativeStoreWithFixedCapacity(150)) {
        net.openhft.chronicle.bytes.Bytes message = msgBytes.bytes();
        message.writeUTFΔ("Hello World");
        message.flip();
        file = File.createTempFile("chronicle", "q");
        DirectChronicleQueue chronicle = (DirectChronicleQueue) new ChronicleQueueBuilder(file.getName()).build();
        // Capture the queue's write pointer before appending so we can detect
        // when the background append has landed.
        final long writeAddress = getHeader((SingleChronicleQueue) chronicle).getWriteByte();
        final BytesRingBuffer ring = new BytesRingBuffer(allocate.bytes());
        final ZippedDocumentAppender zippedDocumentAppender = new ZippedDocumentAppender(ring, chronicle);
        zippedDocumentAppender.append(message);
        long initialValue = chronicle.firstBytes();
        AtomicLong offset = new AtomicLong(initialValue);
        while (lastWrite((SingleChronicleQueue) chronicle) == writeAddress) {
            // wait for data to be written ( via another thread )
        }
        // read the data from chronicle into actual
        try (NativeBytesStore actualStore = NativeBytesStore.nativeStoreWithFixedCapacity(100)) {
            Bytes actual = actualStore.bytes();
            chronicle.readDocument(offset, actual);
            // "Hello World" zipped should be 12 chars
            Assert.assertEquals(12, actual.flip().remaining());
        }
    } finally {
        if (file != null)
            file.delete();
    }
}
Use of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT.
From class ThroughputMain, method main:
/**
 * Benchmark entry point: writes messages to {@code threads} parallel queues for
 * roughly {@code time} seconds, then reads everything back, and prints a
 * messages-per-second rate for each phase.
 *
 * <p>The {@code time}, {@code threads}, {@code size}, {@code path} and
 * {@code fullWrite} fields are defined elsewhere in the class — presumably read
 * from the -D system properties echoed below; TODO confirm against the field
 * declarations.
 */
public static void main(String[] args) {
    System.out.println("Testing with " + "-Dtime=" + time + " " + "-Dthreads=" + threads + " " + "-Dsize=" + size + " " + "-Dpath=" + path + " " + "-DfullWrite=" + fullWrite);
    long start = System.nanoTime();
    // Unique directory prefix so parallel runs don't collide; cleaned up at the end.
    String base = path + "/delete-" + Time.uniqueId() + ".me.";
    // Memory-map block size by platform: 4 GiB on 64-bit Linux, 1 GiB on other
    // 64-bit OSes, 256 MiB on 32-bit.
    long blockSize = OS.is64Bit() ? OS.isLinux() ? 4L << 30 : 1L << 30 : 256L << 20;
    // Total messages written across all writer threads.
    AtomicLong count = new AtomicLong();
    // ---- write phase: one queue per thread ----
    IntStream.range(0, threads).parallel().forEach(i -> {
        long count2 = 0;
        // Reusable payload of `size` bytes written into every message.
        BytesStore<?, Void> nbs = BytesStore.nativeStoreWithFixedCapacity(size);
        try (ChronicleQueue q = ChronicleQueue.singleBuilder(base + i).rollCycle(RollCycles.LARGE_HOURLY_XSPARSE).blockSize(blockSize).build()) {
            ExcerptAppender appender = q.acquireAppender();
            long lastIndex = -1;
            do {
                int defaultIndexSpacing = q.rollCycle().defaultIndexSpacing();
                Wire wire = appender.wire();
                // Number of messages we can write before the next indexed entry is due.
                int writeCount = (int) (defaultIndexSpacing - (lastIndex & (defaultIndexSpacing - 1)) - 1);
                if (!fullWrite && wire != null && writeCount > 0) {
                    // Fast path: write raw bytes directly into the mapped region,
                    // bypassing the full document-context machinery.
                    MappedBytes bytes = (MappedBytes) wire.bytes();
                    long address = bytes.addressForWrite(bytes.writePosition());
                    long bstart = bytes.start();
                    long bcap = bytes.realCapacity();
                    long canWrite = bcap - (bytes.writePosition() - bstart);
                    long lengthCount = writeMessages(address, canWrite, writeCount, nbs);
                    // NOTE(review): writeMessages appears to pack the message count in
                    // the high 32 bits (see the `>> 32` below) and the byte length in
                    // the low 32 bits (used by writeSkip) — confirm against its source.
                    bytes.writeSkip((int) lengthCount);
                    lastIndex += lengthCount >> 32;
                    count2 += lengthCount >> 32;
                } else {
                    // Slow path: append one message through the normal document API.
                    try (DocumentContext dc = appender.writingDocument()) {
                        Wire wire2 = dc.wire();
                        wire2.bytes().write(nbs);
                        addToEndOfCache(wire2);
                    }
                    lastIndex = appender.lastIndexAppended();
                    count2++;
                }
            } while (start + time * 1e9 > System.nanoTime());
        }
        // System.out.println("... All data written, now reading ...");
        nbs.releaseLast();
        count.addAndGet(count2);
    });
    long time1 = System.nanoTime() - start;
    // Settle the JVM between phases so the read measurement isn't skewed by
    // leftover write-phase work.
    Jvm.pause(1000);
    System.gc();
    long mid = System.nanoTime();
    // ---- read phase: tail every queue until no more documents are present ----
    IntStream.range(0, threads).parallel().forEach(i -> {
        Bytes bytes = Bytes.allocateElasticDirect(64);
        try (ChronicleQueue q = ChronicleQueue.singleBuilder(base + i).rollCycle(RollCycles.LARGE_HOURLY_XSPARSE).blockSize(blockSize).build()) {
            ExcerptTailer tailer = q.createTailer();
            for (; ; ) {
                try (DocumentContext dc = tailer.readingDocument()) {
                    if (!dc.isPresent())
                        break;
                    // Copy each document out so the read isn't optimised away.
                    bytes.clear();
                    bytes.write(dc.wire().bytes());
                }
            }
        }
        bytes.releaseLast();
    });
    long end = System.nanoTime();
    long time2 = end - mid;
    System.out.printf("Writing %,d messages took %.3f seconds, at a rate of %,d per second%n", count.longValue(), time1 / 1e9, 1000 * (long) (1e6 * count.get() / time1));
    System.out.printf("Reading %,d messages took %.3f seconds, at a rate of %,d per second%n", count.longValue(), time2 / 1e9, 1000 * (long) (1e6 * count.get() / time2));
    Jvm.pause(200);
    // make sure its cleaned up for windows to delete.
    System.gc();
    IntStream.range(0, threads).forEach(i -> IOTools.deleteDirWithFiles(base + i, 2));
}
Aggregations