Usage example of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT:
class MappingReferenceCountTest, method testMappingReferenceCount.
/**
 * Tests that mapped memory blocks are acquired and released as writes move
 * across block boundaries, by checking {@code MappedFile.referenceCounts()}
 * after each positional write.
 *
 * @throws IOException if the temp file cannot be created or mapped
 */
@Test
public void testMappingReferenceCount() throws IOException {
    // Let any I/O failure propagate and fail the test instead of being
    // swallowed by a catch + printStackTrace (which hid real failures).
    File tempFile = File.createTempFile("chronicle", "q");
    try {
        int BLOCK_SIZE = 4096;
        final MappedFile mappedFile = MappedFile.mappedFile(tempFile.getName(), BLOCK_SIZE, 8);
        final Bytes bytes = mappedFile.bytes();

        // write into block 1
        bytes.writeLong(4096 + 8, Long.MAX_VALUE);
        assertEquals("refCount: 2, 0, 2", mappedFile.referenceCounts());

        // we move from block 1 to block 2
        bytes.writeLong((4096 * 2) + 8, Long.MAX_VALUE);
        assertEquals("refCount: 3, 0, 1, 2", mappedFile.referenceCounts());

        // we move from block 2 back to block 1
        bytes.writeLong((4096 * 1) + 8, Long.MAX_VALUE);
        assertEquals("refCount: 3, 0, 2, 1", mappedFile.referenceCounts());

        // we move from block 1 to block 3
        // (fixed: the old comment wrongly said "from block 2 back to block 1")
        bytes.writeLong((4096 * 3) + 8, Long.MAX_VALUE);
        assertEquals("refCount: 4, 0, 1, 1, 2", mappedFile.referenceCounts());

        // releasing the Bytes and closing the file should drop every refCount to 0
        bytes.releaseLast();
        mappedFile.close();
        assertEquals("refCount: 0, 0, 0, 0, 0", mappedFile.referenceCounts());
    } finally {
        tempFile.delete();
    }
}
Usage example of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT:
class QueueDumpMain, method dump.
/**
 * Dumps a raw queue file in a human-readable YAML-like form to the given writer:
 * first the 8-byte magic header, then one "--- !TYPE" section per size-prefixed entry.
 *
 * NOTE(review): mappedFile is never closed and bytes is never released here —
 * looks like a resource leak; confirm whether the caller is expected to own cleanup.
 *
 * @param filename the queue file to read
 * @param pw       destination for the textual dump; flushed before returning
 * @throws FileNotFoundException if the file cannot be mapped
 */
public static void dump(File filename, PrintWriter pw) throws FileNotFoundException {
MappedFile mappedFile = MappedFile.mappedFile(filename, 64 << 20, 16 << 20);
Bytes bytes = mappedFile.bytes();
// The first 8 bytes of the file are an ASCII magic string.
pw.print("# Magic: ");
for (int i = 0; i < 8; i++) pw.print((char) bytes.readUnsignedByte());
pw.println();
// Each entry starts with a 32-bit size-prefixed-block (spb) word; stop at the
// first word that is not a known length marker (end of written data).
while (true) {
long spb = bytes.readUnsignedInt();
if (!Wires.isKnownLength(spb))
break;
pw.print("--- !");
// Top 2 bits of the spb word select the entry type.
pw.print(SBP_TYPES[((int) (spb >>> 30))]);
pw.println();
long start = bytes.position();
// NOTE(review): 'start' is passed twice — presumably (bytes, pw, position, start, end);
// verify against the BytesUtil.toString signature.
BytesUtil.toString(bytes, pw, start, start, start + Wires.lengthOf(spb));
pw.println();
// Advance past this entry's payload to the next spb word.
bytes.skip(Wires.lengthOf(spb));
}
pw.flush();
}
Usage example of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT:
class BytesRingBuffer, method poll.
/**
 * Retrieves and removes the head of this queue, or returns {@code null} if this queue is
 * empty.
 *
 * @param bytesProvider supplies the buffer the element payload is copied into
 * @return {@code null} if this queue is empty, or a populated buffer if the element was retrieved
 * @throws IllegalStateException if the {@code using} buffer is not large enough
 * @throws InterruptedException declared by the interface — no blocking call is visible
 *                              in this body; presumably kept for contract compatibility
 */
@Nullable
public Bytes poll(@NotNull BytesProvider bytesProvider) throws InterruptedException, IllegalStateException {
// Snapshot the writer's position first; a concurrent writer may advance it afterwards,
// which is safe — we only ever read up to this snapshot.
long writeLoc = writeLocation();
long offset = header.getReadLocation();
// = this.readLocation.get();
long readLocation = offset;
// Nothing published between read and write cursors => queue is empty.
if (readLocation >= writeLoc) {
return null;
}
assert readLocation <= writeLoc : "reader has go ahead of the writer";
// Entry layout: [1-byte state flag][8-byte element size][payload...]
long flag = offset;
final byte state = bytes.readByte(flag);
offset += 1;
// the element is currently being written to, so let wait for the write to finish
if (state == States.BUSY.ordinal())
return null;
assert state == States.READY.ordinal() : " we are reading a message that we " + "shouldn't, state=" + state;
final long elementSize = bytes.readLong(offset);
offset += 8;
// 'next' is the start of the following entry (first byte after this payload).
final long next = offset + elementSize;
final Bytes using = bytesProvider.provide(elementSize);
// checks that the 'using' bytes is large enough
checkSize(using, elementSize);
using.limit(using.position() + elementSize);
// Copy the payload out of the ring into the caller's buffer.
bytes.read(using, offset);
// Mark the slot consumed so the writer may reclaim it.
bytes.write(flag, States.USED.ordinal());
// Publish the new bounds: writer may now write up to one full lap beyond 'next'.
header.setWriteUpTo(next + bytes.capacity());
header.setReadLocation(next);
// NOTE(review): this is a no-op as written — presumably intended to rewind the
// buffer to the start of the copied data; confirm against callers.
using.position(using.position());
return using;
}
Usage example of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT:
class InternalAppenderWriteBytesTest, method appendToPreviousCycle.
// Verifies that an InternalAppender can backfill an entry into a PREVIOUS roll
// cycle (whose file already carries an EOF marker), that doing so removes the
// EOF, that normaliseEOFs() restores it, and that a tailer then reads all three
// entries in index order across the cycle boundary.
@Test
public void appendToPreviousCycle() {
@NotNull Bytes<byte[]> test = Bytes.from("hello world");
@NotNull Bytes<byte[]> test1 = Bytes.from("hello world again cycle1");
@NotNull Bytes<byte[]> test2 = Bytes.from("hello world cycle2");
Bytes result = Bytes.elasticHeapByteBuffer();
// Controlled clock so the test can force a roll to a new cycle deterministically.
SetTimeProvider timeProvider = new SetTimeProvider();
try (SingleChronicleQueue q = SingleChronicleQueueBuilder.binary(getTmpDir()).timeProvider(timeProvider).rollCycle(TEST_HOURLY).build()) {
ExcerptAppender appender = q.acquireAppender();
appender.writeBytes(test);
// Remember the index slot that would follow the first entry in cycle 1.
long nextIndexInFirstCycle = appender.lastIndexAppended() + 1;
int firstCycle = q.rollCycle().toCycle(nextIndexInFirstCycle);
// Advance 65 minutes — past the hourly roll — so the next append lands in a new cycle.
timeProvider.advanceMillis(TimeUnit.SECONDS.toMillis(65 * 60));
appender.writeBytes(test2);
// System.out.println(q.dump());
// Rolling forward should have sealed the first cycle's file with an EOF marker.
Assert.assertTrue(hasEOF(q, firstCycle));
// here we try and write to previous cycle file. We will overwrite the EOF in doing so
ignoreException("Incomplete header found at pos: 33048: c0000000, overwriting");
((InternalAppender) appender).writeBytes(nextIndexInFirstCycle, test1);
// Backfilling removed the EOF from the first cycle's file.
Assert.assertFalse(hasEOF(q, firstCycle));
// we have to manually fix. This is done by CQE at the end of backfilling
appender.normaliseEOFs();
// A tailer must now see the entries in index order: test, test1, test2.
ExcerptTailer tailer = q.createTailer();
tailer.readBytes(result);
assertEquals(test, result);
result.clear();
tailer.readBytes(result);
assertEquals(test1, result);
result.clear();
tailer.readBytes(result);
assertEquals(test2, result);
}
}
Usage example of net.openhft.chronicle.bytes.Bytes in the Chronicle-Queue project by OpenHFT:
class InternalAppenderWriteBytesTest, method dontOverwriteExistingDifferentQueueInstance.
// Verifies that an InternalAppender obtained from a SECOND queue instance over
// the same directory refuses to overwrite indices that already exist: both
// overwrite attempts must only warn (expected exceptions) and leave the
// original entries readable and unmodified.
@Test
public void dontOverwriteExistingDifferentQueueInstance() {
// Both overwrite attempts below are expected to be rejected with these warnings.
expectException("Trying to overwrite index 0 which is before the end of the queue");
expectException("Trying to overwrite index 1 which is before the end of the queue");
@NotNull Bytes<byte[]> test = Bytes.from("hello world");
@NotNull Bytes<byte[]> test2 = Bytes.from("hello world2");
Bytes result = Bytes.elasticHeapByteBuffer();
long index;
final File tmpDir = getTmpDir();
// Full expected dump of the queue files; only referenced by the commented-out
// q.dump() assertions below — kept for debugging when re-enabling them.
final String expected = "" + "--- !!meta-data #binary\n" + "header: !STStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " metadata: !SCQMeta {\n" + " roll: !SCQSRoll { length: !int 86400000, format: yyyyMMdd'T4', epoch: 0 },\n" + " deltaCheckpointInterval: 64,\n" + " sourceId: 0\n" + " }\n" + "}\n" + "# position: 176, header: 0\n" + "--- !!data #binary\n" + "listing.highestCycle: 0\n" + "# position: 216, header: 1\n" + "--- !!data #binary\n" + "listing.lowestCycle: 0\n" + "# position: 256, header: 2\n" + "--- !!data #binary\n" + "listing.modCount: 1\n" + "# position: 288, header: 3\n" + "--- !!data #binary\n" + "chronicle.write.lock: -9223372036854775808\n" + "# position: 328, header: 4\n" + "--- !!data #binary\n" + "chronicle.append.lock: -9223372036854775808\n" + "# position: 368, header: 5\n" + "--- !!data #binary\n" + "chronicle.lastIndexReplicated: -1\n" + "# position: 416, header: 6\n" + "--- !!data #binary\n" + "chronicle.lastAcknowledgedIndexReplicated: -1\n" + "...\n" + "# 130596 bytes remaining\n" + "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " writePosition: [\n" + " 792,\n" + " 3401614098433\n" + " ],\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 32,\n" + " indexSpacing: 4,\n" + " index2Index: 196,\n" + " lastIndex: 4\n" + " },\n" + " dataFormat: 1\n" + "}\n" + "# position: 196, header: -1\n" + "--- !!meta-data #binary\n" + "index2index: [\n" + " # length: 32, used: 1\n" + " 488,\n" + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 488, header: -1\n" + "--- !!meta-data #binary\n" + "index: [\n" + " # length: 32, used: 1\n" + " 776,\n" + " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 776, header: 0\n" + "--- !!data\n" + "hello world\n" + "# position: 792, header: 1\n" + "--- !!data\n" + "hello world2\n" + "...\n" + "# 130260 bytes remaining\n";
// First queue instance writes two entries at indices 0 and 1, then closes.
try (SingleChronicleQueue q = createQueue(tmpDir)) {
ExcerptAppender appender = q.acquireAppender();
appender.writeBytes(test);
appender.writeBytes(test2);
index = appender.lastIndexAppended();
// assertEquals(expected, q.dump());
}
// Two entries appended => last index is 1.
assertEquals(1, index);
// has to be the same tmpDir
try (SingleChronicleQueue q = createQueue(tmpDir)) {
InternalAppender appender = (InternalAppender) q.acquireAppender();
// Both of these target existing indices and must be rejected (warnings expected above).
appender.writeBytes(0, Bytes.from("HELLO WORLD"));
// assertEquals(expected, q.dump());
appender.writeBytes(1, Bytes.from("HELLO WORLD"));
// assertEquals(expected, q.dump());
// The original entry at index 0 must still be intact.
ExcerptTailer tailer = q.createTailer();
tailer.readBytes(result);
assertEquals(test, result);
assertEquals(1, tailer.index());
}
}
Aggregations