Use of net.openhft.chronicle.wire.DocumentContext in project Chronicle-Queue by OpenHFT.
Example from the class DetectNotReadyEntriesTest, method testDeadEntries.
@Test
public void testDeadEntries() throws FileNotFoundException {
    // TODO FIX.
    if (OS.isWindows())
        return;
    File dir = new File(OS.TARGET, getClass().getSimpleName() + "-" + System.nanoTime());
    dir.mkdir();
    MappedBytes bytes = MappedBytes.mappedBytes(new File(dir, "19700101" + SingleChronicleQueue.SUFFIX), 64 << 10);
    Wire wire = new BinaryWire(bytes);
    try (DocumentContext dc = wire.writingDocument(true)) {
        dc.wire().writeEventName(() -> "header").typePrefix(SingleChronicleQueueStore.class).marshallable(w -> {
            w.write(() -> "wireType").object(WireType.BINARY);
            w.write(() -> "writePosition").int64forBinding(288 + 4 + 17);
            w.write(() -> "roll").typedMarshallable(new SCQRoll(RollCycles.DAILY, 0));
            w.write(() -> "indexing").typedMarshallable(new SCQIndexing(WireType.BINARY, 32 << 10, 32));
            w.write(() -> "lastAcknowledgedIndexReplicated").int64forBinding(0);
        });
    }
    long pos = wire.bytes().writePosition();
    try (DocumentContext dc = wire.writingDocument(false)) {
        dc.wire().write("test").text("Hello World");
    }
    assertEquals(17, wire.bytes().readInt(pos));
    // make it incomplete, note that the length is removed,
    // since writing a length into an incomplete excerpt is not allowed
    wire.bytes().writeInt(pos, Wires.NOT_COMPLETE);
assertEquals("--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY,\n" + " writePosition: 309,\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: !int 32768,\n" + " indexSpacing: 32,\n" + " index2Index: 0,\n" + " lastIndex: 0\n" + " },\n" + " lastAcknowledgedIndexReplicated: 0\n" + "}\n" + "# position: 288, header: -1 or 0\n" + "--- !!not-ready-data! #binary\n" + "...\n" + "# 17 bytes remaining\n", Wires.fromSizePrefixedBlobs(bytes.readPosition(0)));
    bytes.release();
    try (SingleChronicleQueue queue = SingleChronicleQueueBuilder.binary(dir).testBlockSize().build()) {
        queue.acquireAppender().writeText("Bye for now");
    }
    try {
        IOTools.shallowDeleteDirWithFiles(dir.getAbsolutePath());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
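For contrast with the hand-built, deliberately incomplete entry above, here is a minimal sketch of the normal DocumentContext write/read cycle through the queue API itself; the temporary directory and the plain SingleChronicleQueueBuilder.binary(...) call without testBlockSize() are illustrative assumptions, not part of the test.

import java.io.File;

import net.openhft.chronicle.queue.ExcerptAppender;
import net.openhft.chronicle.queue.ExcerptTailer;
import net.openhft.chronicle.queue.impl.single.SingleChronicleQueue;
import net.openhft.chronicle.queue.impl.single.SingleChronicleQueueBuilder;
import net.openhft.chronicle.wire.DocumentContext;

public class DocumentContextRoundTrip {
    public static void main(String[] args) {
        File dir = new File(System.getProperty("java.io.tmpdir"), "dc-round-trip-" + System.nanoTime());
        try (SingleChronicleQueue queue = SingleChronicleQueueBuilder.binary(dir).build()) {
            // the excerpt only becomes visible to tailers once the DocumentContext is closed
            ExcerptAppender appender = queue.acquireAppender();
            try (DocumentContext dc = appender.writingDocument()) {
                dc.wire().write("test").text("Hello World");
            }
            // isPresent() reports whether an excerpt was actually read
            ExcerptTailer tailer = queue.createTailer();
            try (DocumentContext dc = tailer.readingDocument()) {
                if (dc.isPresent())
                    System.out.println(dc.wire().read(() -> "test").text());
            }
        }
    }
}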
Use of net.openhft.chronicle.wire.DocumentContext in project Chronicle-Queue by OpenHFT.
Example from the class MultiThreadedRollTest, method test.
@Test(timeout = 10000)
public void test() throws ExecutionException, InterruptedException {
    final SetTimeProvider timeProvider = new SetTimeProvider();
    timeProvider.currentTimeMillis(1000);
    final File path = DirectoryUtils.tempDir("MultiThreadedRollTest");
    try (final RollingChronicleQueue wqueue = binary(path).testBlockSize().timeProvider(timeProvider).rollCycle(TEST_SECONDLY).build()) {
        wqueue.acquireAppender().writeText("hello world");
        try (final RollingChronicleQueue rqueue = binary(path).testBlockSize().timeProvider(timeProvider).rollCycle(TEST_SECONDLY).build()) {
            ExcerptTailer tailer = rqueue.createTailer();
            // 'reader' is an ExecutorService field of the test class (its declaration is not part of this excerpt)
            Future f = reader.submit(() -> {
                long index;
                do {
                    try (DocumentContext documentContext = tailer.readingDocument()) {
                        System.out.println("tailer.state: " + tailer.state());
                        // index is only meaningful if present.
                        index = documentContext.index();
                        // if (documentContext.isPresent())
                        final boolean present = documentContext.isPresent();
                        System.out.println("documentContext.isPresent=" + present + (present ? ",index=" + Long.toHexString(index) : ", no index"));
                        Jvm.pause(50);
                    }
                } while (index != 0x200000000L && !reader.isShutdown());
            });
            timeProvider.currentTimeMillis(2000);
            ((SingleChronicleQueueExcerpts.StoreAppender) wqueue.acquireAppender()).writeEndOfCycleIfRequired();
            Jvm.pause(200);
            wqueue.acquireAppender().writeText("hello world");
            f.get();
        }
    }
}
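The polling loop the submitted task runs can be reduced to the following sketch; Executors.newSingleThreadExecutor() stands in for the test's reader field, which is an assumption here since that field is not shown in the excerpt.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

import net.openhft.chronicle.core.Jvm;
import net.openhft.chronicle.queue.ExcerptTailer;
import net.openhft.chronicle.wire.DocumentContext;

class TailerPoller {
    // poll until an excerpt is present, pausing between empty reads
    static Future<?> pollUntilPresent(ExcerptTailer tailer) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        return executor.submit(() -> {
            boolean present;
            do {
                try (DocumentContext dc = tailer.readingDocument()) {
                    present = dc.isPresent(); // false until the next excerpt (or roll marker) is written
                }
                if (!present)
                    Jvm.pause(50); // back off instead of busy-spinning
            } while (!present);
        });
    }
}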
Use of net.openhft.chronicle.wire.DocumentContext in project Chronicle-Queue by OpenHFT.
Example from the class NotCompleteTest, method testMessageLeftNotComplete.
@Test
public void testMessageLeftNotComplete() {
    File tmpDir = DirectoryUtils.tempDir("testMessageLeftNotComplete");
    try (final ChronicleQueue queue = binary(tmpDir).testBlockSize().rollCycle(RollCycles.TEST_DAILY).build()) {
        ExcerptAppender appender = queue.acquireAppender().lazyIndexing(lazyIndexing);
        // start a message which was not completed.
        DocumentContext dc = appender.writingDocument();
        dc.wire().write("some").text("data");
        // didn't call dc.close();
    }
    final SingleChronicleQueue singleChronicleQueue = null;
    try (final ChronicleQueue queue = binary(tmpDir).testBlockSize().build()) {
        ExcerptTailer tailer = queue.createTailer();
        try (DocumentContext dc = tailer.readingDocument()) {
            assertFalse(dc.isPresent());
        }
String expectedEager = "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " writePosition: [\n" + " 0,\n" + " 0\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 8,\n" + " indexSpacing: 1,\n" + " index2Index: 442,\n" + " lastIndex: 0\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: 0,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: -1\n" + "--- !!meta-data #binary\n" + "index2index: [\n" + " # length: 8, used: 1\n" + " 544,\n" + " 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 544, header: -1\n" + "--- !!meta-data #binary\n" + "index: [\n" + " # length: 8, used: 0\n" + " 0, 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 640, header: -1 or 0\n" + "--- !!not-ready-data! #binary\n" + "...\n" + "# 130428 bytes remaining\n";
String expectedLazy = "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " writePosition: [\n" + " 0,\n" + " 0\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 8,\n" + " indexSpacing: 1,\n" + " index2Index: 0,\n" + " lastIndex: 0\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: 0,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: -1 or 0\n" + "--- !!not-ready-data! #binary\n" + "...\n" + "# 130626 bytes remaining\n";
        assertEquals(lazyIndexing ? expectedLazy : expectedEager, queue.dump());
    }
    try (final ChronicleQueue queue = binary(tmpDir).testBlockSize().timeoutMS(500).build()) {
        ExcerptAppender appender = queue.acquireAppender();
        try (DocumentContext dc = appender.writingDocument()) {
            dc.wire().write("some").text("data");
        }
String expectedEager = "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " writePosition: [\n" + " 33412,\n" + " 143503447293952\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 8,\n" + " indexSpacing: 1,\n" + " index2Index: 442,\n" + " lastIndex: 1\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: 0,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: -1\n" + "--- !!meta-data #binary\n" + "index2index: [\n" + " # length: 8, used: 1\n" + " 544,\n" + " 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 544, header: -1\n" + "--- !!meta-data #binary\n" + "index: [\n" + " # length: 8, used: 1\n" + " 33412,\n" + " 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 640, header: -1\n" + "--- !!meta-data #binary\n" + "\"!! Skipped due to recovery of locked header !!";
String expectedEagerFooter = "# position: 33412, header: 0\n" + "--- !!data #binary\n" + "some: data\n" + "...\n" + "# 97642 bytes remaining\n";
String expectedLazy = "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " writePosition: [\n" + " 33408,\n" + " 143486267424768\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 8,\n" + " indexSpacing: 1,\n" + " index2Index: 33212,\n" + " lastIndex: 1\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: 0,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: -1\n" + "--- !!meta-data #binary\n" + "\"!! Skipped due to recovery of locked header !!";
String expectedLazyFooter = "# position: 33212, header: -1\n" + "--- !!meta-data #binary\n" + "index2index: [\n" + " # length: 8, used: 1\n" + " 33312,\n" + " 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 33312, header: -1\n" + "--- !!meta-data #binary\n" + "index: [\n" + " # length: 8, used: 1\n" + " 33408,\n" + " 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "# position: 33408, header: 0\n" + "--- !!data #binary\n" + "some: data\n" + "...\n" + "# 97646 bytes remaining\n";
        if (lazyIndexing) {
            assertThat(queue.dump(), containsString(expectedLazy));
            assertThat(queue.dump(), containsString(expectedLazyFooter));
        } else {
            assertThat(queue.dump(), containsString(expectedEager));
            assertThat(queue.dump(), containsString(expectedEagerFooter));
        }
    }
}
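The failure mode exercised above comes down to a DocumentContext that is opened but never closed. The following sketch shows the same pattern outside a test; the directory name is an assumption, while the 500 ms timeout mirrors the timeoutMS(500) used by the test to let a later writer recover the stale header.

import java.io.File;

import net.openhft.chronicle.queue.ChronicleQueue;
import net.openhft.chronicle.queue.ExcerptAppender;
import net.openhft.chronicle.queue.impl.single.SingleChronicleQueueBuilder;
import net.openhft.chronicle.wire.DocumentContext;

public class NotCompleteSketch {
    public static void main(String[] args) {
        File dir = new File(System.getProperty("java.io.tmpdir"), "not-complete-sketch");

        // leave an excerpt unfinished by never closing the DocumentContext
        try (ChronicleQueue queue = SingleChronicleQueueBuilder.binary(dir).build()) {
            DocumentContext dc = queue.acquireAppender().writingDocument();
            dc.wire().write("some").text("data");
            // dc.close() deliberately not called: the header is left in the not-complete state
        }

        // a later writer configured with a timeout can take over the stale header and append normally
        try (ChronicleQueue queue = SingleChronicleQueueBuilder.binary(dir).timeoutMS(500).build()) {
            ExcerptAppender appender = queue.acquireAppender();
            try (DocumentContext dc = appender.writingDocument()) {
                dc.wire().write("some").text("data");
            }
        }
    }
}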
Use of net.openhft.chronicle.wire.DocumentContext in project Chronicle-Queue by OpenHFT.
Example from the class NotCompleteTest, method testInterruptedDuringSerialisation.
@Test
public void testInterruptedDuringSerialisation() throws InterruptedException {
    final File tmpDir = DirectoryUtils.tempDir("testInterruptedDuringSerialisation_" + (lazyIndexing ? "lazy" : "eager"));
    DirectoryUtils.deleteDir(tmpDir);
    tmpDir.mkdirs();
    final List<String> names = Collections.synchronizedList(new ArrayList<>());
    final Person person1 = new Person(40, "Terry");
    final Person interrupter = new Person(50, Person.INTERRUPT);
    final Person thrower = new Person(80, Person.THROW);
    final Person person2 = new Person(90, "Bert");
    try (final ChronicleQueue queueReader = binary(tmpDir).testBlockSize().rollCycle(RollCycles.TEST_DAILY).timeoutMS(500).build();
         final ChronicleQueue queueWriter = binary(tmpDir).testBlockSize().rollCycle(RollCycles.TEST_DAILY).build()) {
        ExcerptTailer tailer = queueReader.createTailer();
        MethodReader reader = tailer.methodReader((PersonListener) person -> names.add(person.name));
        final StringBuilder queueDumpBeforeInterruptedWrite = new StringBuilder();
        // set up
        doWrite(queueWriter, (proxy, queue) -> {
            proxy.accept(person1);
            queueDumpBeforeInterruptedWrite.append(queue.dump());
        });
        String cleanedQueueDump = cleanQueueDump(queueDumpBeforeInterruptedWrite.toString());
        // start up writer thread
        Thread writerThread = new Thread(() -> doWrite(queueWriter, (proxy, queue) -> {
            // thread is interrupted during this
            proxy.accept(interrupter);
        }));
        writerThread.start();
        writerThread.join();
        try (final ChronicleQueue queue = binary(tmpDir).testBlockSize().rollCycle(RollCycles.TEST_DAILY).build()) {
            String dump = cleanQueueDump(queue.dump());
            assertEquals("queue should be unchanged by the interrupted write", cleanedQueueDump, dump);
        }
        // check only 1 written
        assertTrue(reader.readOne());
        assertEquals(1, names.size());
        assertEquals(person1.name, names.get(0));
        assertFalse(reader.readOne());
        // do a write that throws an exception
        doWrite(queueWriter, (proxy, queue) -> {
            try {
                proxy.accept(thrower);
            } catch (NullPointerException npe) {
                // ignore
            }
        });
        try (final ChronicleQueue queue = binary(tmpDir).testBlockSize().rollCycle(RollCycles.TEST_DAILY).build()) {
            String dump = cleanQueueDump(queue.dump());
            if (lazyIndexing) {
                // reading the queue creates the index, thus changing it, so do a text comparison here
cleanedQueueDump = "--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY_LIGHT,\n" + " writePosition: [\n" + " 442,\n" + " 0\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 8,\n" + " indexSpacing: 1,\n" + " index2Index: 475,\n" + " lastIndex: 0\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: 0,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: 0\n" + "--- !!data #binary\n" + "accept: {\n" + " age: 40,\n" + " name: Terry\n" + "}\n" + "# position: 475, header: 0\n" + "--- !!meta-data #binary\n" + "index2index: [\n" + " # length: 8, used: 0\n" + " 0, 0, 0, 0, 0, 0, 0, 0\n" + "]\n" + "...\n" + "\n";
            }
            assertEquals("queue should be unchanged by the failed write", cleanedQueueDump, dump);
        }
        // check nothing else written
        assertFalse(reader.readOne());
        // do an empty write
        ExcerptAppender appender = queueWriter.acquireAppender().lazyIndexing(lazyIndexing);
        DocumentContext wd = appender.writingDocument();
        wd.rollbackOnClose();
        wd.close();
        // check queue unchanged
        String dump = cleanQueueDump(queueWriter.dump());
        assertEquals("queue should be unchanged by the failed write", cleanedQueueDump, dump);
        // check nothing else written
        assertFalse(reader.readOne());
        // write another person to same queue in this thread
        doWrite(queueWriter, (proxy, queue) -> proxy.accept(person2));
        assertTrue(reader.readOne());
        assertEquals(2, names.size());
        assertEquals(person2.name, names.get(1));
        assertFalse(reader.readOne());
    }
}
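The "empty write" step near the end of the test relies on rollbackOnClose(). A minimal sketch of that pattern, assuming an already-acquired appender:

import net.openhft.chronicle.queue.ExcerptAppender;
import net.openhft.chronicle.wire.DocumentContext;

class RollbackSketch {
    // abandon a write so that nothing becomes visible to tailers
    static void abortedWrite(ExcerptAppender appender) {
        DocumentContext dc = appender.writingDocument();
        try {
            dc.wire().write("some").text("data"); // serialisation may fail or be interrupted part-way
            dc.rollbackOnClose();                 // ask for the excerpt to be discarded rather than committed
        } finally {
            dc.close();                           // close() honours the rollback request
        }
    }
}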
Use of net.openhft.chronicle.wire.DocumentContext in project Chronicle-Queue by OpenHFT.
Example from the class QueueInspectorTest, method shouldIndicateNoProcessIdWhenDocumentIsComplete.
@Test
public void shouldIndicateNoProcessIdWhenDocumentIsComplete() throws IOException {
    try (final SingleChronicleQueue queue = SingleChronicleQueueBuilder.binary(tmpDir.newFolder()).testBlockSize().build()) {
        final QueueInspector inspector = new QueueInspector(queue);
        final ExcerptAppender appender = queue.acquireAppender();
        appender.writeDocument(37L, ValueOut::int64);
        try (final DocumentContext ctx = appender.writingDocument()) {
            ctx.wire().write("foo").int32(17L);
        }
        final int writingThreadId = inspector.getWritingThreadId();
        assertThat(writingThreadId, is(not(OS.getProcessId())));
        assertThat(QueueInspector.isValidThreadId(writingThreadId), is(false));
    }
}
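The writeDocument(37L, ValueOut::int64) call used above is the convenience form that opens and closes the DocumentContext internally. A minimal sketch of writing a long that way and reading it back with an explicit context, assuming the appender and tailer come from the same queue; reading via getValueIn() is an assumption about how the untyped value is laid out:

import net.openhft.chronicle.queue.ExcerptAppender;
import net.openhft.chronicle.queue.ExcerptTailer;
import net.openhft.chronicle.wire.DocumentContext;
import net.openhft.chronicle.wire.ValueOut;

class WriteDocumentSketch {
    static void roundTrip(ExcerptAppender appender, ExcerptTailer tailer) {
        // convenience overload: the excerpt is committed when the lambda returns
        appender.writeDocument(37L, ValueOut::int64);
        try (DocumentContext dc = tailer.readingDocument()) {
            if (dc.isPresent())
                System.out.println(dc.wire().getValueIn().int64()); // expected to read back 37
        }
    }
}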