Usage of net.openhft.chronicle.bytes.MappedBytes in the Chronicle-Queue project by OpenHFT: class TimedStoreRecovery, method recoverAndWriteHeader.
// Recovers from an incomplete (stuck/locked) header left at the current write
// position -- e.g. by a writer that died mid-write -- by converting the stuck
// region into a skippable meta-data entry, then writes a fresh header after it.
// Returns the result of writeHeaderOfUnknownLength; recurses on timeout.
@Override
public long recoverAndWriteHeader(@NotNull Wire wire, long timeoutMS, final LongValue lastPosition, Sequence sequence) throws UnrecoverableTimeoutException, EOFException {
Bytes<?> bytes = wire.bytes();
long offset = bytes.writePosition();
// Volatile read of the 4-byte header word at the current write position.
int num = bytes.readVolatileInt(offset);
// header number is only updated after successful write
final long targetHeaderNumber = wire.headerNumber() + 1;
String msgStart = "Unable to write a header at header number: 0x" + Long.toHexString(targetHeaderNumber) + " position: " + offset;
if (Wires.isNotComplete(num)) {
// TODO Determine what the safe size should be.
int sizeToSkip = 32 << 10;
if (bytes instanceof MappedBytes) {
// For mapped files, skip half the mapping overlap size instead of the
// fixed 32 KiB default -- presumably enough to clear any partially
// written message; see the TODO above.
MappedBytes mb = (MappedBytes) bytes;
sizeToSkip = Maths.toUInt31(mb.mappedFile().overlapSize() / 2);
}
// pad to a 4 byte word.
sizeToSkip = (sizeToSkip + 3) & ~3;
sizeToSkip -= (int) (offset & 3);
// clearing the start of the data so the meta data will look like 4 zero values with no event names.
long pos = bytes.writePosition();
try {
// Leave 4 bytes for the header word (CAS'd into place below), then write
// a debug message identifying the recovering thread and process.
bytes.writeSkip(4);
final String debugMessage = "!! Skipped due to recovery of locked header !! By thread " + Thread.currentThread().getName() + ", pid " + OS.getProcessId();
wire.getValueOut().text(debugMessage);
final StringWriter stackVisitor = new StringWriter();
new RuntimeException().printStackTrace(new PrintWriter(stackVisitor));
final String stackTrace = stackVisitor.toString();
// ensure there is enough space to record a stack trace for debugging purposes
if (debugMessage.length() + stackTrace.length() + 16 < sizeToSkip) {
wire.getValueOut().text(stackTrace);
}
// Pad the remainder of the skipped region so it parses cleanly.
wire.addPadding(Math.toIntExact(sizeToSkip + (pos + 4) - bytes.writePosition()));
} finally {
// Restore the write position so the CAS below targets the header word.
bytes.writePosition(pos);
}
// Header word describing a complete meta-data entry covering the skipped region.
int emptyMetaData = Wires.META_DATA | sizeToSkip;
// Only one recovering thread wins the CAS; a loser just logs what it found.
if (bytes.compareAndSwapInt(offset, num, emptyMetaData)) {
warn().on(getClass(), msgStart + " switching to a corrupt meta data message");
bytes.writeSkip(sizeToSkip + 4);
} else {
int num2 = bytes.readVolatileInt(offset);
warn().on(getClass(), msgStart + " already set to " + Integer.toHexString(num2));
}
} else {
// The header completed while we were deciding to recover; nothing to patch.
warn().on(getClass(), msgStart + " but message now exists.");
}
try {
return wire.writeHeaderOfUnknownLength(timeoutMS, TimeUnit.MILLISECONDS, lastPosition, sequence);
} catch (TimeoutException e) {
warn().on(getClass(), e);
// Could happen if another thread recovers, writes 2 messages but the second one is corrupt.
return recoverAndWriteHeader(wire, timeoutMS, lastPosition, sequence);
} catch (EOFException e) {
// The author treats EOF as impossible on this path; surface it loudly.
throw new AssertionError(e);
}
}
Usage of net.openhft.chronicle.bytes.MappedBytes in the Chronicle-Queue project by OpenHFT: class FsFullReadTest, method testFullReadFs.
@Ignore("broken test")
@Test
public void testFullReadFs() throws Exception {
    // Reads every entry of an existing queue through a tailer, then dumps the raw
    // backing file with WireDumper and checks both passes agree on the entry count.
    // NOTE(review): the original used blockSize(256 << 1000); a Java int shift
    // distance is taken mod 32, so that evaluated to 256 << 8 == 64 KiB.
    // 256 << 10 (256 KiB) is the presumed intent -- confirm.
    SingleChronicleQueue queue = SingleChronicleQueueBuilder.binary(basePath)
            .blockSize(256 << 10)
            .rollCycle(RollCycles.DAILY)
            .build();
    ExcerptTailer tailer = queue.createTailer();
    boolean doExit = false;
    int entries = 0;
    while (!doExit) {
        // A DocumentContext is single-use: acquire a fresh one per iteration.
        // (The original acquired it once outside the loop and closed it on the
        // first pass, so at most one entry could ever be read.)
        try (DocumentContext dc = tailer.readingDocument()) {
            if (dc.isPresent()) {
                entries++;
                Wire w = dc.wire();
                // Each entry is expected to hold a date-time followed by a 1 KiB payload.
                LocalDateTime dt = w.read().dateTime();
                assertNotNull(dt);
                byte[] b = w.read().bytes();
                assertEquals(1024, b.length);
            } else {
                System.out.println("Exiting");
                doExit = true;
            }
        }
    }
    System.out.println(String.format("Read %d entries.", entries));
    CommonStore commonStore = queue.storeForCycle(queue.cycle(), 0, false);
    File file = commonStore.file();
    queue.close();

    int dumpEntries = 0;
    Bytes<ByteBuffer> buffer = Bytes.elasticByteBuffer();
    MappedBytes bytes = null;
    try {
        bytes = MappedBytes.mappedBytes(file, 4 << 20);
        bytes.readLimit(bytes.realCapacity());
        WireDumper dumper = WireDumper.of(bytes);
        while (bytes.readRemaining() >= 4) {
            StringBuilder sb = new StringBuilder();
            boolean last = dumper.dumpOne(sb, buffer);
            assertTrue(sb.length() > 0);
            if (last)
                break;
            dumpEntries++;
        }
    } catch (IOException ioe) {
        err.println("Failed to read " + file + " " + ioe);
    } finally {
        // Release the file mapping and the scratch buffer (the original leaked both).
        if (bytes != null)
            bytes.release();
        buffer.release();
    }
    // JUnit's assertEquals takes (expected, actual): the tailer count is the
    // reference, the raw-dump count is under test (the original had them swapped).
    assertEquals(entries, dumpEntries);
}
Usage of net.openhft.chronicle.bytes.MappedBytes in the Chronicle-Queue project by OpenHFT: class SingleCQFormatTest, method testTwoMessages.
// Writes a queue file by hand -- one meta-data header describing the store plus
// two data messages -- then asserts the exact binary dump (a golden string that
// encodes absolute byte positions 442 and 463), reopens the file through the
// normal queue API, and re-reads the two messages in both directions.
// NOTE: the expected-dump assertion depends on the exact write order and sizes
// above it; do not reorder these writes.
@Test
public void testTwoMessages() throws FileNotFoundException {
@NotNull File dir = new File(OS.TARGET + "/deleteme-" + System.nanoTime());
// NOTE(review): mkdir() return value is ignored -- if creation fails the
// mappedBytes call below will fail instead.
dir.mkdir();
@NotNull RollCycles cycle = RollCycles.TEST4_DAILY;
{
@NotNull MappedBytes mappedBytes = MappedBytes.mappedBytes(new File(dir, "19700102" + SingleChronicleQueue.SUFFIX), 64 << 10);
@NotNull Wire wire = new BinaryWire(mappedBytes);
// Meta-data document: the SCQStore header the queue expects at the start of a file.
try (DocumentContext dc = wire.writingDocument(true)) {
dc.wire().writeEventName(() -> "header").typedMarshallable(new SingleChronicleQueueStore(cycle, WireType.BINARY, mappedBytes, 0, cycle.defaultIndexCount(), cycle.defaultIndexSpacing(), new TimedStoreRecovery(WireType.BINARY), -1, 0));
}
// Two ordinary data documents.
try (DocumentContext dc = wire.writingDocument(false)) {
dc.wire().writeEventName("msg").text("Hello world");
}
try (DocumentContext dc = wire.writingDocument(false)) {
dc.wire().writeEventName("msg").text("Also hello world");
}
// Golden-string check of the whole file dump, including header fields and
// the absolute positions of both data messages.
assertEquals("--- !!meta-data #binary\n" + "header: !SCQStore {\n" + " wireType: !WireType BINARY,\n" + " writePosition: [\n" + " 0,\n" + " 0\n" + " ],\n" + " roll: !SCQSRoll {\n" + " length: !int 86400000,\n" + " format: yyyyMMdd,\n" + " epoch: 0\n" + " },\n" + " indexing: !SCQSIndexing {\n" + " indexCount: 32,\n" + " indexSpacing: 4,\n" + " index2Index: 0,\n" + " lastIndex: 0\n" + " },\n" + " lastAcknowledgedIndexReplicated: -1,\n" + " recovery: !TimedStoreRecovery {\n" + " timeStamp: 0\n" + " },\n" + " deltaCheckpointInterval: !byte -1,\n" + " lastIndexReplicated: -1,\n" + " sourceId: 0\n" + "}\n" + "# position: 442, header: 0\n" + "--- !!data #binary\n" + "msg: Hello world\n" + "# position: 463, header: 1\n" + "--- !!data #binary\n" + "msg: Also hello world\n", Wires.fromSizePrefixedBlobs(mappedBytes.readPosition(0)));
mappedBytes.release();
}
// Reopen the hand-written file through the queue API and read it back.
@NotNull SingleChronicleQueue queue = binary(dir).rollCycle(cycle).testBlockSize().build();
@NotNull ExcerptTailer tailer = queue.createTailer();
readTwo(tailer);
tailer.toStart();
readTwo(tailer);
// TODO no direction
// With direction NONE the tailer should not advance: the same first message
// is expected twice at the same index.
tailer.direction(TailerDirection.NONE).toStart();
long start = queue.firstIndex();
assertEquals(start, tailer.index());
expected(tailer, "msg: Hello world\n");
assertEquals(start, tailer.index());
expected(tailer, "msg: Hello world\n");
/* TODO FIX.
assertEquals(start + 1, queue.lastIndex());
tailer.direction(TailerDirection.BACKWARD).toEnd();
assertEquals(start + 1, tailer.index());
expected(tailer, "msg: Also hello world\n");
assertEquals(start, tailer.index());
expected(tailer, "msg: Hello world\n");
assertEquals(start - 1, tailer.index());
*/
queue.close();
// Best-effort cleanup of the temporary directory.
try {
IOTools.shallowDeleteDirWithFiles(dir.getAbsolutePath());
} catch (Exception e) {
e.printStackTrace();
}
}
Usage of net.openhft.chronicle.bytes.MappedBytes in the Chronicle-Queue project by OpenHFT: class DumpQueueMain, method dumpFile.
/**
 * Dumps the contents of a single queue file to {@code out} using {@link WireDumper},
 * one wire entry at a time, stopping at end-of-data or once {@code upperLimit}
 * bytes have been read. Files whose name does not end with the queue suffix are
 * ignored. Long runs of trailing zeros in index/index2index arrays are truncated
 * in the printed output for readability.
 *
 * @param file       queue file to dump
 * @param out        destination for the human-readable dump
 * @param upperLimit stop reading once the read position passes this byte offset
 */
private static void dumpFile(@NotNull File file, @NotNull PrintStream out, long upperLimit) {
    if (!file.getName().endsWith(SingleChronicleQueue.SUFFIX))
        return;
    Bytes<ByteBuffer> buffer = Bytes.elasticByteBuffer();
    MappedBytes bytes = null;
    try {
        bytes = MappedBytes.mappedBytes(file, 4 << 20, OS.pageSize(), !OS.isWindows());
        bytes.readLimit(bytes.realCapacity());
        StringBuilder sb = new StringBuilder();
        WireDumper dumper = WireDumper.of(bytes);
        while (bytes.readRemaining() >= 4) {
            sb.setLength(0);
            boolean last = dumper.dumpOne(sb, buffer);
            if (sb.indexOf("\nindex2index:") != -1 || sb.indexOf("\nindex:") != -1) {
                // truncate trailing zeros
                if (sb.indexOf(", 0\n]\n") == sb.length() - 6) {
                    int i = indexOfLastZero(sb);
                    if (i < sb.length())
                        sb.setLength(i - 5);
                    sb.append(" # truncated trailing zeros\n]");
                }
            }
            out.println(sb);
            if (last)
                break;
            if (bytes.readPosition() > upperLimit) {
                out.println("# limit reached.");
                return;
            }
        }
    } catch (IOException ioe) {
        err.println("Failed to read " + file + " " + ioe);
    } finally {
        // Release the file mapping as well as the scratch buffer; the original
        // released only the buffer, leaking the MappedBytes reference.
        if (bytes != null)
            bytes.release();
        buffer.release();
    }
}
Usage of net.openhft.chronicle.bytes.MappedBytes in the Chronicle-Queue project by OpenHFT: class ThroughputMain, method main.
// Throughput benchmark: each of 'threads' workers appends fixed-size messages to
// its own queue for 'time' seconds, then each queue is read back in full; write
// and read rates are printed. Configured via -Dtime/-Dthreads/-Dsize/-Dpath/-DfullWrite.
public static void main(String[] args) {
System.out.println("Testing with " + "-Dtime=" + time + " " + "-Dthreads=" + threads + " " + "-Dsize=" + size + " " + "-Dpath=" + path + " " + "-DfullWrite=" + fullWrite);
long start = System.nanoTime();
String base = path + "/delete-" + Time.uniqueId() + ".me.";
// Larger block size on 64-bit JVMs; Linux gets the biggest mapping.
long blockSize = OS.is64Bit() ? OS.isLinux() ? 4L << 30 : 1L << 30 : 256L << 20;
AtomicLong count = new AtomicLong();
// --- write phase: one queue per worker, run in parallel ---
IntStream.range(0, threads).parallel().forEach(i -> {
long count2 = 0;
BytesStore<?, Void> nbs = BytesStore.nativeStoreWithFixedCapacity(size);
try (ChronicleQueue q = ChronicleQueue.singleBuilder(base + i).rollCycle(RollCycles.LARGE_HOURLY_XSPARSE).blockSize(blockSize).build()) {
ExcerptAppender appender = q.acquireAppender();
long lastIndex = -1;
do {
int defaultIndexSpacing = q.rollCycle().defaultIndexSpacing();
Wire wire = appender.wire();
// Number of messages that can be written before the next indexed entry.
int writeCount = (int) (defaultIndexSpacing - (lastIndex & (defaultIndexSpacing - 1)) - 1);
if (!fullWrite && wire != null && writeCount > 0) {
// Fast path: write raw messages straight into the mapped memory,
// bypassing the appender, for the un-indexed slots.
MappedBytes bytes = (MappedBytes) wire.bytes();
long address = bytes.addressForWrite(bytes.writePosition());
long bstart = bytes.start();
long bcap = bytes.realCapacity();
long canWrite = bcap - (bytes.writePosition() - bstart);
// lengthCount appears to pack the message count in the high 32 bits
// and the byte length in the low 32 bits (inferred from the shifts
// below) -- confirm against writeMessages.
long lengthCount = writeMessages(address, canWrite, writeCount, nbs);
bytes.writeSkip((int) lengthCount);
lastIndex += lengthCount >> 32;
count2 += lengthCount >> 32;
} else {
// Slow path: ordinary appender write (also taken when fullWrite is set).
try (DocumentContext dc = appender.writingDocument()) {
Wire wire2 = dc.wire();
wire2.bytes().write(nbs);
addToEndOfCache(wire2);
}
lastIndex = appender.lastIndexAppended();
count2++;
}
} while (start + time * 1e9 > System.nanoTime());
}
// System.out.println("... All data written, now reading ...");
nbs.releaseLast();
count.addAndGet(count2);
});
long time1 = System.nanoTime() - start;
Jvm.pause(1000);
System.gc();
long mid = System.nanoTime();
// --- read phase: tail each queue back in full, in parallel ---
IntStream.range(0, threads).parallel().forEach(i -> {
Bytes bytes = Bytes.allocateElasticDirect(64);
try (ChronicleQueue q = ChronicleQueue.singleBuilder(base + i).rollCycle(RollCycles.LARGE_HOURLY_XSPARSE).blockSize(blockSize).build()) {
ExcerptTailer tailer = q.createTailer();
for (; ; ) {
try (DocumentContext dc = tailer.readingDocument()) {
if (!dc.isPresent())
break;
bytes.clear();
bytes.write(dc.wire().bytes());
}
}
}
bytes.releaseLast();
});
long end = System.nanoTime();
long time2 = end - mid;
System.out.printf("Writing %,d messages took %.3f seconds, at a rate of %,d per second%n", count.longValue(), time1 / 1e9, 1000 * (long) (1e6 * count.get() / time1));
System.out.printf("Reading %,d messages took %.3f seconds, at a rate of %,d per second%n", count.longValue(), time2 / 1e9, 1000 * (long) (1e6 * count.get() / time2));
Jvm.pause(200);
// make sure its cleaned up for windows to delete.
System.gc();
IntStream.range(0, threads).forEach(i -> IOTools.deleteDirWithFiles(base + i, 2));
}
Aggregations