Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.cache.KafkaCacheEntryFW in project zilla by aklivity.
From the class KafkaCachePartition, method writeEntryFinish. The method completes a partially written cache entry: it appends the trailing headers to the head segment's log file, records one hash entry per header plus an index entry for the finished record, and, when the delta type is JSON_PATCH, writes a JSON Patch diff between the ancestor and head values to the delta file.
public void writeEntryFinish(ArrayFW<KafkaHeaderFW> headers, KafkaDeltaType deltaType) {
    // The head (most recent) node sits just before the sentinel in the segment list
    final Node head = sentinel.previous;
    assert head != sentinel;

    final KafkaCacheSegment headSegment = head.segment;
    assert headSegment != null;

    final KafkaCacheFile logFile = headSegment.logFile();
    final KafkaCacheFile deltaFile = headSegment.deltaFile();
    final KafkaCacheFile hashFile = headSegment.hashFile();
    final KafkaCacheFile indexFile = headSegment.indexFile();

    // The log segment must have room for the trailing headers
    final int logAvailable = logFile.available();
    final int logRequired = headers.sizeof();
    assert logAvailable >= logRequired : String.format("%s %d >= %d", headSegment, logAvailable, logRequired);
    logFile.appendBytes(headers);

    // Pack the record's offset delta (relative to the segment base offset, so it
    // fits in 32 bits) and its 32-bit log position into a single 64-bit index entry
    final long offsetDelta = (int) (progress - headSegment.baseOffset());
    final long indexEntry = (offsetDelta << 32) | logFile.markValue();
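    // For illustration only (not part of the original method), the packed entry
    // decodes back into its two halves as:
    //   int offsetDelta = (int) (indexEntry >>> 32);
    //   int position    = (int) (indexEntry & 0xFFFF_FFFFL);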
    if (!headers.isEmpty()) {
        final DirectBuffer buffer = headers.buffer();
        final ByteBuffer byteBuffer = buffer.byteBuffer();
        assert byteBuffer != null;
        byteBuffer.clear();

        // Append one hash entry per header, pairing the header's hash with the
        // log position of the record being finished
        headers.forEach(h -> {
            final long hash = computeHash(h);
            final long hashEntry = (hash << 32) | logFile.markValue();
            hashFile.appendLong(hashEntry);
        });
    }
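    // Note (added for illustration): a record with N headers therefore contributes
    // N hash-file entries that all point back to the same log position, presumably
    // so header-based lookups can find candidate records without scanning the log.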
    assert indexFile.available() >= Long.BYTES;
    indexFile.appendLong(indexEntry);

    // Re-read the just-completed entry from the log through the KafkaCacheEntryFW flyweight
    final KafkaCacheEntryFW headEntry = logFile.readBytes(logFile.markValue(), headEntryRO::wrap);

    if (deltaType == JSON_PATCH && ancestorEntry != null &&
        ancestorEntry.valueLen() != -1 && headEntry.valueLen() != -1) {
        final OctetsFW ancestorValue = ancestorEntry.value();
        final OctetsFW headValue = headEntry.value();
        assert headEntry.offset$() == progress;

        final JsonProvider json = JsonProvider.provider();

        // Parse the ancestor value as a JSON structure
        ancestorIn.wrap(ancestorValue.buffer(), ancestorValue.offset(), ancestorValue.sizeof());
        final JsonReader ancestorReader = json.createReader(ancestorIn);
        final JsonStructure ancestorJson = ancestorReader.read();
        ancestorReader.close();

        // Parse the head value as a JSON structure
        headIn.wrap(headValue.buffer(), headValue.offset(), headValue.sizeof());
        final JsonReader headReader = json.createReader(headIn);
        final JsonStructure headJson = headReader.read();
        headReader.close();

        // Compute the JSON Patch (RFC 6902) that transforms the ancestor value into the head value
        final JsonPatch diff = json.createDiff(ancestorJson, headJson);
        final JsonArray diffJson = diff.toJsonArray();

        // Serialize the patch after a 4-byte length prefix reserved at the start of diffBuffer
        diffOut.wrap(diffBuffer, Integer.BYTES);
        final JsonWriter writer = json.createWriter(diffOut);
        writer.write(diffJson);
        writer.close();

        // TODO: signal delta.sizeof > head.sizeof via null delta, otherwise delta file can exceed log file
        final int deltaLength = diffOut.position();
        diffBuffer.putInt(0, deltaLength);
        deltaFile.appendBytes(diffBuffer, 0, Integer.BYTES + deltaLength);
    }

    headSegment.lastOffset(progress);
}
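For context, here is a minimal standalone sketch of the diff step above, using only the JSON-P API the method relies on. The class name and sample values are invented for the example, and the jakarta.json package is assumed (the older javax.json flavor behaves identically):

import jakarta.json.Json;
import jakarta.json.JsonArray;
import jakarta.json.JsonPatch;
import jakarta.json.JsonStructure;

import java.io.StringReader;

public final class JsonPatchDiffExample {
    public static void main(String[] args) {
        // Ancestor and head values, standing in for the cached record values
        final JsonStructure ancestor = Json.createReader(
            new StringReader("{\"name\":\"alice\",\"age\":30}")).read();
        final JsonStructure head = Json.createReader(
            new StringReader("{\"name\":\"alice\",\"age\":31}")).read();

        // createDiff yields the RFC 6902 patch that transforms ancestor into head
        final JsonPatch diff = Json.createDiff(ancestor, head);
        final JsonArray diffJson = diff.toJsonArray();

        // Prints: [{"op":"replace","path":"/age","value":31}]
        System.out.println(diffJson);
    }
}

The method itself avoids intermediate strings: it parses both values directly from the cache buffers through reusable input streams (ancestorIn, headIn) and writes the resulting patch array straight into a direct buffer behind its length prefix.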