Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW in project zilla by aklivity.
The class KafkaCachePartition, method writeEntryFinish:
public void writeEntryFinish(ArrayFW<KafkaHeaderFW> headers, KafkaDeltaType deltaType) {
    final Node head = sentinel.previous;
    assert head != sentinel;
    final KafkaCacheSegment headSegment = head.segment;
    assert headSegment != null;

    final KafkaCacheFile logFile = headSegment.logFile();
    final KafkaCacheFile deltaFile = headSegment.deltaFile();
    final KafkaCacheFile hashFile = headSegment.hashFile();
    final KafkaCacheFile indexFile = headSegment.indexFile();

    // the log file must still have room for the trailing headers
    final int logAvailable = logFile.available();
    final int logRequired = headers.sizeof();
    assert logAvailable >= logRequired : String.format("%s %d >= %d", headSegment, logAvailable, logRequired);
    logFile.appendBytes(headers);

    // index entry: offset delta from the segment base offset in the high 32 bits,
    // position of the entry in the log file in the low 32 bits
    final long offsetDelta = (int) (progress - headSegment.baseOffset());
    final long indexEntry = (offsetDelta << 32) | logFile.markValue();

    if (!headers.isEmpty()) {
        final DirectBuffer buffer = headers.buffer();
        final ByteBuffer byteBuffer = buffer.byteBuffer();
        assert byteBuffer != null;
        byteBuffer.clear();
        // one hash entry per header: header hash in the high 32 bits, log position in the low 32
        headers.forEach(h -> {
            final long hash = computeHash(h);
            final long hashEntry = (hash << 32) | logFile.markValue();
            hashFile.appendLong(hashEntry);
        });
    }

    assert indexFile.available() >= Long.BYTES;
    indexFile.appendLong(indexEntry);

    final KafkaCacheEntryFW headEntry = logFile.readBytes(logFile.markValue(), headEntryRO::wrap);

    if (deltaType == JSON_PATCH && ancestorEntry != null && ancestorEntry.valueLen() != -1 && headEntry.valueLen() != -1) {
        final OctetsFW ancestorValue = ancestorEntry.value();
        final OctetsFW headValue = headEntry.value();
        assert headEntry.offset$() == progress;

        // compute a JSON Patch (RFC 6902) diff from the ancestor value to the new head value
        final JsonProvider json = JsonProvider.provider();
        ancestorIn.wrap(ancestorValue.buffer(), ancestorValue.offset(), ancestorValue.sizeof());
        final JsonReader ancestorReader = json.createReader(ancestorIn);
        final JsonStructure ancestorJson = ancestorReader.read();
        ancestorReader.close();

        headIn.wrap(headValue.buffer(), headValue.offset(), headValue.sizeof());
        final JsonReader headReader = json.createReader(headIn);
        final JsonStructure headJson = headReader.read();
        headReader.close();

        final JsonPatch diff = json.createDiff(ancestorJson, headJson);
        final JsonArray diffJson = diff.toJsonArray();

        // write the delta as a length-prefixed entry in the delta file
        diffOut.wrap(diffBuffer, Integer.BYTES);
        final JsonWriter writer = json.createWriter(diffOut);
        writer.write(diffJson);
        writer.close();

        // TODO: signal delta.sizeof > head.sizeof via null delta, otherwise delta file can exceed log file
        final int deltaLength = diffOut.position();
        diffBuffer.putInt(0, deltaLength);
        deltaFile.appendBytes(diffBuffer, 0, Integer.BYTES + deltaLength);
    }

    headSegment.lastOffset(progress);
}
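The interesting part above is the JSON_PATCH branch: when both the ancestor entry and the new head entry carry JSON values, the partition stores an RFC 6902 diff instead of the full value. Below is a minimal, self-contained sketch of that diff flow using the JSON-P API (jakarta.json here; the zilla code reaches the same operations through JsonProvider), with hypothetical order JSON as input:

import java.io.StringReader;
import java.io.StringWriter;

import jakarta.json.Json;
import jakarta.json.JsonArray;
import jakarta.json.JsonPatch;
import jakarta.json.JsonStructure;

public final class JsonDiffSketch {
    public static void main(String[] args) {
        // ancestor and head stand in for the values of two successive cache entries
        final JsonStructure ancestor = Json.createReader(new StringReader("{\"status\":\"PENDING\",\"qty\":1}")).read();
        final JsonStructure head = Json.createReader(new StringReader("{\"status\":\"SHIPPED\",\"qty\":1}")).read();

        // createDiff yields an RFC 6902 JSON Patch transforming ancestor into head
        final JsonPatch diff = Json.createDiff(ancestor, head);
        final JsonArray diffJson = diff.toJsonArray();

        final StringWriter out = new StringWriter();
        Json.createWriter(out).write(diffJson);
        System.out.println(out); // [{"op":"replace","path":"/status","value":"SHIPPED"}]
    }
}

A consumer that already holds the ancestor value can reconstruct the head value with Json.createPatch(diffJson).apply(ancestor), which is what makes the length-prefixed delta file useful downstream.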
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW in project zilla by aklivity.
The class KafkaCacheClientFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
    final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId
        ? kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit())
        : null;

    MessageConsumer newStream = null;

    if (kafkaBeginEx != null) {
        // delegate to the per-kind factory (fetch, produce, ...) registered for this begin extension
        final BindingHandler factory = factories.get(kafkaBeginEx.kind());
        if (factory != null) {
            newStream = factory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
        }
    }

    return newStream;
}
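KafkaCacheClientFactory does no protocol work itself: it decodes just enough of the BEGIN extension to learn the kind, then delegates to a per-kind BindingHandler. A small stand-alone sketch of that dispatch-by-kind pattern follows; the Handler interface and kind constants are illustrative stand-ins, not the zilla types:

import java.util.Map;

public final class KindDispatchSketch {
    interface Handler {
        String newStream(String begin);
    }

    static final int KIND_FETCH = 1;   // stand-in for KafkaBeginExFW.KIND_FETCH
    static final int KIND_PRODUCE = 2; // stand-in for a produce kind

    static final Map<Integer, Handler> FACTORIES = Map.of(
        KIND_FETCH, begin -> "fetch stream for " + begin,
        KIND_PRODUCE, begin -> "produce stream for " + begin);

    static String newStream(int kind, String begin) {
        final Handler factory = FACTORIES.get(kind);
        return factory != null ? factory.newStream(begin) : null; // unknown kind: reject the stream
    }

    public static void main(String[] args) {
        System.out.println(newStream(KIND_FETCH, "topic-0"));
        System.out.println(newStream(99, "topic-0")); // null: no handler registered
    }
}

Returning null from newStream, as the real factory does, tells the engine no stream was accepted; each kind-specific factory owns the rest of the handshake.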
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW in project zilla by aklivity.
The class KafkaCacheClientFetchFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L; // initiating stream ids are odd

    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;
    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx.fetch();

    final String16FW beginTopic = kafkaFetchBeginEx.topic();
    final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
    final ArrayFW<KafkaFilterFW> filters = kafkaFetchBeginEx.filters();
    final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
    final String topicName = beginTopic.asString();

    MessageConsumer newStream = null;

    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;

    if (resolved != null) {
        final long resolvedId = resolved.id;
        final int partitionId = progress.partitionId();
        final long partitionOffset = progress.partitionOffset();
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);

        // reuse the fanout for this topic partition so all client fetch streams share one upstream
        KafkaCacheClientFetchFanout fanout = cacheRoute.clientFetchFanoutsByTopicPartition.get(partitionKey);
        if (fanout == null) {
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(resolvedId), supplyLocalName.apply(resolvedId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final KafkaCachePartition partition = topic.supplyFetchPartition(partitionId);
            final long defaultOffset = resolved.with != null ? resolved.with.defaultOffset.value() : KafkaOffsetType.HISTORICAL.value();
            final KafkaCacheClientFetchFanout newFanout = new KafkaCacheClientFetchFanout(resolvedId, authorization, affinity, partition, defaultOffset);
            cacheRoute.clientFetchFanoutsByTopicPartition.put(partitionKey, newFanout);
            fanout = newFanout;
        }

        final KafkaFilterCondition condition = cursorFactory.asCondition(filters);
        final long latestOffset = kafkaFetchBeginEx.partition().latestOffset();
        final KafkaOffsetType maximumOffset = KafkaOffsetType.valueOf((byte) latestOffset);
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);

        newStream = new KafkaCacheClientFetchStream(fanout, sender, routeId, initialId, leaderId, authorization,
            partitionOffset, condition, maximumOffset, deltaType)::onClientMessage;
    }

    return newStream;
}
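Note the get-or-create idiom around clientFetchFanoutsByTopicPartition: every fetch stream on the same topic partition shares one fanout, keyed by a long derived from topic and partition. A sketch of that idiom follows; the key derivation is an assumption (zilla's KafkaCacheRoute.topicPartitionKey may compute it differently), and the sketch uses computeIfAbsent where the original spells out get-then-put:

import java.util.HashMap;
import java.util.Map;

public final class FanoutCacheSketch {
    static final class Fanout {
        final long key;
        Fanout(long key) {
            this.key = key;
        }
    }

    final Map<Long, Fanout> fanoutsByTopicPartition = new HashMap<>();

    // hypothetical key: topic hash in the high 32 bits, partition id in the low 32
    static long topicPartitionKey(String topic, int partitionId) {
        return ((long) topic.hashCode() << 32) | (partitionId & 0xFFFF_FFFFL);
    }

    Fanout supplyFanout(String topic, int partitionId) {
        // one shared fanout per topic partition, created on first use
        return fanoutsByTopicPartition.computeIfAbsent(
            topicPartitionKey(topic, partitionId), Fanout::new);
    }
}

Both forms are equivalent in effect; sharing the fanout is what lets many client fetch streams be served from one cached upstream fetch per partition.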
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW in project zilla by aklivity.
The class KafkaCacheServerProduceFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L; // initiating stream ids are odd

    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::wrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    final KafkaProduceBeginExFW kafkaProduceBeginEx = kafkaBeginEx.produce();

    final String16FW beginTopic = kafkaProduceBeginEx.topic();
    final int partitionId = kafkaProduceBeginEx.partition().partitionId();
    final int remoteIndex = supplyRemoteIndex.applyAsInt(initialId);
    final String topicName = beginTopic.asString();

    MessageConsumer newStream = null;

    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;

    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);

        // reuse the fan-in for this topic partition so all produce streams share one upstream
        KafkaCacheServerProduceFan fan = cacheRoute.serverProduceFansByTopicPartition.get(partitionKey);
        if (fan == null) {
            final KafkaCacheServerProduceFan newFan = new KafkaCacheServerProduceFan(resolvedId, authorization, affinity, partitionId, topicName);
            cacheRoute.serverProduceFansByTopicPartition.put(partitionKey, newFan);
            fan = newFan;
        }

        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        final String cacheName = String.format("%s.%s", supplyNamespace.apply(routeId), supplyLocalName.apply(routeId));
        final KafkaCache cache = supplyCache.apply(cacheName);
        final KafkaCacheTopic topic = cache.supplyTopic(topicName);
        final KafkaCachePartition partition = topic.supplyProducePartition(partitionId, remoteIndex);

        newStream = new KafkaCacheServerProduceStream(fan, sender, routeId, initialId, leaderId, authorization, partition)::onServerMessage;
    }

    return newStream;
}
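Both the produce and fetch factories assert (initialId & 0x0000_0000_0000_0001L) != 0L before doing anything else. The assertions suggest that the low bit of a zilla stream id distinguishes initiating streams from replies, so a BEGIN arriving at newStream is expected to carry an odd, initiating id. A one-line helper makes the intent explicit (the name isInitial is mine, not a zilla API):

static boolean isInitial(long streamId) {
    return (streamId & 0x0000_0000_0000_0001L) != 0L; // odd ids initiate; even ids reply
}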
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.OctetsFW in project zilla by aklivity.
The class KafkaCacheServerFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;

    MessageConsumer newStream = null;

    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap);
    if (kafkaBeginEx != null) {
        // delegate to the per-kind factory registered for this begin extension
        final BindingHandler streamFactory = factories.get(kafkaBeginEx.kind());
        if (streamFactory != null) {
            newStream = streamFactory.newStream(begin.typeId(), begin.buffer(), begin.offset(), begin.sizeof(), sender);
        }
    }

    return newStream;
}
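Unlike KafkaCacheClientFactory, which chains tryWrap calls on the raw buffer, this variant reads the extension through extension.get(extensionRO::tryWrap). Both rely on the same flyweight contract: wrap insists the bytes decode and fails fast, while tryWrap returns null so the caller can branch gracefully, as newStream does when the extension is absent or belongs to another binding. A minimal stand-in flyweight (not the zilla base class) illustrating that contract:

import org.agrona.DirectBuffer;

final class IntFW {
    private DirectBuffer buffer;
    private int offset;

    // wrap: the caller vouches the bytes are valid; a short buffer is a bug
    IntFW wrap(DirectBuffer buffer, int offset, int limit) {
        assert limit - offset >= Integer.BYTES;
        this.buffer = buffer;
        this.offset = offset;
        return this;
    }

    // tryWrap: a short buffer is a routine condition, reported as null
    IntFW tryWrap(DirectBuffer buffer, int offset, int limit) {
        return limit - offset >= Integer.BYTES ? wrap(buffer, offset, limit) : null;
    }

    int value() {
        return buffer.getInt(offset);
    }
}

This split matches how newStream uses them above: tryWrap for conditions that merely reject the stream with a null result, asserts for conditions that would indicate an engine bug.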