Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFetchBeginExFW in project zilla by aklivity.
From class KafkaCacheClientFetchFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;

    // Decode the BEGIN extension as a Kafka FETCH begin extension.
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;

    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx.fetch();
    final String16FW beginTopic = kafkaFetchBeginEx.topic();
    final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
    final ArrayFW<KafkaFilterFW> filters = kafkaFetchBeginEx.filters();
    final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
    final String topicName = beginTopic.asString();

    MessageConsumer newStream = null;

    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final int partitionId = progress.partitionId();
        final long partitionOffset = progress.partitionOffset();
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);

        // Reuse the per-topic-partition fanout if one already exists, otherwise create and register it.
        KafkaCacheClientFetchFanout fanout = cacheRoute.clientFetchFanoutsByTopicPartition.get(partitionKey);
        if (fanout == null) {
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(resolvedId), supplyLocalName.apply(resolvedId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final KafkaCachePartition partition = topic.supplyFetchPartition(partitionId);
            final long defaultOffset = resolved.with != null ? resolved.with.defaultOffset.value() : KafkaOffsetType.HISTORICAL.value();
            final KafkaCacheClientFetchFanout newFanout =
                new KafkaCacheClientFetchFanout(resolvedId, authorization, affinity, partition, defaultOffset);
            cacheRoute.clientFetchFanoutsByTopicPartition.put(partitionKey, newFanout);
            fanout = newFanout;
        }

        final KafkaFilterCondition condition = cursorFactory.asCondition(filters);
        final long latestOffset = kafkaFetchBeginEx.partition().latestOffset();
        final KafkaOffsetType maximumOffset = KafkaOffsetType.valueOf((byte) latestOffset);
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);

        newStream = new KafkaCacheClientFetchStream(
            fanout, sender, routeId, initialId, leaderId, authorization,
            partitionOffset, condition, maximumOffset, deltaType)::onClientMessage;
    }

    return newStream;
}
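Each of these factories decodes the BEGIN frame through reusable read-only flyweights (beginRO, extensionRO, kafkaBeginExRO) wrapped over a slice of the incoming buffer, so no extension bytes are copied. The following is a minimal, self-contained sketch of that wrap/tryWrap pattern over an Agrona buffer; HeaderFW and its field layout are hypothetical stand-ins for illustration, not zilla's generated flyweights.

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

public final class FlyweightSketch {
    // Hypothetical flyweight over [int typeId][long streamId]; reads fields in place, copies nothing.
    static final class HeaderFW {
        private DirectBuffer buffer;
        private int offset;

        // wrap: caller guarantees the bytes are valid
        HeaderFW wrap(DirectBuffer buffer, int offset, int limit) {
            assert limit - offset >= Integer.BYTES + Long.BYTES;
            this.buffer = buffer;
            this.offset = offset;
            return this;
        }

        // tryWrap: returns null on a short buffer, like extensionRO::tryWrap above
        HeaderFW tryWrap(DirectBuffer buffer, int offset, int limit) {
            return limit - offset >= Integer.BYTES + Long.BYTES ? wrap(buffer, offset, limit) : null;
        }

        int typeId() {
            return buffer.getInt(offset);
        }

        long streamId() {
            return buffer.getLong(offset + Integer.BYTES);
        }
    }

    public static void main(String[] args) {
        final UnsafeBuffer buffer = new UnsafeBuffer(new byte[16]);
        buffer.putInt(0, 7);      // typeId
        buffer.putLong(4, 0x01L); // streamId (odd, like the initialId assertion above)

        final HeaderFW headerRO = new HeaderFW(); // one reusable instance, like beginRO / extensionRO
        final HeaderFW header = headerRO.tryWrap(buffer, 0, buffer.capacity());
        assert header != null;
        System.out.println("typeId=" + header.typeId() + " streamId=" + header.streamId());
    }
}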
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFetchBeginExFW in project zilla by aklivity.
From class KafkaClientFetchFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer application) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long leaderId = begin.affinity();
    final long authorization = begin.authorization();

    // Decode the BEGIN extension; it may be absent or belong to a different binding type.
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
    final KafkaBeginExFW kafkaBeginEx =
        beginEx != null && beginEx.typeId() == kafkaTypeId ? extension.get(kafkaBeginExRO::wrap) : null;
    assert kafkaBeginEx == null || kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;
    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx != null ? kafkaBeginEx.fetch() : null;

    MessageConsumer newStream = null;

    // Only FETCH begins with no filters are handled by this factory.
    if (kafkaFetchBeginEx != null && kafkaFetchBeginEx.filters().isEmpty()) {
        final String16FW beginTopic = kafkaFetchBeginEx.topic();
        final String topicName = beginTopic.asString();
        final KafkaBindingConfig binding = supplyBinding.apply(routeId);
        final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
        if (resolved != null) {
            final long resolvedId = resolved.id;
            final KafkaOffsetFW partition = kafkaFetchBeginEx.partition();
            final int partitionId = partition.partitionId();
            final long initialOffset = partition.partitionOffset();
            final long latestOffset = partition.latestOffset();
            newStream = new KafkaFetchStream(
                application, routeId, initialId, resolvedId, topicName,
                partitionId, latestOffset, leaderId, initialOffset)::onApplication;
        }
    }

    return newStream;
}
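In every factory the value returned is a method reference bound to the freshly created stream object (::onApplication, ::onClientMessage, ::onServerMessage); the engine then delivers each later frame of that stream through it. Below is a small sketch of that dispatch shape with a locally defined MessageConsumer interface and frame type ids; both are assumptions for illustration and may not match zilla's actual engine types.

import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

public final class StreamDispatchSketch {
    // Stand-in for the engine's stream callback; zilla's real interface may differ.
    @FunctionalInterface
    interface MessageConsumer {
        void accept(int msgTypeId, DirectBuffer buffer, int index, int length);
    }

    // Illustrative frame type ids, not zilla's real constants.
    static final int BEGIN_TYPE_ID = 1;
    static final int DATA_TYPE_ID = 2;
    static final int END_TYPE_ID = 3;

    static final class FetchStream {
        private final String topic;

        FetchStream(String topic) {
            this.topic = topic;
        }

        // Bound as FetchStream::onApplication, mirroring KafkaFetchStream::onApplication above.
        void onApplication(int msgTypeId, DirectBuffer buffer, int index, int length) {
            switch (msgTypeId) {
            case BEGIN_TYPE_ID:
                System.out.println(topic + ": stream opened");
                break;
            case DATA_TYPE_ID:
                System.out.println(topic + ": data frame of " + length + " bytes");
                break;
            case END_TYPE_ID:
                System.out.println(topic + ": stream closed");
                break;
            default:
                break;
            }
        }
    }

    public static void main(String[] args) {
        final MessageConsumer newStream = new FetchStream("events")::onApplication;
        final DirectBuffer empty = new UnsafeBuffer(new byte[0]);
        newStream.accept(BEGIN_TYPE_ID, empty, 0, 0);
        newStream.accept(DATA_TYPE_ID, empty, 0, 0);
        newStream.accept(END_TYPE_ID, empty, 0, 0);
    }
}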
Use of io.aklivity.zilla.runtime.binding.kafka.internal.types.stream.KafkaFetchBeginExFW in project zilla by aklivity.
From class KafkaCacheServerFetchFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;

    // Decode the BEGIN extension as a Kafka FETCH begin extension.
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::wrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx.fetch();

    final String16FW beginTopic = kafkaFetchBeginEx.topic();
    final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
    final int partitionId = progress.partitionId();
    final long partitionOffset = progress.partitionOffset();
    final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
    final String topicName = beginTopic.asString();

    MessageConsumer newStream = null;

    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);

        // Reuse the per-topic-partition fanout if one already exists, otherwise create and register it.
        KafkaCacheServerFetchFanout fanout = cacheRoute.serverFetchFanoutsByTopicPartition.get(partitionKey);
        if (fanout == null) {
            final KafkaTopicConfig topic = binding.topic(topicName);
            final KafkaDeltaType routeDeltaType = topic != null ? topic.deltaType : deltaType;
            final KafkaOffsetType defaultOffset = topic != null ? topic.defaultOffset : HISTORICAL;
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(routeId), supplyLocalName.apply(routeId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic cacheTopic = cache.supplyTopic(topicName);
            final KafkaCachePartition partition = cacheTopic.supplyFetchPartition(partitionId);
            final KafkaCacheServerFetchFanout newFanout =
                new KafkaCacheServerFetchFanout(resolvedId, authorization, affinity, partition, routeDeltaType, defaultOffset);
            cacheRoute.serverFetchFanoutsByTopicPartition.put(partitionKey, newFanout);
            fanout = newFanout;
        }

        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        newStream = new KafkaCacheServerFetchStream(
            fanout, sender, routeId, initialId, leaderId, authorization, partitionOffset)::onServerMessage;
    }

    return newStream;
}
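Both cache factories follow the same idiom: a per-route map keyed by topicPartitionKey(topicName, partitionId) holds one fanout per topic partition, so many application streams share a single cached fetch. A compact sketch of that reuse follows, using plain java.util collections; the key derivation and Fanout class here are illustrative only, not zilla's implementation.

import java.util.HashMap;
import java.util.Map;

public final class FanoutRegistrySketch {
    // Stand-in for KafkaCacheClientFetchFanout / KafkaCacheServerFetchFanout.
    static final class Fanout {
        final String topic;
        final int partitionId;

        Fanout(String topic, int partitionId) {
            this.topic = topic;
            this.partitionId = partitionId;
        }
    }

    private final Map<Long, Fanout> fanoutsByTopicPartition = new HashMap<>();

    // Illustrative key derivation; zilla computes its own topicPartitionKey.
    private static long topicPartitionKey(String topic, int partitionId) {
        return ((long) topic.hashCode() << 32) | (partitionId & 0xFFFF_FFFFL);
    }

    // Mirrors the "get, create if missing, put" sequence in both cache factories above.
    Fanout supplyFanout(String topic, int partitionId) {
        final long partitionKey = topicPartitionKey(topic, partitionId);
        return fanoutsByTopicPartition.computeIfAbsent(
            partitionKey, key -> new Fanout(topic, partitionId));
    }

    public static void main(String[] args) {
        final FanoutRegistrySketch registry = new FanoutRegistrySketch();
        final Fanout first = registry.supplyFanout("events", 0);
        final Fanout second = registry.supplyFanout("events", 0);
        System.out.println("same fanout reused: " + (first == second)); // true
    }
}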