// Usage of io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig
// in project zilla (by aklivity): class KafkaCacheServerDescribeFactory, method newStream.
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    // Decode the BEGIN frame and its kafka DESCRIBE extension to resolve the
    // topic being described, then attach the new stream to a per-topic fanout.
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long authorization = begin.authorization();
    final long affinity = begin.affinity();
    // Initial (client-initiated) stream ids have the low bit set.
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    // A DESCRIBE stream must carry a kafka-typed extension of kind DESCRIBE.
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::tryWrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_DESCRIBE;
    final KafkaDescribeBeginExFW kafkaDescribeBeginEx = kafkaBeginEx.describe();
    final String16FW beginTopic = kafkaDescribeBeginEx.topic();
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final int topicKey = cacheRoute.topicKey(topicName);
        // Reuse an existing fanout for this topic, or create and register one
        // so concurrent describe streams for the same topic share a single upstream.
        KafkaCacheServerDescribeFanout fanout = cacheRoute.serverDescribeFanoutsByTopic.get(topicKey);
        if (fanout == null) {
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(routeId), supplyLocalName.apply(routeId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final List<String> configNames = new ArrayList<>();
            kafkaDescribeBeginEx.configs().forEach(c -> configNames.add(c.asString()));
            final KafkaCacheServerDescribeFanout newFanout = new KafkaCacheServerDescribeFanout(resolvedId, authorization, topic, configNames);
            cacheRoute.serverDescribeFanoutsByTopic.put(topicKey, newFanout);
            fanout = newFanout;
        }
        // fanout is guaranteed non-null here (either found or just created),
        // so the former null re-check was redundant and has been removed.
        newStream = new KafkaCacheServerDescribeStream(fanout, sender, routeId, initialId, affinity, authorization)::onDescribeMessage;
    }
    // null signals the route could not be resolved; the caller rejects the stream.
    return newStream;
}
// Usage of io.aklivity.zilla.runtime.binding.kafka.internal.config.KafkaBindingConfig
// in project zilla (by aklivity): class KafkaMergedFactory, method newStream.
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    // Decode the BEGIN frame and its kafka MERGED extension, then create a
    // merged stream when the binding declares the topic as merged.
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long authorization = begin.authorization();
    final long affinity = begin.affinity();
    // Initial (client-initiated) stream ids have the low bit set.
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;

    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    final boolean kafkaTyped = beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = kafkaTyped ? extension.get(kafkaBeginExRO::tryWrap) : null;
    assert kafkaBeginEx != null;
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_MERGED;

    final KafkaMergedBeginExFW mergedBeginEx = kafkaBeginEx.merged();
    final KafkaCapabilities capabilities = mergedBeginEx.capabilities().get();
    final String topicName = mergedBeginEx.topic().asString();
    final KafkaDeltaType deltaType = mergedBeginEx.deltaType().get();

    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    if (binding != null && binding.merged(topicName)) {
        final long resolvedId = routeId;
        final ArrayFW<KafkaOffsetFW> partitions = mergedBeginEx.partitions();

        // A partitionId of -1 carries the default offset for all partitions.
        final KafkaOffsetFW defaultEntry = partitions.matchFirst(p -> p.partitionId() == -1L);
        final long defaultOffset;
        if (defaultEntry != null) {
            defaultOffset = defaultEntry.partitionOffset();
        } else {
            defaultOffset = HISTORICAL.value();
        }

        // Collect explicit per-partition starting offsets (missing value marker: -3).
        final Long2LongHashMap initialOffsetsById = new Long2LongHashMap(-3L);
        partitions.forEach(p -> {
            if (p.partitionId() >= 0L) {
                initialOffsetsById.put(p.partitionId(), p.partitionOffset());
            }
        });

        newStream = new KafkaMergedStream(sender, routeId, initialId, affinity, authorization, topicName, resolvedId, capabilities, initialOffsetsById, defaultOffset, deltaType)::onMergedMessage;
    }
    // null signals the topic is not configured as merged; the caller rejects the stream.
    return newStream;
}
// End of aggregated usage examples.