Uses of io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW in the aklivity/zilla project: class KafkaCacheClientFetchFactory, method newStream.
// Creates the application-facing stream for a cache client FETCH request.
// Decodes the BEGIN frame and its kafka extension, resolves the binding route
// for the requested topic, and attaches the new stream to the per
// (topic, partition) fetch fanout, creating that fanout lazily on first use.
// Returns null when no route matches; the caller treats null as "rejected".
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
final BeginFW begin = beginRO.wrap(buffer, index, index + length);
final long routeId = begin.routeId();
final long initialId = begin.streamId();
final long affinity = begin.affinity();
final long authorization = begin.authorization();
// Low bit set marks an initial (client-initiated) stream id.
assert (initialId & 0x0000_0000_0000_0001L) != 0L;
final OctetsFW extension = begin.extension();
// tryWrap returns null on an absent/malformed extension; a kafka-typed FETCH
// extension is required here (asserts are only enforced when run with -ea).
final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
assert beginEx != null && beginEx.typeId() == kafkaTypeId;
final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;
final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx.fetch();
final String16FW beginTopic = kafkaFetchBeginEx.topic();
final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
final ArrayFW<KafkaFilterFW> filters = kafkaFetchBeginEx.filters();
final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
final String topicName = beginTopic.asString();
MessageConsumer newStream = null;
final KafkaBindingConfig binding = supplyBinding.apply(routeId);
final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
if (resolved != null) {
final long resolvedId = resolved.id;
final int partitionId = progress.partitionId();
final long partitionOffset = progress.partitionOffset();
final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);
// One fanout per (topic, partition): reuse an existing one, or create and register it.
KafkaCacheClientFetchFanout fanout = cacheRoute.clientFetchFanoutsByTopicPartition.get(partitionKey);
if (fanout == null) {
final String cacheName = String.format("%s.%s", supplyNamespace.apply(resolvedId), supplyLocalName.apply(resolvedId));
final KafkaCache cache = supplyCache.apply(cacheName);
final KafkaCacheTopic topic = cache.supplyTopic(topicName);
final KafkaCachePartition partition = topic.supplyFetchPartition(partitionId);
// Route-level "with" override wins; otherwise start from the HISTORICAL default offset.
final long defaultOffset = resolved.with != null ? resolved.with.defaultOffset.value() : KafkaOffsetType.HISTORICAL.value();
final KafkaCacheClientFetchFanout newFanout = new KafkaCacheClientFetchFanout(resolvedId, authorization, affinity, partition, defaultOffset);
cacheRoute.clientFetchFanoutsByTopicPartition.put(partitionKey, newFanout);
fanout = newFanout;
}
final KafkaFilterCondition condition = cursorFactory.asCondition(filters);
// latestOffset doubles as an encoded KafkaOffsetType; the byte narrowing
// assumes the encoded value fits in a byte -- TODO confirm against KafkaOffsetType.
final long latestOffset = kafkaFetchBeginEx.partition().latestOffset();
final KafkaOffsetType maximumOffset = KafkaOffsetType.valueOf((byte) latestOffset);
final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
newStream = new KafkaCacheClientFetchStream(fanout, sender, routeId, initialId, leaderId, authorization, partitionOffset, condition, maximumOffset, deltaType)::onClientMessage;
}
return newStream;
}
Uses of io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW in the aklivity/zilla project: class KafkaCacheServerProduceFactory, method newStream.
/**
 * Creates the application-facing stream for a cache server PRODUCE request.
 * Decodes the BEGIN frame and its kafka extension, resolves the binding route
 * for the requested topic, and attaches the new stream to the per
 * (topic, partition) produce fan, creating that fan lazily on first use.
 * Returns {@code null} when no route matches; the caller treats null as "rejected".
 */
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    // Low bit set marks an initial (client-initiated) stream id.
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    // Fix: use tryWrap (not wrap) so an absent/malformed extension yields null
    // instead of throwing, making the assert below meaningful and matching the
    // sibling cache client fetch factory.
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    // Consistency with the sibling factories: this must be a PRODUCE begin.
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_PRODUCE;
    final KafkaProduceBeginExFW kafkaProduceBeginEx = kafkaBeginEx.produce();
    final String16FW beginTopic = kafkaProduceBeginEx.topic();
    final int partitionId = kafkaProduceBeginEx.partition().partitionId();
    final int remoteIndex = supplyRemoteIndex.applyAsInt(initialId);
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);
        // One fan per (topic, partition): reuse an existing one, or create and register it.
        KafkaCacheServerProduceFan fan = cacheRoute.serverProduceFansByTopicPartition.get(partitionKey);
        if (fan == null) {
            final KafkaCacheServerProduceFan newFan = new KafkaCacheServerProduceFan(resolvedId, authorization, affinity, partitionId, topicName);
            cacheRoute.serverProduceFansByTopicPartition.put(partitionKey, newFan);
            fan = newFan;
        }
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        // NOTE(review): cacheName is derived from routeId here, while the cache
        // client fetch factory uses resolvedId -- confirm which is intended.
        final String cacheName = String.format("%s.%s", supplyNamespace.apply(routeId), supplyLocalName.apply(routeId));
        final KafkaCache cache = supplyCache.apply(cacheName);
        final KafkaCacheTopic topic = cache.supplyTopic(topicName);
        final KafkaCachePartition partition = topic.supplyProducePartition(partitionId, remoteIndex);
        newStream = new KafkaCacheServerProduceStream(fan, sender, routeId, initialId, leaderId, authorization, partition)::onServerMessage;
    }
    return newStream;
}
Uses of io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW in the aklivity/zilla project: class KafkaClientDescribeFactory, method newStream.
/**
 * Creates the application-facing stream for a client DESCRIBE request.
 * Decodes the BEGIN frame and its kafka extension, resolves the binding route
 * for the requested topic, and collects the requested config names before
 * constructing the describe stream.
 * Returns {@code null} when the extension is missing/invalid or no route matches.
 */
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer application) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
    final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? kafkaBeginExRO.tryWrap(extension.buffer(), extension.offset(), extension.limit()) : null;
    MessageConsumer newStream = null;
    // Fix: kafkaBeginEx is null when the extension is absent, malformed, or not
    // kafka-typed; the original asserted kafkaBeginEx.kind() unconditionally,
    // which throws NPE instead of rejecting the stream. Guard before use.
    if (kafkaBeginEx != null) {
        assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_DESCRIBE;
        final KafkaDescribeBeginExFW kafkaDescribeBeginEx = kafkaBeginEx.describe();
        final String16FW beginTopic = kafkaDescribeBeginEx.topic();
        final String topicName = beginTopic.asString();
        final KafkaBindingConfig binding = supplyBinding.apply(routeId);
        final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
        if (resolved != null) {
            final long resolvedId = resolved.id;
            // Materialize the requested config names from the flyweight list.
            final List<String> configs = new ArrayList<>();
            kafkaDescribeBeginEx.configs().forEach(c -> configs.add(c.asString()));
            newStream = new KafkaDescribeStream(application, routeId, initialId, affinity, resolvedId, topicName, configs)::onApplication;
        }
    }
    return newStream;
}
Uses of io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW in the aklivity/zilla project: class KafkaClientProduceFactory, method newStream.
/**
 * Creates the application-facing stream for a client PRODUCE request.
 * Decodes the BEGIN frame and its kafka extension, resolves the binding route
 * for the requested topic, and constructs the produce stream for the target
 * partition.
 * Returns {@code null} when the extension is missing/invalid or no route matches.
 */
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer application) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
    // Fix: the original dereferenced beginEx.typeId() without a null check,
    // but tryWrap returns null for an absent/malformed extension -> NPE.
    final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? extension.get(kafkaBeginExRO::wrap) : null;
    assert kafkaBeginEx == null || kafkaBeginEx.kind() == KafkaBeginExFW.KIND_PRODUCE;
    final KafkaProduceBeginExFW kafkaProduceBeginEx = kafkaBeginEx != null ? kafkaBeginEx.produce() : null;
    MessageConsumer newStream = null;
    if (kafkaProduceBeginEx != null) {
        final String16FW beginTopic = kafkaProduceBeginEx.topic();
        final String topicName = beginTopic.asString();
        final KafkaBindingConfig binding = supplyBinding.apply(routeId);
        final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
        // kafkaBeginEx is necessarily non-null inside this branch, so the
        // original's extra "kafkaBeginEx != null" test was redundant and dropped.
        if (resolved != null) {
            final long resolvedId = resolved.id;
            final int partitionId = kafkaProduceBeginEx.partition().partitionId();
            newStream = new KafkaProduceStream(application, routeId, initialId, affinity, resolvedId, topicName, partitionId)::onApplication;
        }
    }
    return newStream;
}
Uses of io.aklivity.zilla.runtime.binding.kafka.internal.types.String16FW in the aklivity/zilla project: class KafkaClientFetchFactory, method newStream.
/**
 * Creates the application-facing stream for a client FETCH request.
 * Decodes the BEGIN frame and its kafka extension, resolves the binding route
 * for the requested topic, and constructs the fetch stream for the target
 * partition. Only unfiltered fetches are accepted by this factory.
 * Returns {@code null} when the extension is missing/invalid, filters are
 * present, or no route matches.
 */
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer application) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    // The begin affinity carries the partition leader's broker id for this factory.
    final long leaderId = begin.affinity();
    final long authorization = begin.authorization();
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extensionRO.tryWrap(extension.buffer(), extension.offset(), extension.limit());
    // Fix: the original dereferenced beginEx.typeId() before its null check at
    // the "if" below, but tryWrap returns null for an absent/malformed
    // extension -> NPE. Check for null at the point of dereference instead.
    final KafkaBeginExFW kafkaBeginEx = beginEx != null && beginEx.typeId() == kafkaTypeId ? extension.get(kafkaBeginExRO::wrap) : null;
    assert kafkaBeginEx == null || kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;
    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx != null ? kafkaBeginEx.fetch() : null;
    MessageConsumer newStream = null;
    // kafkaFetchBeginEx non-null implies beginEx non-null, so the original's
    // separate "beginEx != null" test was redundant and dropped.
    if (kafkaFetchBeginEx != null && kafkaFetchBeginEx.filters().isEmpty()) {
        final String16FW beginTopic = kafkaFetchBeginEx.topic();
        final String topicName = beginTopic.asString();
        final KafkaBindingConfig binding = supplyBinding.apply(routeId);
        final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
        if (resolved != null) {
            final long resolvedId = resolved.id;
            final KafkaOffsetFW partition = kafkaFetchBeginEx.partition();
            final int partitionId = partition.partitionId();
            final long initialOffset = partition.partitionOffset();
            final long latestOffset = partition.latestOffset();
            newStream = new KafkaFetchStream(application, routeId, initialId, resolvedId, topicName, partitionId, latestOffset, leaderId, initialOffset)::onApplication;
        }
    }
    return newStream;
}
Aggregations