Use of io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic in project zilla by aklivity.
The class CacheFetchIT, method initPartition:
@Before
public void initPartition() {
    final KafkaBinding binding = engine.binding(KafkaBinding.class);
    final KafkaCache cache = binding.supplyCache("test.cache0");
    final KafkaCacheTopic topic = cache.supplyTopic("test");
    this.partition = topic.supplyFetchPartition(0);
}
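The fixture walks the same object chain every factory below repeats: binding → named cache → topic → partition. A produce-side partition can be resolved the same way; the sketch below is illustrative only, assuming the same test engine and a hypothetical producePartition field, and reuses the two-argument supplyProducePartition(partitionId, index) form that appears in the produce factories further down (index 0 is an assumption for a single-writer test).

@Before
public void initProducePartition() {
    // Same resolution chain as initPartition above: binding -> cache -> topic -> partition.
    final KafkaBinding binding = engine.binding(KafkaBinding.class);
    final KafkaCache cache = binding.supplyCache("test.cache0");
    final KafkaCacheTopic topic = cache.supplyTopic("test");
    // supplyProducePartition also takes a writer index; 0 is assumed here.
    this.producePartition = topic.supplyProducePartition(0, 0);
}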
Use of io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic in project zilla by aklivity.
The class KafkaCacheClientFetchFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_FETCH;
    final KafkaFetchBeginExFW kafkaFetchBeginEx = kafkaBeginEx.fetch();
    final String16FW beginTopic = kafkaFetchBeginEx.topic();
    final KafkaOffsetFW progress = kafkaFetchBeginEx.partition();
    final ArrayFW<KafkaFilterFW> filters = kafkaFetchBeginEx.filters();
    final KafkaDeltaType deltaType = kafkaFetchBeginEx.deltaType().get();
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final int partitionId = progress.partitionId();
        final long partitionOffset = progress.partitionOffset();
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);
        KafkaCacheClientFetchFanout fanout = cacheRoute.clientFetchFanoutsByTopicPartition.get(partitionKey);
        if (fanout == null) {
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(resolvedId), supplyLocalName.apply(resolvedId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final KafkaCachePartition partition = topic.supplyFetchPartition(partitionId);
            final long defaultOffset = resolved.with != null ? resolved.with.defaultOffset.value() : KafkaOffsetType.HISTORICAL.value();
            final KafkaCacheClientFetchFanout newFanout = new KafkaCacheClientFetchFanout(resolvedId, authorization, affinity, partition, defaultOffset);
            cacheRoute.clientFetchFanoutsByTopicPartition.put(partitionKey, newFanout);
            fanout = newFanout;
        }
        final KafkaFilterCondition condition = cursorFactory.asCondition(filters);
        final long latestOffset = kafkaFetchBeginEx.partition().latestOffset();
        final KafkaOffsetType maximumOffset = KafkaOffsetType.valueOf((byte) latestOffset);
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        newStream = new KafkaCacheClientFetchStream(fanout, sender, routeId, initialId, leaderId, authorization, partitionOffset, condition, maximumOffset, deltaType)::onClientMessage;
    }
    return newStream;
}
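The fan-out is keyed by topic partition, so every client fetch stream for the same partition attaches to one shared KafkaCacheClientFetchFanout, created lazily by the first stream to arrive. The get-or-create shape of that step is sketched below with a plain java.util.Map standing in for cacheRoute.clientFetchFanoutsByTopicPartition; the class and method names are illustrative, not zilla's.

// Illustrative only: the lazy fan-out registration pattern used above,
// with a plain Map in place of zilla's route-scoped collections.
import java.util.HashMap;
import java.util.Map;
import java.util.function.LongFunction;

final class FanoutRegistry<F> {
    private final Map<Long, F> fanoutsByTopicPartition = new HashMap<>();

    // Returns the fan-out registered for partitionKey, creating it on first use.
    F supplyFanout(long partitionKey, LongFunction<F> newFanout) {
        return fanoutsByTopicPartition.computeIfAbsent(partitionKey, newFanout::apply);
    }
}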
Use of io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic in project zilla by aklivity.
The class KafkaCacheServerProduceFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::wrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    final KafkaProduceBeginExFW kafkaProduceBeginEx = kafkaBeginEx.produce();
    final String16FW beginTopic = kafkaProduceBeginEx.topic();
    final int partitionId = kafkaProduceBeginEx.partition().partitionId();
    final int remoteIndex = supplyRemoteIndex.applyAsInt(initialId);
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);
        KafkaCacheServerProduceFan fan = cacheRoute.serverProduceFansByTopicPartition.get(partitionKey);
        if (fan == null) {
            final KafkaCacheServerProduceFan newFan = new KafkaCacheServerProduceFan(resolvedId, authorization, affinity, partitionId, topicName);
            cacheRoute.serverProduceFansByTopicPartition.put(partitionKey, newFan);
            fan = newFan;
        }
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        final String cacheName = String.format("%s.%s", supplyNamespace.apply(routeId), supplyLocalName.apply(routeId));
        final KafkaCache cache = supplyCache.apply(cacheName);
        final KafkaCacheTopic topic = cache.supplyTopic(topicName);
        final KafkaCachePartition partition = topic.supplyProducePartition(partitionId, remoteIndex);
        newStream = new KafkaCacheServerProduceStream(fan, sender, routeId, initialId, leaderId, authorization, partition)::onServerMessage;
    }
    return newStream;
}
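This factory formats the cache name from routeId, whereas the fetch and client produce factories use resolvedId. As in those factories, (topicName, partitionId) is collapsed into a single long partitionKey via cacheRoute.topicPartitionKey before the shared fan is looked up; one way such a composite key could be packed is sketched below, as an illustration of the idea rather than zilla's actual derivation.

// Illustration only: pack a topic hash and a partition id into one long key,
// similar in spirit to cacheRoute.topicPartitionKey (zilla's real key derivation may differ).
static long topicPartitionKey(String topicName, int partitionId) {
    return ((long) topicName.hashCode() << Integer.SIZE) | (partitionId & 0xFFFF_FFFFL);
}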
Use of io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic in project zilla by aklivity.
The class KafkaCacheClientProduceFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long affinity = begin.affinity();
    final long authorization = begin.authorization();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_PRODUCE;
    final KafkaProduceBeginExFW kafkaProduceBeginEx = kafkaBeginEx.produce();
    final String16FW beginTopic = kafkaProduceBeginEx.topic();
    final int partitionId = kafkaProduceBeginEx.partition().partitionId();
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final long topicKey = cacheRoute.topicKey(topicName);
        final long partitionKey = cacheRoute.topicPartitionKey(topicName, partitionId);
        KafkaCacheClientProduceFan fan = cacheRoute.clientProduceFansByTopicPartition.get(partitionKey);
        if (fan == null) {
            KafkaCacheClientBudget budget = cacheRoute.clientBudgetsByTopic.get(topicKey);
            if (budget == null) {
                budget = new KafkaCacheClientBudget(creditor, supplyBudgetId.getAsLong(), bufferPool.slotCapacity());
                cacheRoute.clientBudgetsByTopic.put(topicKey, budget);
            }
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(resolvedId), supplyLocalName.apply(resolvedId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final KafkaCachePartition partition = topic.supplyProducePartition(partitionId, localIndex);
            final KafkaCacheClientProduceFan newFan = new KafkaCacheClientProduceFan(resolvedId, authorization, affinity, budget, partition);
            cacheRoute.clientProduceFansByTopicPartition.put(partitionKey, newFan);
            fan = newFan;
        }
        final int leaderId = cacheRoute.leadersByPartitionId.get(partitionId);
        newStream = new KafkaCacheClientProduceStream(fan, sender, routeId, initialId, leaderId, authorization)::onClientMessage;
    }
    return newStream;
}
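Client produce fans for different partitions of the same topic share one KafkaCacheClientBudget, keyed by topicKey and created lazily before the first fan. That sharing can be expressed compactly as below, assuming a plain java.util.Map in place of cacheRoute.clientBudgetsByTopic and a hypothetical newBudget() helper wrapping the constructor call shown above.

// Illustrative only: one shared budget per topic, created on first use.
KafkaCacheClientBudget supplyBudget(java.util.Map<Long, KafkaCacheClientBudget> budgetsByTopic, long topicKey) {
    return budgetsByTopic.computeIfAbsent(topicKey, key -> newBudget()); // newBudget() is hypothetical
}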
Use of io.aklivity.zilla.runtime.binding.kafka.internal.cache.KafkaCacheTopic in project zilla by aklivity.
The class KafkaCacheMetaFactory, method newStream:
@Override
public MessageConsumer newStream(int msgTypeId, DirectBuffer buffer, int index, int length, MessageConsumer sender) {
    final BeginFW begin = beginRO.wrap(buffer, index, index + length);
    final long routeId = begin.routeId();
    final long initialId = begin.streamId();
    final long authorization = begin.authorization();
    final long affinity = begin.affinity();
    assert (initialId & 0x0000_0000_0000_0001L) != 0L;
    final OctetsFW extension = begin.extension();
    final ExtensionFW beginEx = extension.get(extensionRO::tryWrap);
    assert beginEx != null && beginEx.typeId() == kafkaTypeId;
    final KafkaBeginExFW kafkaBeginEx = extension.get(kafkaBeginExRO::wrap);
    assert kafkaBeginEx.kind() == KafkaBeginExFW.KIND_META;
    final KafkaMetaBeginExFW kafkaMetaBeginEx = kafkaBeginEx.meta();
    final String16FW beginTopic = kafkaMetaBeginEx.topic();
    final String topicName = beginTopic.asString();
    MessageConsumer newStream = null;
    final KafkaBindingConfig binding = supplyBinding.apply(routeId);
    final KafkaRouteConfig resolved = binding != null ? binding.resolve(authorization, topicName) : null;
    if (resolved != null) {
        final long resolvedId = resolved.id;
        final KafkaCacheRoute cacheRoute = supplyCacheRoute.apply(resolvedId);
        final int topicKey = cacheRoute.topicKey(topicName);
        KafkaCacheMetaFanout fanout = cacheRoute.metaFanoutsByTopic.get(topicKey);
        if (fanout == null) {
            final long cacheId = supplyCacheId.applyAsLong(routeId, resolvedId);
            final String cacheName = String.format("%s.%s", supplyNamespace.apply(cacheId), supplyLocalName.apply(cacheId));
            final KafkaCache cache = supplyCache.apply(cacheName);
            final KafkaCacheTopic topic = cache.supplyTopic(topicName);
            final KafkaCacheMetaFanout newFanout = new KafkaCacheMetaFanout(resolvedId, authorization, topic);
            cacheRoute.metaFanoutsByTopic.put(topicKey, newFanout);
            fanout = newFanout;
        }
        if (fanout != null) {
            newStream = new KafkaCacheMetaStream(fanout, sender, routeId, initialId, affinity, authorization)::onMetaMessage;
        }
    }
    return newStream;
}
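Unlike the fetch and produce factories, which share state per topic partition, metadata fan-outs are shared per topic and keyed by topicKey. Every factory in this section also asserts the low bit of initialId before proceeding; a small helper equivalent to that assertion is sketched below, with an illustrative name, reading the set low bit as marking the initiating stream.

// Equivalent to the assertion on initialId above (helper name is illustrative):
// a set low bit is read as marking the initiating direction of the stream.
static boolean isInitial(long streamId) {
    return (streamId & 0x0000_0000_0000_0001L) != 0L;
}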