Use of io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator in the project starlight-for-kafka by datastax.
The class KafkaProtocolHandler, method initTransactionCoordinator:
public TransactionCoordinator initTransactionCoordinator(String tenant, PulsarAdmin pulsarAdmin,
                                                          ClusterData clusterData) throws Exception {
    TransactionConfig transactionConfig = TransactionConfig.builder()
            .transactionLogNumPartitions(kafkaConfig.getKafkaTxnLogTopicNumPartitions())
            .transactionMetadataTopicName(MetadataUtils.constructTxnLogTopicBaseName(tenant, kafkaConfig))
            .transactionProducerIdTopicName(MetadataUtils.constructTxnProducerIdTopicBaseName(tenant, kafkaConfig))
            .abortTimedOutTransactionsIntervalMs(kafkaConfig.getKafkaTxnAbortTimedOutTransactionCleanupIntervalMs())
            .transactionalIdExpirationMs(kafkaConfig.getKafkaTransactionalIdExpirationMs())
            .removeExpiredTransactionalIdsIntervalMs(
                    kafkaConfig.getKafkaTransactionsRemoveExpiredTransactionalIdCleanupIntervalMs())
            .brokerId(kafkaConfig.getKafkaBrokerId())
            .build();
    MetadataUtils.createTxnMetadataIfMissing(tenant, pulsarAdmin, clusterData, kafkaConfig);
    TransactionCoordinator transactionCoordinator = TransactionCoordinator.of(
            tenant,
            kafkaConfig,
            transactionConfig,
            txnTopicClient,
            brokerService.getPulsar().getLocalMetadataStore(),
            kopBrokerLookupManager,
            OrderedScheduler.newSchedulerBuilder()
                    .name("transaction-log-manager-" + tenant)
                    .numThreads(1)
                    .build(),
            Time.SYSTEM);
    transactionCoordinator.startup(kafkaConfig.isKafkaTransactionalIdExpirationEnable()).get();
    return transactionCoordinator;
}
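The transaction log and producer-id topics configured above are tenant-scoped, so a protocol handler would typically create and cache one coordinator per Kafka tenant. Below is a minimal caching sketch assuming access to the method above; the map and helper names are illustrative, not actual KafkaProtocolHandler members, and imports are omitted as in the snippets here.
// Hypothetical per-tenant cache around initTransactionCoordinator (illustrative names only).
private final ConcurrentHashMap<String, TransactionCoordinator> transactionCoordinatorsByTenant =
        new ConcurrentHashMap<>();

private TransactionCoordinator transactionCoordinatorForTenant(String tenant,
                                                               PulsarAdmin pulsarAdmin,
                                                               ClusterData clusterData) {
    return transactionCoordinatorsByTenant.computeIfAbsent(tenant, t -> {
        try {
            // Delegates to the method shown above: builds the config, creates the
            // tenant's transaction metadata topics if missing, and starts the coordinator.
            return initTransactionCoordinator(t, pulsarAdmin, clusterData);
        } catch (Exception e) {
            throw new IllegalStateException("Failed to start transaction coordinator for tenant " + t, e);
        }
    });
}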
Use of io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator in the project starlight-for-kafka by datastax.
The class KafkaRequestHandler, method handleFindCoordinatorRequest:
@Override
protected void handleFindCoordinatorRequest(KafkaHeaderAndRequest findCoordinator,
                                            CompletableFuture<AbstractResponse> resultFuture) {
    checkArgument(findCoordinator.getRequest() instanceof FindCoordinatorRequest);
    FindCoordinatorRequest request = (FindCoordinatorRequest) findCoordinator.getRequest();
    String pulsarTopicName;
    int partition;
    if (request.coordinatorType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION) {
        TransactionCoordinator transactionCoordinator = getTransactionCoordinator();
        partition = transactionCoordinator.partitionFor(request.coordinatorKey());
        pulsarTopicName = transactionCoordinator.getTopicPartitionName(partition);
    } else if (request.coordinatorType() == FindCoordinatorRequest.CoordinatorType.GROUP) {
        partition = getGroupCoordinator().partitionFor(request.coordinatorKey());
        pulsarTopicName = getGroupCoordinator().getTopicPartitionName(partition);
    } else {
        throw new NotImplementedException("FindCoordinatorRequest not support unknown type "
                + request.coordinatorType());
    }
    String groupId = request.coordinatorKey();
    String groupIdPath = GroupIdUtils.groupIdPathFormat(findCoordinator.getClientHost(),
            findCoordinator.getHeader().clientId());
    // Store group name to metadata store for current client, use to collect consumer metrics.
    storeGroupId(groupId, groupIdPath).whenComplete((__, ex) -> {
        if (ex != null) {
            log.warn("Store groupId failed, the groupId might already stored.", ex);
        }
        findBroker(TopicName.get(pulsarTopicName)).whenComplete((node, throwable) -> {
            if (node.error() != Errors.NONE || throwable != null) {
                log.error("[{}] Request {}: Error while find coordinator.",
                        ctx.channel(), findCoordinator.getHeader(), throwable);
                resultFuture.complete(KafkaResponseUtils.newFindCoordinator(Errors.LEADER_NOT_AVAILABLE));
                return;
            }
            if (log.isDebugEnabled()) {
                log.debug("[{}] Found node {} as coordinator for key {} partition {}.",
                        ctx.channel(), node.leader(), request.coordinatorKey(), partition);
            }
            resultFuture.complete(KafkaResponseUtils.newFindCoordinator(node.leader()));
        });
    });
}
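For context, the GROUP branch above is exercised by any plain Kafka client that joins a consumer group: the client sends FindCoordinator with the group id as the coordinator key, and the handler maps that key to a partition of the group metadata topic and answers with the broker that owns it. A minimal client-side sketch against a KoP listener follows; the broker address, topic, and group name are placeholders and imports are omitted.
// Subscribing with a group.id makes the client issue FindCoordinator(GROUP, "my-group")
// before it can join the group and commit offsets.
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder KoP listener
props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Collections.singletonList("my-topic")); // placeholder topic
    consumer.poll(Duration.ofSeconds(1));                      // triggers the coordinator lookup
}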
Use of io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator in the project starlight-for-kafka by datastax.
The class KafkaRequestHandler, method handleInitProducerId:
@Override
protected void handleInitProducerId(KafkaHeaderAndRequest kafkaHeaderAndRequest,
                                    CompletableFuture<AbstractResponse> response) {
    InitProducerIdRequest request = (InitProducerIdRequest) kafkaHeaderAndRequest.getRequest();
    TransactionCoordinator transactionCoordinator = getTransactionCoordinator();
    transactionCoordinator.handleInitProducerId(request.transactionalId(), request.transactionTimeoutMs(),
            Optional.empty(), (resp) -> {
        response.complete(new InitProducerIdResponse(0, resp.getError(),
                resp.getProducerId(), resp.getProducerEpoch()));
    });
}
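On the client side, this path is reached through KafkaProducer#initTransactions(): the client first locates the transaction coordinator (the TRANSACTION branch of handleFindCoordinatorRequest above) and then sends InitProducerId, which the handler forwards to the TransactionCoordinator to obtain a producer id and epoch. A minimal sketch of a transactional producer follows; the broker address and transactional id are placeholders and imports are omitted.
// Transactional producer setup; initTransactions() drives the InitProducerId path above.
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder KoP listener
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "my-txn-id");       // becomes request.transactionalId()
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(props);
producer.initTransactions(); // FindCoordinator(TRANSACTION) followed by InitProducerId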
Use of io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator in the project starlight-for-kafka by datastax.
The class KafkaRequestHandler, method handleAddPartitionsToTxn:
@Override
protected void handleAddPartitionsToTxn(KafkaHeaderAndRequest kafkaHeaderAndRequest,
                                        CompletableFuture<AbstractResponse> response) {
    AddPartitionsToTxnRequest request = (AddPartitionsToTxnRequest) kafkaHeaderAndRequest.getRequest();
    List<TopicPartition> partitionsToAdd = request.partitions();
    Map<TopicPartition, Errors> unauthorizedTopicErrors = Maps.newConcurrentMap();
    Map<TopicPartition, Errors> nonExistingTopicErrors = Maps.newConcurrentMap();
    Set<TopicPartition> authorizedPartitions = Sets.newConcurrentHashSet();
    TransactionCoordinator transactionCoordinator = getTransactionCoordinator();
    AtomicInteger unfinishedAuthorizationCount = new AtomicInteger(partitionsToAdd.size());
    Consumer<Runnable> completeOne = (action) -> {
        action.run();
        if (unfinishedAuthorizationCount.decrementAndGet() == 0) {
            if (!unauthorizedTopicErrors.isEmpty() || !nonExistingTopicErrors.isEmpty()) {
                Map<TopicPartition, Errors> partitionErrors = Maps.newHashMap();
                partitionErrors.putAll(unauthorizedTopicErrors);
                partitionErrors.putAll(nonExistingTopicErrors);
                for (TopicPartition topicPartition : authorizedPartitions) {
                    partitionErrors.put(topicPartition, Errors.OPERATION_NOT_ATTEMPTED);
                }
                response.complete(new AddPartitionsToTxnResponse(0, partitionErrors));
            } else {
                transactionCoordinator.handleAddPartitionsToTransaction(request.transactionalId(),
                        request.producerId(), request.producerEpoch(), authorizedPartitions, (errors) -> {
                    // TODO: handle PRODUCER_FENCED errors
                    Map<TopicPartition, Errors> topicPartitionErrorsMap =
                            addPartitionError(partitionsToAdd, errors);
                    response.complete(new AddPartitionsToTxnResponse(0, topicPartitionErrorsMap));
                });
            }
        }
    };
    String namespacePrefix = currentNamespacePrefix();
    partitionsToAdd.forEach(tp -> {
        String fullPartitionName;
        try {
            fullPartitionName = KopTopic.toString(tp, namespacePrefix);
        } catch (KoPTopicException e) {
            log.warn("Invalid topic name: {}", tp.topic(), e);
            completeOne.accept(() -> nonExistingTopicErrors.put(tp, Errors.UNKNOWN_TOPIC_OR_PARTITION));
            return;
        }
        authorize(AclOperation.WRITE, Resource.of(ResourceType.TOPIC, fullPartitionName))
                .whenComplete((isAuthorized, ex) -> {
            if (ex != null) {
                log.error("AddPartitionsToTxn topic authorize failed, topic - {}. {}",
                        fullPartitionName, ex.getMessage());
                completeOne.accept(() -> unauthorizedTopicErrors.put(tp, Errors.TOPIC_AUTHORIZATION_FAILED));
                return;
            }
            if (!isAuthorized) {
                completeOne.accept(() -> unauthorizedTopicErrors.put(tp, Errors.TOPIC_AUTHORIZATION_FAILED));
                return;
            }
            completeOne.accept(() -> authorizedPartitions.add(tp));
        });
    });
}
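Continuing the transactional producer sketched after handleInitProducerId: the client sends AddPartitionsToTxn the first time it writes to each partition inside a transaction, which is what the per-topic WRITE authorization loop above checks before the partitions are registered with the TransactionCoordinator. The topic name below is a placeholder.
// First send() to a partition within the transaction triggers AddPartitionsToTxn for it.
producer.beginTransaction();
producer.send(new ProducerRecord<>("my-topic", "key", "value"));
producer.commitTransaction();
producer.close();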
Use of io.streamnative.pulsar.handlers.kop.coordinator.transaction.TransactionCoordinator in the project kop by streamnative.
The class KafkaRequestHandler, method handleInitProducerId:
@Override
protected void handleInitProducerId(KafkaHeaderAndRequest kafkaHeaderAndRequest,
                                    CompletableFuture<AbstractResponse> response) {
    InitProducerIdRequest request = (InitProducerIdRequest) kafkaHeaderAndRequest.getRequest();
    TransactionCoordinator transactionCoordinator = getTransactionCoordinator();
    transactionCoordinator.handleInitProducerId(request.transactionalId(), request.transactionTimeoutMs(),
            Optional.empty(), (resp) -> {
        response.complete(new InitProducerIdResponse(0, resp.getError(),
                resp.getProducerId(), resp.getProducerEpoch()));
    });
}