Use of org.apache.kafka.clients.admin.ProducerState in project kafka by apache.
In the class DescribeProducersHandler, method handleResponse:
@Override
public ApiResult<TopicPartition, PartitionProducerState> handleResponse(
    Node broker,
    Set<TopicPartition> keys,
    AbstractResponse abstractResponse
) {
    DescribeProducersResponse response = (DescribeProducersResponse) abstractResponse;
    Map<TopicPartition, PartitionProducerState> completed = new HashMap<>();
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    List<TopicPartition> unmapped = new ArrayList<>();
    for (DescribeProducersResponseData.TopicResponse topicResponse : response.data().topics()) {
        for (DescribeProducersResponseData.PartitionResponse partitionResponse : topicResponse.partitions()) {
            TopicPartition topicPartition = new TopicPartition(
                topicResponse.name(), partitionResponse.partitionIndex());
            Errors error = Errors.forCode(partitionResponse.errorCode());
            if (error != Errors.NONE) {
                ApiError apiError = new ApiError(error, partitionResponse.errorMessage());
                handlePartitionError(topicPartition, apiError, failed, unmapped);
                continue;
            }
            List<ProducerState> activeProducers = partitionResponse.activeProducers().stream()
                .map(activeProducer -> {
                    OptionalLong currentTransactionFirstOffset = activeProducer.currentTxnStartOffset() < 0
                        ? OptionalLong.empty()
                        : OptionalLong.of(activeProducer.currentTxnStartOffset());
                    OptionalInt coordinatorEpoch = activeProducer.coordinatorEpoch() < 0
                        ? OptionalInt.empty()
                        : OptionalInt.of(activeProducer.coordinatorEpoch());
                    return new ProducerState(
                        activeProducer.producerId(),
                        activeProducer.producerEpoch(),
                        activeProducer.lastSequence(),
                        activeProducer.lastTimestamp(),
                        coordinatorEpoch,
                        currentTransactionFirstOffset
                    );
                })
                .collect(Collectors.toList());
            completed.put(topicPartition, new PartitionProducerState(activeProducers));
        }
    }
    return new ApiResult<>(completed, failed, unmapped);
}
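The handler above builds one PartitionProducerState per partition, and those values ultimately surface through the public Admin#describeProducers API. As a minimal sketch of how a caller might read the resulting ProducerState fields, the example below iterates the active producers of a single partition; the Admin instance, the class name, the topic "foo", and partition 0 are assumptions made up for the illustration, not part of the handler code.

// A minimal sketch (not from the Kafka sources): reading the PartitionProducerState
// values that handleResponse assembles, via the public Admin API.
import java.util.Collections;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.PartitionProducerState;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.TopicPartition;

public class DescribeProducersExample {
    static void printActiveProducers(Admin admin) throws Exception {
        TopicPartition tp = new TopicPartition("foo", 0);
        // all() completes with one PartitionProducerState per requested partition,
        // i.e. the "completed" map assembled in handleResponse above.
        PartitionProducerState partitionState =
            admin.describeProducers(Collections.singleton(tp)).all().get().get(tp);
        for (ProducerState state : partitionState.activeProducers()) {
            System.out.printf("producerId=%d epoch=%d lastSequence=%d coordinatorEpoch=%s txnStartOffset=%s%n",
                state.producerId(),
                state.producerEpoch(),
                state.lastSequence(),
                state.coordinatorEpoch(),
                state.currentTransactionStartOffset());
        }
    }
}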
Use of org.apache.kafka.clients.admin.ProducerState in project kafka by apache.
In the class TransactionsCommandTest, method expectDescribeProducers:
private void expectDescribeProducers(
    TopicPartition topicPartition,
    long producerId,
    short producerEpoch,
    long lastTimestamp,
    OptionalInt coordinatorEpoch,
    OptionalLong txnStartOffset
) {
    PartitionProducerState partitionProducerState = new PartitionProducerState(singletonList(
        new ProducerState(producerId, producerEpoch, 500, lastTimestamp, coordinatorEpoch, txnStartOffset)
    ));
    DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
    Mockito.when(result.all())
        .thenReturn(completedFuture(singletonMap(topicPartition, partitionProducerState)));
    Mockito.when(admin.describeProducers(
        Collections.singletonList(topicPartition),
        new DescribeProducersOptions()
    )).thenReturn(result);
}
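The helper fixes lastSequence at 500 and stubs both admin.describeProducers and its all() future in one place, so individual tests only supply the values they care about. A hypothetical call from a test body might look like the following; the test name and the concrete argument values are illustrative only and not taken from the real suite.

// Illustrative only: the test name and values are assumptions, not from the real tests.
@Test
public void exampleUsageOfExpectDescribeProducers() throws Exception {
    TopicPartition topicPartition = new TopicPartition("foo", 0);
    expectDescribeProducers(
        topicPartition,
        12345L,                // producerId
        (short) 15,            // producerEpoch
        1599509565L,           // lastTimestamp
        OptionalInt.of(76),    // coordinatorEpoch
        OptionalLong.of(9173L) // txnStartOffset
    );
    // ... the rest of the test would drive the command and assert on its output
}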
Use of org.apache.kafka.clients.admin.ProducerState in project kafka by apache.
In the class TransactionsCommandTest, method testNewBrokerAbortTransaction:
@Test
public void testNewBrokerAbortTransaction() throws Exception {
    TopicPartition topicPartition = new TopicPartition("foo", 5);
    long startOffset = 9173;
    long producerId = 12345L;
    short producerEpoch = 15;
    int coordinatorEpoch = 76;

    String[] args = new String[] {
        "--bootstrap-server", "localhost:9092",
        "abort",
        "--topic", topicPartition.topic(),
        "--partition", String.valueOf(topicPartition.partition()),
        "--start-offset", String.valueOf(startOffset)
    };

    DescribeProducersResult describeResult = Mockito.mock(DescribeProducersResult.class);
    KafkaFuture<PartitionProducerState> describeFuture = completedFuture(
        new PartitionProducerState(singletonList(
            new ProducerState(producerId, producerEpoch, 1300, 1599509565L,
                OptionalInt.of(coordinatorEpoch), OptionalLong.of(startOffset))
        ))
    );

    AbortTransactionResult abortTransactionResult = Mockito.mock(AbortTransactionResult.class);
    KafkaFuture<Void> abortFuture = completedFuture(null);
    AbortTransactionSpec expectedAbortSpec = new AbortTransactionSpec(
        topicPartition, producerId, producerEpoch, coordinatorEpoch);

    Mockito.when(describeResult.partitionResult(topicPartition)).thenReturn(describeFuture);
    Mockito.when(admin.describeProducers(singleton(topicPartition))).thenReturn(describeResult);
    Mockito.when(abortTransactionResult.all()).thenReturn(abortFuture);
    Mockito.when(admin.abortTransaction(expectedAbortSpec)).thenReturn(abortTransactionResult);

    execute(args);
    assertNormalExit();
}
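This test exercises the newer abort path: the command first describes producers for the target partition, selects the producer whose current transaction start offset matches --start-offset, and then calls abortTransaction with a matching AbortTransactionSpec. The sketch below shows roughly what that flow looks like against a real Admin client; the class and method names, variable names, and the offset-matching style are assumptions for illustration, not the command's actual implementation.

// A rough sketch of the abort flow the command drives (not the command's actual code).
import java.util.Collections;
import java.util.OptionalLong;
import org.apache.kafka.clients.admin.AbortTransactionSpec;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.TopicPartition;

public class AbortTransactionExample {
    static void abortHangingTransaction(Admin admin, TopicPartition tp, long startOffset) throws Exception {
        // Find the active producer whose open transaction starts at the given offset.
        for (ProducerState state : admin.describeProducers(Collections.singleton(tp))
                .partitionResult(tp).get().activeProducers()) {
            if (state.currentTransactionStartOffset().equals(OptionalLong.of(startOffset))) {
                AbortTransactionSpec spec = new AbortTransactionSpec(
                    tp,
                    state.producerId(),
                    (short) state.producerEpoch(),
                    state.coordinatorEpoch().orElse(-1));
                // Issue the abort against the matching producer.
                admin.abortTransaction(spec).all().get();
                return;
            }
        }
        throw new IllegalStateException("No active transaction starting at offset " + startOffset);
    }
}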
Use of org.apache.kafka.clients.admin.ProducerState in project kafka by apache.
In the class TransactionsCommandTest, method testDescribeProducers:
private void testDescribeProducers(
    TopicPartition topicPartition,
    String[] args,
    DescribeProducersOptions expectedOptions
) throws Exception {
    DescribeProducersResult describeResult = Mockito.mock(DescribeProducersResult.class);
    KafkaFuture<PartitionProducerState> describeFuture = completedFuture(
        new PartitionProducerState(asList(
            new ProducerState(12345L, 15, 1300, 1599509565L, OptionalInt.of(20), OptionalLong.of(990)),
            new ProducerState(98765L, 30, 2300, 1599509599L, OptionalInt.empty(), OptionalLong.empty())
        ))
    );
    Mockito.when(describeResult.partitionResult(topicPartition)).thenReturn(describeFuture);
    Mockito.when(admin.describeProducers(singleton(topicPartition), expectedOptions)).thenReturn(describeResult);

    execute(args);
    assertNormalExit();

    List<List<String>> table = readOutputAsTable();
    assertEquals(3, table.size());

    List<String> expectedHeaders = asList(TransactionsCommand.DescribeProducersCommand.HEADERS);
    assertEquals(expectedHeaders, table.get(0));

    Set<List<String>> expectedRows = Utils.mkSet(
        asList("12345", "15", "20", "1300", "1599509565", "990"),
        asList("98765", "30", "-1", "2300", "1599509599", "None")
    );
    assertEquals(expectedRows, new HashSet<>(table.subList(1, table.size())));
}
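Callers of this shared helper supply the command-line arguments and the DescribeProducersOptions they expect the command to pass to the Admin client. A hypothetical caller is sketched below; the test name and the "describe-producers" subcommand spelling are assumptions modelled on the "abort" invocation shown earlier, not copied from the real suite.

// Hypothetical caller (not from the real suite): wires the shared helper
// to a concrete set of command-line arguments.
@Test
public void exampleDescribeProducers() throws Exception {
    TopicPartition topicPartition = new TopicPartition("foo", 5);
    String[] args = new String[] {
        "--bootstrap-server", "localhost:9092",
        "describe-producers",
        "--topic", topicPartition.topic(),
        "--partition", String.valueOf(topicPartition.partition())
    };
    testDescribeProducers(topicPartition, args, new DescribeProducersOptions());
}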