Usage example of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
From the class TransactionManagerTest, method testIllegalGenerationInTxnOffsetCommitByGroupMetadata.
@Test
public void testIllegalGenerationInTxnOffsetCommitByGroupMetadata() {
final TopicPartition tp = new TopicPartition("foo", 0);
// Generation id that does not match the manager's tracked generation; the
// broker-side ILLEGAL_GENERATION error below simulates the rejection.
final int illegalGenerationId = 1;
doInitTransactions();
transactionManager.beginTransaction();
// Send offsets using group metadata carrying the stale generation id.
TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(singletonMap(tp, new OffsetAndMetadata(39L)), new ConsumerGroupMetadata(consumerGroupId, illegalGenerationId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Optional.empty()));
// AddOffsetsToTxn succeeds, then the group coordinator must be discovered
// before the TxnOffsetCommit can be sent.
prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
// First TxnOffsetCommit attempt fails with ILLEGAL_GENERATION.
prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, singletonMap(tp, Errors.ILLEGAL_GENERATION));
// A retry is matched here: verify the request fields, and return
// ILLEGAL_GENERATION again when the request's generation still differs
// from the expected one.
client.prepareResponse(request -> {
TxnOffsetCommitRequest txnOffsetCommitRequest = (TxnOffsetCommitRequest) request;
assertEquals(consumerGroupId, txnOffsetCommitRequest.data().groupId());
assertEquals(producerId, txnOffsetCommitRequest.data().producerId());
assertEquals(epoch, txnOffsetCommitRequest.data().producerEpoch());
return txnOffsetCommitRequest.data().generationId() != generationId;
}, new TxnOffsetCommitResponse(0, singletonMap(tp, Errors.ILLEGAL_GENERATION)));
runUntil(transactionManager::hasError);
// ILLEGAL_GENERATION surfaces as a CommitFailedException, which is an
// abortable (not fatal) transaction error.
assertTrue(transactionManager.lastError() instanceof CommitFailedException);
assertTrue(sendOffsetsResult.isCompleted());
assertFalse(sendOffsetsResult.isSuccessful());
assertTrue(sendOffsetsResult.error() instanceof CommitFailedException);
assertAbortableError(CommitFailedException.class);
}
Usage example of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
From the class TransactionManagerTest, method testTransactionalIdAuthorizationFailureInAddOffsetsToTxn.
@Test
public void testTransactionalIdAuthorizationFailureInAddOffsetsToTxn() {
final TopicPartition partition = new TopicPartition("foo", 0);
doInitTransactions();
transactionManager.beginTransaction();

// Attempt to add consumer offsets to the transaction; only the group id is
// carried in the metadata here.
final ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata(consumerGroupId);
final TransactionalRequestResult result = transactionManager.sendOffsetsToTransaction(singletonMap(partition, new OffsetAndMetadata(39L)), groupMetadata);

// Broker rejects the AddOffsetsToTxn request with an authorization error.
prepareAddOffsetsToTxnResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, consumerGroupId, producerId, epoch);
runUntil(transactionManager::hasError);

// Authorization failures are fatal: the pending result completes
// exceptionally and the manager enters a fatal-error state.
assertTrue(transactionManager.lastError() instanceof TransactionalIdAuthorizationException);
assertTrue(result.isCompleted());
assertFalse(result.isSuccessful());
assertTrue(result.error() instanceof TransactionalIdAuthorizationException);
assertFatalError(TransactionalIdAuthorizationException.class);
}
Usage example of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
From the class StreamsProducer, method commitTransaction.
/**
 * Sends the given consumer offsets as part of the ongoing transaction and commits it.
 *
 * @param offsets               the consumer offsets to include in the transaction
 * @param consumerGroupMetadata group metadata of the consumer whose offsets are committed
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException if the producer was fenced while committing
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata consumerGroupMetadata) {
if (!eosEnabled()) {
throw new IllegalStateException(formatException("Exactly-once is not enabled"));
}
maybeBeginTransaction();
try {
// Brokers must be on 2.5+ to understand the full consumer group metadata,
// which EOS-v2 may assume. Under EOS-v1 that assumption does not hold, so
// strip the metadata down to just the group id before sending.
final ConsumerGroupMetadata groupMetadataToSend;
if (processingMode == EXACTLY_ONCE_V2) {
groupMetadataToSend = consumerGroupMetadata;
} else {
groupMetadataToSend = new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
}
producer.sendOffsetsToTransaction(offsets, groupMetadataToSend);
producer.commitTransaction();
transactionInFlight = false;
} catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException fencedException) {
// Fencing means another instance took over our tasks; signal a migration.
throw new TaskMigratedException(formatException("Producer got fenced trying to commit a transaction"), fencedException);
} catch (final TimeoutException timeoutException) {
// re-throw to trigger `task.timeout.ms`
throw timeoutException;
} catch (final KafkaException fatalException) {
throw new StreamsException(formatException("Error encountered trying to commit a transaction"), fatalException);
}
}
Usage example of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
From the class StreamThreadTest, method shouldRecordCommitLatency.
@Test
public void shouldRecordCommitLatency() {
// Stub out the main consumer and its group metadata so the stream thread
// can run a poll loop without a real broker.
final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
expect(consumer.poll(anyObject())).andStubReturn(new ConsumerRecords<>(Collections.emptyMap()));
// Minimal active task assigned to task1 / t1p1.
final Task task = niceMock(Task.class);
expect(task.id()).andStubReturn(task1);
expect(task.inputPartitions()).andStubReturn(Collections.singleton(t1p1));
expect(task.committedOffsets()).andStubReturn(Collections.emptyMap());
expect(task.highWaterMark()).andStubReturn(Collections.emptyMap());
final ActiveTaskCreator activeTaskCreator = mock(ActiveTaskCreator.class);
expect(activeTaskCreator.createTasks(anyObject(), anyObject())).andStubReturn(Collections.singleton(task));
expect(activeTaskCreator.producerClientIds()).andStubReturn(Collections.singleton("producerClientId"));
expect(activeTaskCreator.uncreatedTasksForTopologies(anyObject())).andStubReturn(emptyMap());
activeTaskCreator.removeRevokedUnknownTasks(singleton(task1));
final StandbyTaskCreator standbyTaskCreator = mock(StandbyTaskCreator.class);
expect(standbyTaskCreator.uncreatedTasksForTopologies(anyObject())).andStubReturn(emptyMap());
standbyTaskCreator.removeRevokedUnknownTasks(emptySet());
EasyMock.replay(consumer, consumerGroupMetadata, task, activeTaskCreator, standbyTaskCreator);
final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
topologyMetadata.buildAndRewriteTopology();
// Override commit() so each commit deterministically advances the mocked
// clock by 10 ms — that becomes the recorded commit latency.
final TaskManager taskManager = new TaskManager(null, null, null, null, null, activeTaskCreator, standbyTaskCreator, topologyMetadata, null, null) {
@Override
int commit(final Collection<Task> tasksToCommit) {
mockTime.sleep(10L);
return 1;
}
};
taskManager.setMainConsumer(consumer);
final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);
thread.updateThreadMetadata("adminClientId");
thread.setState(StreamThread.State.STARTING);
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
activeTasks.put(task1, Collections.singleton(t1p1));
thread.taskManager().handleAssignment(activeTasks, emptyMap());
thread.rebalanceListener().onPartitionsAssigned(Collections.singleton(t1p1));
// Before any commit has happened, both latency metrics must be NaN.
assertTrue(Double.isNaN((Double) streamsMetrics.metrics().get(new MetricName("commit-latency-max", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue()));
assertTrue(Double.isNaN((Double) streamsMetrics.metrics().get(new MetricName("commit-latency-avg", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue()));
thread.runOnce();
// One loop iteration commits once, so max and avg both equal the 10 ms
// injected by the overridden commit().
assertThat(streamsMetrics.metrics().get(new MetricName("commit-latency-max", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue(), equalTo(10.0));
assertThat(streamsMetrics.metrics().get(new MetricName("commit-latency-avg", "stream-thread-metrics", "", Collections.singletonMap("thread-id", CLIENT_ID))).metricValue(), equalTo(10.0));
}
Usage example of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in the Apache Kafka project.
From the class StreamThreadTest, method shouldShutdownTaskManagerOnClose.
@Test
public void shouldShutdownTaskManagerOnClose() {
// Stub the main consumer and its group metadata; the thread only needs
// these to start its poll loop.
final Consumer<byte[], byte[]> mainConsumer = EasyMock.createNiceMock(Consumer.class);
final ConsumerGroupMetadata groupMetadata = mock(ConsumerGroupMetadata.class);
expect(mainConsumer.groupMetadata()).andStubReturn(groupMetadata);
expect(groupMetadata.groupInstanceId()).andReturn(Optional.empty());
EasyMock.replay(groupMetadata);

// The task manager mock records the shutdown(true) call we verify below.
final TaskManager mockTaskManager = EasyMock.createNiceMock(TaskManager.class);
expect(mockTaskManager.producerClientIds()).andStubReturn(Collections.emptySet());
mockTaskManager.shutdown(true);
EasyMock.expectLastCall();
EasyMock.replay(mockTaskManager, mainConsumer);

final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
topologyMetadata.buildAndRewriteTopology();
final StreamThread thread = buildStreamThread(mainConsumer, mockTaskManager, config, topologyMetadata).updateThreadMetadata(getSharedAdminClientId(CLIENT_ID));

// Request shutdown as soon as the thread transitions out of CREATED so
// run() returns promptly.
thread.setStateListener((t, newState, oldState) -> {
if (oldState == StreamThread.State.CREATED && newState == StreamThread.State.STARTING) {
thread.shutdown();
}
});
thread.run();

// Closing the thread must have shut down the task manager.
verify(mockTaskManager);
}
Aggregations