Example 6 with Producer

Use of org.apache.kafka.clients.producer.Producer in project spring-boot by spring-projects.

From class KafkaAutoConfigurationIntegrationTests, method testEndToEnd.

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
void testEndToEnd() throws Exception {
    load(KafkaConfig.class, "spring.kafka.bootstrap-servers:" + getEmbeddedKafkaBrokersAsString(), "spring.kafka.consumer.group-id=testGroup", "spring.kafka.consumer.auto-offset-reset=earliest");
    KafkaTemplate<String, String> template = this.context.getBean(KafkaTemplate.class);
    template.send(TEST_TOPIC, "foo", "bar");
    Listener listener = this.context.getBean(Listener.class);
    assertThat(listener.latch.await(30, TimeUnit.SECONDS)).isTrue();
    assertThat(listener.key).isEqualTo("foo");
    assertThat(listener.received).isEqualTo("bar");
    DefaultKafkaProducerFactory producerFactory = this.context.getBean(DefaultKafkaProducerFactory.class);
    Producer producer = producerFactory.createProducer();
    assertThat(producer.partitionsFor(ADMIN_CREATED_TOPIC).size()).isEqualTo(10);
    producer.close();
}
Also used : KafkaListener(org.springframework.kafka.annotation.KafkaListener) Producer(org.apache.kafka.clients.producer.Producer) DefaultKafkaProducerFactory(org.springframework.kafka.core.DefaultKafkaProducerFactory) Test(org.junit.jupiter.api.Test)
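
As a rough illustration of the pattern this test exercises (borrowing a Producer from spring-kafka's DefaultKafkaProducerFactory to inspect topic metadata), here is a minimal standalone sketch. The broker address and topic name are placeholders, not values from the test:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;

public class ProducerFactorySketch {

    public static void main(String[] args) {
        // Placeholder broker address; the test resolves this from an embedded Kafka cluster.
        Map<String, Object> configs = new HashMap<>();
        configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(configs);
        Producer<String, String> producer = factory.createProducer();
        try {
            // partitionsFor() fetches topic metadata; the test uses it to assert the partition count.
            System.out.println("partitions: " + producer.partitionsFor("someTopic").size());
        } finally {
            // For a factory-managed producer, close() releases it back to the factory
            // instead of closing the underlying shared client.
            producer.close();
        }
    }
}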

Example 7 with Producer

Use of org.apache.kafka.clients.producer.Producer in project incubator-atlas by apache.

From class KafkaNotificationMockTest, method shouldSendMessagesSuccessfully.

@Test
@SuppressWarnings("unchecked")
public void shouldSendMessagesSuccessfully() throws NotificationException, ExecutionException, InterruptedException {
    Properties configProperties = mock(Properties.class);
    KafkaNotification kafkaNotification = new KafkaNotification(configProperties);
    Producer producer = mock(Producer.class);
    String topicName = kafkaNotification.getTopicName(NotificationInterface.NotificationType.HOOK);
    String message = "This is a test message";
    Future returnValue = mock(Future.class);
    when(returnValue.get()).thenReturn(new RecordMetadata(new TopicPartition(topicName, 0), 0, 0));
    ProducerRecord expectedRecord = new ProducerRecord(topicName, message);
    when(producer.send(expectedRecord)).thenReturn(returnValue);
    kafkaNotification.sendInternalToProducer(producer, NotificationInterface.NotificationType.HOOK, new String[] { message });
    verify(producer).send(expectedRecord);
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Producer(org.apache.kafka.clients.producer.Producer) TopicPartition(org.apache.kafka.common.TopicPartition) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future) Properties(java.util.Properties) Test(org.testng.annotations.Test)
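
The mock above stands in for the producer's asynchronous send(), whose returned Future is resolved with get() to make the send effectively synchronous. A minimal sketch of that send-and-wait pattern against a real producer follows; the broker address and topic name are illustrative placeholders:

import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class BlockingSendSketch {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // send() is asynchronous and returns a Future<RecordMetadata>;
            // get() blocks until the broker acknowledges the write.
            Future<RecordMetadata> future = producer.send(new ProducerRecord<>("someTopic", "This is a test message"));
            RecordMetadata metadata = future.get();
            System.out.printf("written to %s-%d at offset %d%n", metadata.topic(), metadata.partition(), metadata.offset());
        }
    }
}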

Example 8 with Producer

Use of org.apache.kafka.clients.producer.Producer in project atlas by apache.

From class KafkaNotificationMockTest, method shouldCollectAllFailedMessagesIfProducerFails.

@Test
@SuppressWarnings("unchecked")
public void shouldCollectAllFailedMessagesIfProducerFails() throws NotificationException, ExecutionException, InterruptedException {
    Properties configProperties = mock(Properties.class);
    KafkaNotification kafkaNotification = new KafkaNotification(configProperties);
    Producer producer = mock(Producer.class);
    String topicName = kafkaNotification.getTopicName(NotificationInterface.NotificationType.HOOK);
    String message1 = "This is a test message1";
    String message2 = "This is a test message2";
    Future returnValue1 = mock(Future.class);
    when(returnValue1.get()).thenThrow(new RuntimeException("Simulating exception"));
    Future returnValue2 = mock(Future.class);
    when(returnValue2.get()).thenThrow(new RuntimeException("Simulating exception"));
    ProducerRecord expectedRecord1 = new ProducerRecord(topicName, message1);
    when(producer.send(expectedRecord1)).thenReturn(returnValue1);
    ProducerRecord expectedRecord2 = new ProducerRecord(topicName, message2);
    when(producer.send(expectedRecord2)).thenReturn(returnValue2);
    try {
        kafkaNotification.sendInternalToProducer(producer, NotificationInterface.NotificationType.HOOK, Arrays.asList(new String[] { message1, message2 }));
        fail("Should have thrown NotificationException");
    } catch (NotificationException e) {
        assertEquals(e.getFailedMessages().size(), 2);
        assertEquals(e.getFailedMessages().get(0), "This is a test message1");
        assertEquals(e.getFailedMessages().get(1), "This is a test message2");
    }
}
Also used : Producer(org.apache.kafka.clients.producer.Producer) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) NotificationException(org.apache.atlas.notification.NotificationException) Future(java.util.concurrent.Future) Properties(java.util.Properties) Test(org.testng.annotations.Test)
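
The behavior under test is resolving every send Future and collecting the payloads whose futures failed, instead of aborting on the first error. A simplified sketch of that pattern is shown below; the method name, signature, and return type are stand-ins, not Atlas's actual sendInternalToProducer API:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class FailureCollectingSender {

    // Hypothetical helper; Atlas's real sendInternalToProducer has a different
    // signature and wraps the failures in a NotificationException.
    static List<String> sendAll(Producer<String, String> producer, String topic, List<String> messages) {
        // Fire off every send first so one failing record does not block the rest.
        List<Future<RecordMetadata>> futures = new ArrayList<>();
        for (String message : messages) {
            futures.add(producer.send(new ProducerRecord<>(topic, message)));
        }
        // Then resolve each future and remember the payloads that failed, so callers can retry them.
        List<String> failedMessages = new ArrayList<>();
        for (int i = 0; i < futures.size(); i++) {
            try {
                futures.get(i).get();
            } catch (Exception e) {
                failedMessages.add(messages.get(i));
            }
        }
        return failedMessages;
    }
}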

Example 9 with Producer

Use of org.apache.kafka.clients.producer.Producer in project nakadi by zalando.

From class KafkaTopicRepository, method syncPostBatch.

@Override
public void syncPostBatch(final String topicId, final List<BatchItem> batch) throws EventPublishingException {
    final Producer<String, String> producer = kafkaFactory.takeProducer();
    try {
        final Map<String, String> partitionToBroker = producer.partitionsFor(topicId).stream().collect(Collectors.toMap(p -> String.valueOf(p.partition()), p -> String.valueOf(p.leader().id())));
        batch.forEach(item -> {
            Preconditions.checkNotNull(item.getPartition(), "BatchItem partition can't be null at the moment of publishing!");
            item.setBrokerId(partitionToBroker.get(item.getPartition()));
        });
        int shortCircuited = 0;
        final Map<BatchItem, CompletableFuture<Exception>> sendFutures = new HashMap<>();
        for (final BatchItem item : batch) {
            item.setStep(EventPublishingStep.PUBLISHING);
            final HystrixKafkaCircuitBreaker circuitBreaker = circuitBreakers.computeIfAbsent(item.getBrokerId(), brokerId -> new HystrixKafkaCircuitBreaker(brokerId));
            if (circuitBreaker.allowRequest()) {
                sendFutures.put(item, publishItem(producer, topicId, item, circuitBreaker));
            } else {
                shortCircuited++;
                item.updateStatusAndDetail(EventPublishingStatus.FAILED, "short circuited");
            }
        }
        if (shortCircuited > 0) {
            LOG.warn("Short circuiting request to Kafka {} time(s) due to timeout for topic {}", shortCircuited, topicId);
        }
        final CompletableFuture<Void> multiFuture = CompletableFuture.allOf(sendFutures.values().toArray(new CompletableFuture<?>[sendFutures.size()]));
        multiFuture.get(createSendTimeout(), TimeUnit.MILLISECONDS);
        // Now let's check for errors
        final Optional<Exception> needReset = sendFutures.entrySet().stream().filter(entry -> isExceptionShouldLeadToReset(entry.getValue().getNow(null))).map(entry -> entry.getValue().getNow(null)).findAny();
        if (needReset.isPresent()) {
            LOG.info("Terminating producer while publishing to topic {} because of unrecoverable exception", topicId, needReset.get());
            kafkaFactory.terminateProducer(producer);
        }
    } catch (final TimeoutException ex) {
        failUnpublished(batch, "timed out");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } catch (final ExecutionException ex) {
        failUnpublished(batch, "internal error");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } catch (final InterruptedException ex) {
        Thread.currentThread().interrupt();
        failUnpublished(batch, "interrupted");
        throw new EventPublishingException("Error publishing message to kafka", ex);
    } finally {
        kafkaFactory.releaseProducer(producer);
    }
    final boolean atLeastOneFailed = batch.stream().anyMatch(item -> item.getResponse().getPublishingStatus() == EventPublishingStatus.FAILED);
    if (atLeastOneFailed) {
        failUnpublished(batch, "internal error");
        throw new EventPublishingException("Error publishing message to kafka");
    }
}
Also used : EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) Collections.unmodifiableList(java.util.Collections.unmodifiableList) LoggerFactory(org.slf4j.LoggerFactory) TimeoutException(java.util.concurrent.TimeoutException) TopicRepositoryException(org.zalando.nakadi.exceptions.runtime.TopicRepositoryException) PARTITION_NOT_FOUND(org.zalando.nakadi.domain.CursorError.PARTITION_NOT_FOUND) ServiceUnavailableException(org.zalando.nakadi.exceptions.ServiceUnavailableException) Map(java.util.Map) RetryForSpecifiedTimeStrategy(org.echocat.jomon.runtime.concurrent.RetryForSpecifiedTimeStrategy) Consumer(org.apache.kafka.clients.consumer.Consumer) ZooKeeperHolder(org.zalando.nakadi.repository.zookeeper.ZooKeeperHolder) TopicPartition(org.apache.kafka.common.TopicPartition) TopicRepository(org.zalando.nakadi.repository.TopicRepository) Retryer(org.echocat.jomon.runtime.concurrent.Retryer) Collection(java.util.Collection) PartitionStatistics(org.zalando.nakadi.domain.PartitionStatistics) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Set(java.util.Set) ConfigType(kafka.server.ConfigType) PartitionInfo(org.apache.kafka.common.PartitionInfo) InvalidCursorException(org.zalando.nakadi.exceptions.InvalidCursorException) Collectors(java.util.stream.Collectors) TopicDeletionException(org.zalando.nakadi.exceptions.TopicDeletionException) Objects(java.util.Objects) ZkUtils(kafka.utils.ZkUtils) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) List(java.util.List) Stream(java.util.stream.Stream) Lists.newArrayList(com.google.common.collect.Lists.newArrayList) Timeline(org.zalando.nakadi.domain.Timeline) ZookeeperSettings(org.zalando.nakadi.repository.zookeeper.ZookeeperSettings) NULL_OFFSET(org.zalando.nakadi.domain.CursorError.NULL_OFFSET) BatchItem(org.zalando.nakadi.domain.BatchItem) Optional(java.util.Optional) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) AdminUtils(kafka.admin.AdminUtils) IntStream(java.util.stream.IntStream) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) NetworkException(org.apache.kafka.common.errors.NetworkException) NakadiCursor(org.zalando.nakadi.domain.NakadiCursor) NakadiSettings(org.zalando.nakadi.config.NakadiSettings) TopicCreationException(org.zalando.nakadi.exceptions.TopicCreationException) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) TopicConfigException(org.zalando.nakadi.exceptions.runtime.TopicConfigException) UnknownServerException(org.apache.kafka.common.errors.UnknownServerException) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) UUIDGenerator(org.zalando.nakadi.util.UUIDGenerator) InterruptException(org.apache.kafka.common.errors.InterruptException) EventPublishingStep(org.zalando.nakadi.domain.EventPublishingStep) Nullable(javax.annotation.Nullable) UNAVAILABLE(org.zalando.nakadi.domain.CursorError.UNAVAILABLE) NULL_PARTITION(org.zalando.nakadi.domain.CursorError.NULL_PARTITION) Logger(org.slf4j.Logger) Properties(java.util.Properties) Producer(org.apache.kafka.clients.producer.Producer) PartitionEndStatistics(org.zalando.nakadi.domain.PartitionEndStatistics) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) EventConsumer(org.zalando.nakadi.repository.EventConsumer) 
Collectors.toList(java.util.stream.Collectors.toList) EventPublishingStatus(org.zalando.nakadi.domain.EventPublishingStatus) Preconditions(com.google.common.base.Preconditions) Collections(java.util.Collections) RackAwareMode(kafka.admin.RackAwareMode)
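
publishItem() is referenced but not shown in this excerpt. It presumably adapts Kafka's callback-style send() to the CompletableFuture<Exception> values that syncPostBatch() aggregates; a hypothetical sketch of such a bridge, with the circuit-breaker bookkeeping omitted, might look like this:

import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class CallbackBridgeSketch {

    // Hypothetical stand-in for publishItem(): completes the future with the send
    // error, or with null on success, matching the CompletableFuture<Exception>
    // values that syncPostBatch() aggregates with CompletableFuture.allOf().
    static CompletableFuture<Exception> publishItem(Producer<String, String> producer, String topicId,
                                                    String partition, String payload) {
        CompletableFuture<Exception> result = new CompletableFuture<>();
        ProducerRecord<String, String> record =
                new ProducerRecord<>(topicId, Integer.valueOf(partition), partition, payload);
        try {
            // The second argument is Kafka's Callback; exception is null when the write succeeded.
            producer.send(record, (metadata, exception) -> result.complete(exception));
        } catch (Exception e) {
            // send() itself can throw (e.g. on serialization failure) before any callback fires.
            result.complete(e);
        }
        return result;
    }
}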

Example 10 with Producer

Use of org.apache.kafka.clients.producer.Producer in project kafka by apache.

From class EosV2UpgradeIntegrationTest, method shouldUpgradeFromEosAlphaToEosV2.

@SuppressWarnings("deprecation")
@Test
public void shouldUpgradeFromEosAlphaToEosV2() throws Exception {
    // We use two KafkaStreams clients that we upgrade from eos-alpha to eos-V2. During the upgrade,
    // we ensure that there are pending transactions and verify that data is processed correctly.
    // 
    // We either close clients cleanly (`injectError = false`) or let them crash (`injectError = true`) during
    // the upgrade. For both cases, EOS should not be violated.
    // 
    // Additionally, we inject errors while one client is on eos-alpha and the other client is on eos-V2:
    // For this case, we inject the error during task commit phase, i.e., after offsets are appended to a TX,
    // and before the TX is committed. The goal is to verify that the written but uncommitted offsets are not
    // picked up, i.e., GroupCoordinator fencing works correctly.
    // 
    // The commit interval is set to MAX_VALUE and the `Processor` used requests commits manually, so we have full
    // control over when a commit actually happens. We use an input topic with 4 partitions and each task will request
    // a commit after processing 10 records.
    // 
    // 1.  start both clients and wait until rebalance stabilizes
    // 2.  write 10 records per input topic partition and verify that the result was committed
    // 3.  write 5 records per input topic partition to get pending transactions (verified via "read_uncommitted" mode)
    // - all 4 pending transactions are based on task producers
    // - we will get only 4 pending writes for one partition for the crash case as we crash processing the 5th record
    // 4.  stop/crash the first client, wait until rebalance stabilizes:
    // - stop case:
    // * verify that the stopped client did commit its pending transaction during shutdown
    // * the second client will still have two pending transactions
    // - crash case:
    // * the pending transactions of the crashed client got aborted
    // * the second client will have four pending transactions
    // 5.  restart the first client with eos-V2 enabled and wait until rebalance stabilizes
    // - the rebalance should result in a commit of all tasks
    // 6.  write 5 records per input topic partition
    // - stop case:
    // * verify that the result was committed
    // - crash case:
    // * fail the second (i.e., eos-alpha) client during commit
    // * the eos-V2 client should not pickup the pending offsets
    // * verify uncommitted and committed result
    // 7.  only for crash case:
    // 7a. restart the second client in eos-alpha mode and wait until rebalance stabilizes
    // 7b. write 10 records per input topic partition
    // * fail the first (i.e., eos-V2) client during commit
    // * the eos-alpha client should not pickup the pending offsets
    // * verify uncommitted and committed result
    // 7c. restart the first client in eos-V2 mode and wait until rebalance stabilizes
    // 8.  write 5 records per input topic partition to get pending transactions (verified via "read_uncommitted" mode)
    // - 2 transactions are based on a task producer; one transaction is based on a thread producer
    // - we will get 4 pending writes for the crash case as we crash processing the 5th record
    // 9.  stop/crash the second client and wait until rebalance stabilizes:
    // - stop only:
    // * verify that the stopped client did commit its pending transaction during shutdown
    // * the first client will still have one pending transaction
    // - crash case:
    // * the pending transactions of the crashed client got aborted
    // * the first client will have one pending transaction
    // 10. restart the second client with eos-V2 enabled and wait until rebalance stabilizes
    // - the rebalance should result in a commit of all tasks
    // 11. write 5 records per input topic partition and verify that the result was committed
    final List<KeyValue<KafkaStreams.State, KafkaStreams.State>> stateTransitions1 = new LinkedList<>();
    KafkaStreams streams1Alpha = null;
    KafkaStreams streams1V2 = null;
    KafkaStreams streams1V2Two = null;
    final List<KeyValue<KafkaStreams.State, KafkaStreams.State>> stateTransitions2 = new LinkedList<>();
    KafkaStreams streams2Alpha = null;
    KafkaStreams streams2AlphaTwo = null;
    KafkaStreams streams2V2 = null;
    try {
        // phase 1: start both clients
        streams1Alpha = getKafkaStreams(APP_DIR_1, StreamsConfig.EXACTLY_ONCE);
        streams1Alpha.setStateListener((newState, oldState) -> stateTransitions1.add(KeyValue.pair(oldState, newState)));
        assignmentListener.prepareForRebalance();
        streams1Alpha.cleanUp();
        streams1Alpha.start();
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions1);
        streams2Alpha = getKafkaStreams(APP_DIR_2, StreamsConfig.EXACTLY_ONCE);
        streams2Alpha.setStateListener((newState, oldState) -> stateTransitions2.add(KeyValue.pair(oldState, newState)));
        stateTransitions1.clear();
        assignmentListener.prepareForRebalance();
        streams2Alpha.cleanUp();
        streams2Alpha.start();
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions1);
        waitForRunning(stateTransitions2);
        // in all phases, we write comments that assume that p-0/p-1 are assigned to the first client
        // and p-2/p-3 are assigned to the second client (in reality the assignment might be different though)
        // phase 2: (write first batch of data)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // p-0: ---> 10 rec + C
        // p-1: ---> 10 rec + C
        // p-2: ---> 10 rec + C
        // p-3: ---> 10 rec + C
        final List<KeyValue<Long, Long>> committedInputDataBeforeUpgrade = prepareData(0L, 10L, 0L, 1L, 2L, 3L);
        writeInputData(committedInputDataBeforeUpgrade);
        waitForCondition(() -> commitRequested.get() == 4, MAX_WAIT_TIME_MS, "StreamsTasks did not request commit.");
        final Map<Long, Long> committedState = new HashMap<>();
        final List<KeyValue<Long, Long>> expectedUncommittedResult = computeExpectedResult(committedInputDataBeforeUpgrade, committedState);
        verifyCommitted(expectedUncommittedResult);
        // phase 3: (write partial second batch of data)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case:
        // p-0: 10 rec + C ---> 5 rec (pending)
        // p-1: 10 rec + C ---> 5 rec (pending)
        // p-2: 10 rec + C ---> 5 rec (pending)
        // p-3: 10 rec + C ---> 5 rec (pending)
        // crash case: (we just assume that we inject the error for p-0; in reality it might be a different partition)
        // (we don't crash right away and write one record less)
        // p-0: 10 rec + C ---> 4 rec (pending)
        // p-1: 10 rec + C ---> 5 rec (pending)
        // p-2: 10 rec + C ---> 5 rec (pending)
        // p-3: 10 rec + C ---> 5 rec (pending)
        final Set<Long> cleanKeys = mkSet(0L, 1L, 2L, 3L);
        final Set<Long> keysFirstClientAlpha = keysFromInstance(streams1Alpha);
        final long firstFailingKeyForCrashCase = keysFirstClientAlpha.iterator().next();
        cleanKeys.remove(firstFailingKeyForCrashCase);
        final List<KeyValue<Long, Long>> uncommittedInputDataBeforeFirstUpgrade = new LinkedList<>();
        final HashMap<Long, Long> uncommittedState = new HashMap<>(committedState);
        if (!injectError) {
            uncommittedInputDataBeforeFirstUpgrade.addAll(prepareData(10L, 15L, 0L, 1L, 2L, 3L));
            writeInputData(uncommittedInputDataBeforeFirstUpgrade);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataBeforeFirstUpgrade, uncommittedState));
            verifyUncommitted(expectedUncommittedResult);
        } else {
            final List<KeyValue<Long, Long>> uncommittedInputDataWithoutFailingKey = new LinkedList<>();
            for (final long key : cleanKeys) {
                uncommittedInputDataWithoutFailingKey.addAll(prepareData(10L, 15L, key));
            }
            uncommittedInputDataWithoutFailingKey.addAll(prepareData(10L, 14L, firstFailingKeyForCrashCase));
            uncommittedInputDataBeforeFirstUpgrade.addAll(uncommittedInputDataWithoutFailingKey);
            writeInputData(uncommittedInputDataWithoutFailingKey);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataWithoutFailingKey, new HashMap<>(committedState)));
            verifyUncommitted(expectedUncommittedResult);
        }
        // phase 4: (stop first client)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case: (client 1 will commit its two tasks on close())
        // p-0: 10 rec + C   +   5 rec ---> C
        // p-1: 10 rec + C   +   5 rec ---> C
        // p-2: 10 rec + C   +   5 rec (pending)
        // p-3: 10 rec + C   +   5 rec (pending)
        // crash case: (we write the last record that will trigger the crash; both TX from client 1 will be aborted
        // during fail over by client 2 and retried)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec (pending)
        // p-1: 10 rec + C   +   5 rec + A + 5 rec (pending)
        // p-2: 10 rec + C   +   5 rec (pending)
        // p-3: 10 rec + C   +   5 rec (pending)
        stateTransitions2.clear();
        assignmentListener.prepareForRebalance();
        if (!injectError) {
            stateTransitions1.clear();
            streams1Alpha.close();
            waitForStateTransition(stateTransitions1, CLOSE);
        } else {
            errorInjectedClient1.set(true);
            final List<KeyValue<Long, Long>> dataPotentiallyFirstFailingKey = prepareData(14L, 15L, firstFailingKeyForCrashCase);
            uncommittedInputDataBeforeFirstUpgrade.addAll(dataPotentiallyFirstFailingKey);
            writeInputData(dataPotentiallyFirstFailingKey);
        }
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions2);
        if (!injectError) {
            final List<KeyValue<Long, Long>> committedInputDataDuringFirstUpgrade = uncommittedInputDataBeforeFirstUpgrade.stream().filter(pair -> keysFirstClientAlpha.contains(pair.key)).collect(Collectors.toList());
            final List<KeyValue<Long, Long>> expectedCommittedResult = computeExpectedResult(committedInputDataDuringFirstUpgrade, committedState);
            verifyCommitted(expectedCommittedResult);
        } else {
            // retrying TX
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataBeforeFirstUpgrade.stream().filter(pair -> keysFirstClientAlpha.contains(pair.key)).collect(Collectors.toList()), new HashMap<>(committedState)));
            verifyUncommitted(expectedUncommittedResult);
            waitForStateTransitionContains(stateTransitions1, CRASH);
            errorInjectedClient1.set(false);
            stateTransitions1.clear();
            streams1Alpha.close();
            assertFalse(UNEXPECTED_EXCEPTION_MSG, hasUnexpectedError);
        }
        // phase 5: (restart first client)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case: (client 2 (alpha) will commit the two revoked tasks that migrate back to client 1)
        // (note: we may or may not get newly committed data, depending on whether the already committed tasks
        // migrate back to client 1, or different tasks)
        // (below we show the case for which we don't get newly committed data)
        // p-0: 10 rec + C   +   5 rec ---> C
        // p-1: 10 rec + C   +   5 rec ---> C
        // p-2: 10 rec + C   +   5 rec (pending)
        // p-3: 10 rec + C   +   5 rec (pending)
        // crash case: (client 2 (alpha) will commit all tasks even though only two tasks are revoked and migrate back to client 1)
        // (note: because nothing was committed originally, we always get newly committed data)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec ---> C
        // p-1: 10 rec + C   +   5 rec + A + 5 rec ---> C
        // p-2: 10 rec + C   +   5 rec ---> C
        // p-3: 10 rec + C   +   5 rec ---> C
        commitRequested.set(0);
        stateTransitions1.clear();
        stateTransitions2.clear();
        streams1V2 = getKafkaStreams(APP_DIR_1, StreamsConfig.EXACTLY_ONCE_V2);
        streams1V2.setStateListener((newState, oldState) -> stateTransitions1.add(KeyValue.pair(oldState, newState)));
        assignmentListener.prepareForRebalance();
        streams1V2.start();
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions1);
        waitForRunning(stateTransitions2);
        final Set<Long> newlyCommittedKeys;
        if (!injectError) {
            newlyCommittedKeys = keysFromInstance(streams1V2);
            newlyCommittedKeys.removeAll(keysFirstClientAlpha);
        } else {
            newlyCommittedKeys = mkSet(0L, 1L, 2L, 3L);
        }
        final List<KeyValue<Long, Long>> expectedCommittedResultAfterRestartFirstClient = computeExpectedResult(uncommittedInputDataBeforeFirstUpgrade.stream().filter(pair -> newlyCommittedKeys.contains(pair.key)).collect(Collectors.toList()), committedState);
        verifyCommitted(expectedCommittedResultAfterRestartFirstClient);
        // phase 6: (complete second batch of data; crash: let second client fail on commit)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case: (both clients commit regularly)
        // (depending on the task movement in phase 5, we may or may not get newly committed data;
        // we show the case for which p-2 and p-3 are newly committed below)
        // p-0: 10 rec + C   +   5 rec + C ---> 5 rec + C
        // p-1: 10 rec + C   +   5 rec + C ---> 5 rec + C
        // p-2: 10 rec + C   +   5 rec     ---> 5 rec + C
        // p-3: 10 rec + C   +   5 rec     ---> 5 rec + C
        // crash case: (second/alpha client fails and both TX are aborted)
        // (first/V2 client reprocesses the 10 records and commits TX)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec + C ---> 5 rec + C
        // p-1: 10 rec + C   +   5 rec + A + 5 rec + C ---> 5 rec + C
        // p-2: 10 rec + C   +   5 rec + C             ---> 5 rec + A + 5 rec + C
        // p-3: 10 rec + C   +   5 rec + C             ---> 5 rec + A + 5 rec + C
        commitCounterClient1.set(0);
        if (!injectError) {
            final List<KeyValue<Long, Long>> finishSecondBatch = prepareData(15L, 20L, 0L, 1L, 2L, 3L);
            writeInputData(finishSecondBatch);
            final List<KeyValue<Long, Long>> committedInputDataDuringUpgrade = uncommittedInputDataBeforeFirstUpgrade.stream().filter(pair -> !keysFirstClientAlpha.contains(pair.key)).filter(pair -> !newlyCommittedKeys.contains(pair.key)).collect(Collectors.toList());
            committedInputDataDuringUpgrade.addAll(finishSecondBatch);
            expectedUncommittedResult.addAll(computeExpectedResult(finishSecondBatch, uncommittedState));
            final List<KeyValue<Long, Long>> expectedCommittedResult = computeExpectedResult(committedInputDataDuringUpgrade, committedState);
            verifyCommitted(expectedCommittedResult);
        } else {
            final Set<Long> keysFirstClientV2 = keysFromInstance(streams1V2);
            final Set<Long> keysSecondClientAlpha = keysFromInstance(streams2Alpha);
            final List<KeyValue<Long, Long>> committedInputDataAfterFirstUpgrade = prepareData(15L, 20L, keysFirstClientV2.toArray(new Long[0]));
            writeInputData(committedInputDataAfterFirstUpgrade);
            final List<KeyValue<Long, Long>> expectedCommittedResultBeforeFailure = computeExpectedResult(committedInputDataAfterFirstUpgrade, committedState);
            verifyCommitted(expectedCommittedResultBeforeFailure);
            expectedUncommittedResult.addAll(expectedCommittedResultBeforeFailure);
            commitCounterClient2.set(0);
            final Iterator<Long> it = keysSecondClientAlpha.iterator();
            final Long otherKey = it.next();
            final Long failingKey = it.next();
            final List<KeyValue<Long, Long>> uncommittedInputDataAfterFirstUpgrade = prepareData(15L, 19L, keysSecondClientAlpha.toArray(new Long[0]));
            uncommittedInputDataAfterFirstUpgrade.addAll(prepareData(19L, 20L, otherKey));
            writeInputData(uncommittedInputDataAfterFirstUpgrade);
            uncommittedState.putAll(committedState);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataAfterFirstUpgrade, uncommittedState));
            verifyUncommitted(expectedUncommittedResult);
            stateTransitions1.clear();
            stateTransitions2.clear();
            assignmentListener.prepareForRebalance();
            commitCounterClient1.set(0);
            commitErrorInjectedClient2.set(true);
            final List<KeyValue<Long, Long>> dataFailingKey = prepareData(19L, 20L, failingKey);
            uncommittedInputDataAfterFirstUpgrade.addAll(dataFailingKey);
            writeInputData(dataFailingKey);
            expectedUncommittedResult.addAll(computeExpectedResult(dataFailingKey, uncommittedState));
            verifyUncommitted(expectedUncommittedResult);
            assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
            waitForStateTransitionContains(stateTransitions2, CRASH);
            commitErrorInjectedClient2.set(false);
            stateTransitions2.clear();
            streams2Alpha.close();
            assertFalse(UNEXPECTED_EXCEPTION_MSG, hasUnexpectedError);
            final List<KeyValue<Long, Long>> expectedCommittedResultAfterFailure = computeExpectedResult(uncommittedInputDataAfterFirstUpgrade, committedState);
            verifyCommitted(expectedCommittedResultAfterFailure);
            expectedUncommittedResult.addAll(expectedCommittedResultAfterFailure);
        }
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C ---> 10 rec + C
        if (!injectError) {
            streams2AlphaTwo = streams2Alpha;
        } else {
            // 7a restart the second client in eos-alpha mode and wait until rebalance stabilizes
            commitCounterClient1.set(0);
            commitCounterClient2.set(-1);
            stateTransitions1.clear();
            stateTransitions2.clear();
            streams2AlphaTwo = getKafkaStreams(APP_DIR_2, StreamsConfig.EXACTLY_ONCE);
            streams2AlphaTwo.setStateListener((newState, oldState) -> stateTransitions2.add(KeyValue.pair(oldState, newState)));
            assignmentListener.prepareForRebalance();
            streams2AlphaTwo.start();
            assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
            waitForRunning(stateTransitions1);
            waitForRunning(stateTransitions2);
            // 7b. write third batch of input data
            final Set<Long> keysFirstClientV2 = keysFromInstance(streams1V2);
            final Set<Long> keysSecondClientAlphaTwo = keysFromInstance(streams2AlphaTwo);
            final List<KeyValue<Long, Long>> committedInputDataBetweenUpgrades = prepareData(20L, 30L, keysSecondClientAlphaTwo.toArray(new Long[0]));
            writeInputData(committedInputDataBetweenUpgrades);
            final List<KeyValue<Long, Long>> expectedCommittedResultBeforeFailure = computeExpectedResult(committedInputDataBetweenUpgrades, committedState);
            verifyCommitted(expectedCommittedResultBeforeFailure);
            expectedUncommittedResult.addAll(expectedCommittedResultBeforeFailure);
            commitCounterClient2.set(0);
            final Iterator<Long> it = keysFirstClientV2.iterator();
            final Long otherKey = it.next();
            final Long failingKey = it.next();
            final List<KeyValue<Long, Long>> uncommittedInputDataBetweenUpgrade = prepareData(20L, 29L, keysFirstClientV2.toArray(new Long[0]));
            uncommittedInputDataBetweenUpgrade.addAll(prepareData(29L, 30L, otherKey));
            writeInputData(uncommittedInputDataBetweenUpgrade);
            uncommittedState.putAll(committedState);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataBetweenUpgrade, uncommittedState));
            verifyUncommitted(expectedUncommittedResult);
            stateTransitions1.clear();
            stateTransitions2.clear();
            assignmentListener.prepareForRebalance();
            commitCounterClient2.set(0);
            commitErrorInjectedClient1.set(true);
            final List<KeyValue<Long, Long>> dataFailingKey = prepareData(29L, 30L, failingKey);
            uncommittedInputDataBetweenUpgrade.addAll(dataFailingKey);
            writeInputData(dataFailingKey);
            expectedUncommittedResult.addAll(computeExpectedResult(dataFailingKey, uncommittedState));
            verifyUncommitted(expectedUncommittedResult);
            assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
            waitForStateTransitionContains(stateTransitions1, CRASH);
            commitErrorInjectedClient1.set(false);
            stateTransitions1.clear();
            streams1V2.close();
            assertFalse(UNEXPECTED_EXCEPTION_MSG, hasUnexpectedError);
            final List<KeyValue<Long, Long>> expectedCommittedResultAfterFailure = computeExpectedResult(uncommittedInputDataBetweenUpgrade, committedState);
            verifyCommitted(expectedCommittedResultAfterFailure);
            expectedUncommittedResult.addAll(expectedCommittedResultAfterFailure);
            // 7c. restart the first client in eos-V2 mode and wait until rebalance stabilizes
            stateTransitions1.clear();
            stateTransitions2.clear();
            streams1V2Two = getKafkaStreams(APP_DIR_1, StreamsConfig.EXACTLY_ONCE_V2);
            streams1V2Two.setStateListener((newState, oldState) -> stateTransitions1.add(KeyValue.pair(oldState, newState)));
            assignmentListener.prepareForRebalance();
            streams1V2Two.start();
            assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
            waitForRunning(stateTransitions1);
            waitForRunning(stateTransitions2);
        }
        // phase 8: (write partial last batch of data)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case:
        // p-0: 10 rec + C   +   5 rec + C + 5 rec + C ---> 5 rec (pending)
        // p-1: 10 rec + C   +   5 rec + C + 5 rec + C ---> 5 rec (pending)
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + C ---> 5 rec (pending)
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + C ---> 5 rec (pending)
        // crash case: (we just assume that we inject the error for p-2; in reality it might be a different partition)
        // (we don't crash right away and write one record less)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C ---> 5 rec (pending)
        // p-1: 10 rec + C   +   5 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C ---> 5 rec (pending)
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C              ---> 4 rec (pending)
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C              ---> 5 rec (pending)
        cleanKeys.addAll(mkSet(0L, 1L, 2L, 3L));
        final Set<Long> keysSecondClientAlphaTwo = keysFromInstance(streams2AlphaTwo);
        final long secondFailingKeyForCrashCase = keysSecondClientAlphaTwo.iterator().next();
        cleanKeys.remove(secondFailingKeyForCrashCase);
        final List<KeyValue<Long, Long>> uncommittedInputDataBeforeSecondUpgrade = new LinkedList<>();
        if (!injectError) {
            uncommittedInputDataBeforeSecondUpgrade.addAll(prepareData(30L, 35L, 0L, 1L, 2L, 3L));
            writeInputData(uncommittedInputDataBeforeSecondUpgrade);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataBeforeSecondUpgrade, new HashMap<>(committedState)));
            verifyUncommitted(expectedUncommittedResult);
        } else {
            final List<KeyValue<Long, Long>> uncommittedInputDataWithoutFailingKey = new LinkedList<>();
            for (final long key : cleanKeys) {
                uncommittedInputDataWithoutFailingKey.addAll(prepareData(30L, 35L, key));
            }
            uncommittedInputDataWithoutFailingKey.addAll(prepareData(30L, 34L, secondFailingKeyForCrashCase));
            uncommittedInputDataBeforeSecondUpgrade.addAll(uncommittedInputDataWithoutFailingKey);
            writeInputData(uncommittedInputDataWithoutFailingKey);
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataWithoutFailingKey, new HashMap<>(committedState)));
            verifyUncommitted(expectedUncommittedResult);
        }
        // phase 9: (stop/crash second client)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case: (client 2 (alpha) will commit its two tasks on close())
        // p-0: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec (pending)
        // p-1: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec (pending)
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec ---> C
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec ---> C
        // crash case: (we write the last record that will trigger the crash; both TX from client 2 will be aborted
        // during fail over by client 1 and retried)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec (pending)
        // p-1: 10 rec + C   +   5 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec (pending)
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   4 rec ---> A + 5 rec (pending)
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   5 rec ---> A + 5 rec (pending)
        stateTransitions1.clear();
        assignmentListener.prepareForRebalance();
        if (!injectError) {
            stateTransitions2.clear();
            streams2AlphaTwo.close();
            waitForStateTransition(stateTransitions2, CLOSE);
        } else {
            errorInjectedClient2.set(true);
            final List<KeyValue<Long, Long>> dataPotentiallySecondFailingKey = prepareData(34L, 35L, secondFailingKeyForCrashCase);
            uncommittedInputDataBeforeSecondUpgrade.addAll(dataPotentiallySecondFailingKey);
            writeInputData(dataPotentiallySecondFailingKey);
        }
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions1);
        if (!injectError) {
            final List<KeyValue<Long, Long>> committedInputDataDuringSecondUpgrade = uncommittedInputDataBeforeSecondUpgrade.stream().filter(pair -> keysSecondClientAlphaTwo.contains(pair.key)).collect(Collectors.toList());
            final List<KeyValue<Long, Long>> expectedCommittedResult = computeExpectedResult(committedInputDataDuringSecondUpgrade, committedState);
            verifyCommitted(expectedCommittedResult);
        } else {
            // retrying TX
            expectedUncommittedResult.addAll(computeExpectedResult(uncommittedInputDataBeforeSecondUpgrade.stream().filter(pair -> keysSecondClientAlphaTwo.contains(pair.key)).collect(Collectors.toList()), new HashMap<>(committedState)));
            verifyUncommitted(expectedUncommittedResult);
            waitForStateTransitionContains(stateTransitions2, CRASH);
            errorInjectedClient2.set(false);
            stateTransitions2.clear();
            streams2AlphaTwo.close();
            assertFalse(UNEXPECTED_EXCEPTION_MSG, hasUnexpectedError);
        }
        // phase 10: (restart second client)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // the state below indicates the case for which the "original" tasks of client2 are migrated back to client2
        // if a task "switch" happens, we might get additional commits (omitted in the comment for brevity)
        // 
        // stop case: (client 1 (V2) will commit all four tasks if at least one task that is revoked and migrates back to client 2 needs committing)
        // p-0: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec ---> C
        // p-1: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec ---> C
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C
        // crash case: (client 1 (V2) will commit all four tasks even though only two migrate back to client 2)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec ---> C
        // p-1: 10 rec + C   +   5 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec ---> C
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   4 rec + A + 5 rec ---> C
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   5 rec + A + 5 rec ---> C
        commitRequested.set(0);
        stateTransitions1.clear();
        stateTransitions2.clear();
        streams2V2 = getKafkaStreams(APP_DIR_1, StreamsConfig.EXACTLY_ONCE_V2);
        streams2V2.setStateListener((newState, oldState) -> stateTransitions2.add(KeyValue.pair(oldState, newState)));
        assignmentListener.prepareForRebalance();
        streams2V2.start();
        assignmentListener.waitForNextStableAssignment(MAX_WAIT_TIME_MS);
        waitForRunning(stateTransitions1);
        waitForRunning(stateTransitions2);
        newlyCommittedKeys.clear();
        if (!injectError) {
            newlyCommittedKeys.addAll(keysFromInstance(streams2V2));
            newlyCommittedKeys.removeAll(keysSecondClientAlphaTwo);
        } else {
            newlyCommittedKeys.addAll(mkSet(0L, 1L, 2L, 3L));
        }
        final List<KeyValue<Long, Long>> expectedCommittedResultAfterRestartSecondClient = computeExpectedResult(uncommittedInputDataBeforeSecondUpgrade.stream().filter(pair -> newlyCommittedKeys.contains(pair.key)).collect(Collectors.toList()), committedState);
        verifyCommitted(expectedCommittedResultAfterRestartSecondClient);
        // phase 11: (complete fourth batch of data)
        // expected end state per output partition (C == COMMIT; A == ABORT; ---> indicate the changes):
        // 
        // stop case:
        // p-0: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C ---> 5 rec + C
        // p-1: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C ---> 5 rec + C
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C ---> 5 rec + C
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + C   +   5 rec + C ---> 5 rec + C
        // crash case: (we just assume that we inject the error for p-2; in reality it might be a different partition)
        // p-0: 10 rec + C   +   4 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec + C             ---> 5 rec + C
        // p-1: 10 rec + C   +   5 rec + A + 5 rec + C + 5 rec + C   +   10 rec + A + 10 rec + C   +   5 rec + C             ---> 5 rec + C
        // p-2: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   4 rec + A + 5 rec + C ---> 5 rec + C
        // p-3: 10 rec + C   +   5 rec + C + 5 rec + A + 5 rec + C   +   10 rec + C                +   5 rec + A + 5 rec + C ---> 5 rec + C
        commitCounterClient1.set(-1);
        commitCounterClient2.set(-1);
        final List<KeyValue<Long, Long>> finishLastBatch = prepareData(35L, 40L, 0L, 1L, 2L, 3L);
        writeInputData(finishLastBatch);
        final Set<Long> uncommittedKeys = mkSet(0L, 1L, 2L, 3L);
        uncommittedKeys.removeAll(keysSecondClientAlphaTwo);
        uncommittedKeys.removeAll(newlyCommittedKeys);
        final List<KeyValue<Long, Long>> committedInputDataDuringUpgrade = uncommittedInputDataBeforeSecondUpgrade.stream().filter(pair -> uncommittedKeys.contains(pair.key)).collect(Collectors.toList());
        committedInputDataDuringUpgrade.addAll(finishLastBatch);
        final List<KeyValue<Long, Long>> expectedCommittedResult = computeExpectedResult(committedInputDataDuringUpgrade, committedState);
        verifyCommitted(expectedCommittedResult);
    } finally {
        if (streams1Alpha != null) {
            streams1Alpha.close();
        }
        if (streams1V2 != null) {
            streams1V2.close();
        }
        if (streams1V2Two != null) {
            streams1V2Two.close();
        }
        if (streams2Alpha != null) {
            streams2Alpha.close();
        }
        if (streams2AlphaTwo != null) {
            streams2AlphaTwo.close();
        }
        if (streams2V2 != null) {
            streams2V2.close();
        }
    }
}
Also used : DefaultKafkaClientSupplier(org.apache.kafka.streams.processor.internals.DefaultKafkaClientSupplier) Arrays(java.util.Arrays) Stores(org.apache.kafka.streams.state.Stores) Cluster(org.apache.kafka.common.Cluster) TransformerSupplier(org.apache.kafka.streams.kstream.TransformerSupplier) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StableAssignmentListener(org.apache.kafka.streams.integration.utils.IntegrationTestUtils.StableAssignmentListener) Locale(java.util.Locale) KeyValueStore(org.apache.kafka.streams.state.KeyValueStore) Duration(java.time.Duration) Map(java.util.Map) Serdes(org.apache.kafka.common.serialization.Serdes) Parameterized(org.junit.runners.Parameterized) AfterClass(org.junit.AfterClass) TestUtils(org.apache.kafka.test.TestUtils) Collection(java.util.Collection) KeyValue(org.apache.kafka.streams.KeyValue) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) LongDeserializer(org.apache.kafka.common.serialization.LongDeserializer) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) State(org.apache.kafka.streams.KafkaStreams.State) Category(org.junit.experimental.categories.Category) Collectors(java.util.stream.Collectors) QueryableStoreTypes(org.apache.kafka.streams.state.QueryableStoreTypes) IntegrationTestUtils(org.apache.kafka.streams.integration.utils.IntegrationTestUtils) ProcessorContext(org.apache.kafka.streams.processor.ProcessorContext) List(java.util.List) Assert.assertFalse(org.junit.Assert.assertFalse) Partitioner(org.apache.kafka.clients.producer.Partitioner) ReadOnlyKeyValueStore(org.apache.kafka.streams.state.ReadOnlyKeyValueStore) StreamsConfig(org.apache.kafka.streams.StreamsConfig) BeforeClass(org.junit.BeforeClass) CoreMatchers.equalTo(org.hamcrest.CoreMatchers.equalTo) RunWith(org.junit.runner.RunWith) IntegrationTest(org.apache.kafka.test.IntegrationTest) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) KStream(org.apache.kafka.streams.kstream.KStream) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) EmbeddedKafkaCluster(org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) LinkedList(java.util.LinkedList) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) Before(org.junit.Before) Utils(org.apache.kafka.common.utils.Utils) StreamsBuilder(org.apache.kafka.streams.StreamsBuilder) Properties(java.util.Properties) StreamsUncaughtExceptionHandler(org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler) Iterator(java.util.Iterator) Producer(org.apache.kafka.clients.producer.Producer) Transformer(org.apache.kafka.streams.kstream.Transformer) TestUtils.waitForCondition(org.apache.kafka.test.TestUtils.waitForCondition) Test(org.junit.Test) IOException(java.io.IOException) StoreBuilder(org.apache.kafka.streams.state.StoreBuilder) File(java.io.File) IsolationLevel(org.apache.kafka.common.IsolationLevel) KeyValueIterator(org.apache.kafka.streams.state.KeyValueIterator) InternalConfig(org.apache.kafka.streams.StreamsConfig.InternalConfig) StoreQueryParameters(org.apache.kafka.streams.StoreQueryParameters) KafkaStreams(org.apache.kafka.streams.KafkaStreams) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) Collections(java.util.Collections) 
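
The verifyCommitted()/verifyUncommitted() helpers used throughout hinge on the consumer's isolation.level: a read_committed consumer sees only data from committed transactions, while read_uncommitted (mentioned in the comments above) also returns pending transactional writes. A minimal sketch of that distinction, with placeholder broker and topic names:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.LongDeserializer;

public class IsolationLevelSketch {

    static int countRecords(String isolationLevel) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "verify-" + isolationLevel);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, LongDeserializer.class.getName());
        // "read_committed" hides records of open or aborted transactions;
        // "read_uncommitted" (the default) returns them as well.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, isolationLevel);
        try (KafkaConsumer<Long, Long> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("outputTopic")); // placeholder topic
            ConsumerRecords<Long, Long> records = consumer.poll(Duration.ofSeconds(5));
            return records.count();
        }
    }
}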

Aggregations

Producer (org.apache.kafka.clients.producer.Producer): 13
Properties (java.util.Properties): 10
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 9
HashMap (java.util.HashMap): 6
Future (java.util.concurrent.Future): 6
TopicPartition (org.apache.kafka.common.TopicPartition): 6
Test (org.testng.annotations.Test): 6
Set (java.util.Set): 5
ArrayList (java.util.ArrayList): 4
Collection (java.util.Collection): 4
Collections (java.util.Collections): 4
List (java.util.List): 4
Map (java.util.Map): 4
NotificationException (org.apache.atlas.notification.NotificationException): 4
HashSet (java.util.HashSet): 3
Objects (java.util.Objects): 3
Optional (java.util.Optional): 3
TimeUnit (java.util.concurrent.TimeUnit): 3
Collectors (java.util.stream.Collectors): 3
Consumer (org.apache.kafka.clients.consumer.Consumer): 3