
Example 11 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in the Apache Kafka project.

From class RecordCollectorTest, method shouldNotAbortTxnOnEOSCloseDirtyIfNothingSent.

@Test
public void shouldNotAbortTxnOnEOSCloseDirtyIfNothingSent() {
    final AtomicBoolean functionCalled = new AtomicBoolean(false);
    final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, new StreamsProducer(eosConfig, "-StreamThread-1", new MockClientSupplier() {

        @Override
        public Producer<byte[], byte[]> getProducer(final Map<String, Object> config) {
            return new MockProducer<byte[], byte[]>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

                @Override
                public void abortTransaction() {
                    functionCalled.set(true);
                }
            };
        }
    }, taskId, processId, logContext, Time.SYSTEM), productionExceptionHandler, streamsMetrics);
    // nothing was sent on this collector, so closing it dirty must not abort the transaction
    collector.closeDirty();
    assertFalse(functionCalled.get());
}
Also used: AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), MockProducer (org.apache.kafka.clients.producer.MockProducer), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), Map (java.util.Map), Test (org.junit.Test)
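
Example 11 flips an AtomicBoolean inside the overridden method to record the abort. As a hedged variant of the same idiom, the overridden getProducer can also stash the MockProducer it builds so the test can make further assertions on it afterwards (for example via MockProducer#history()); this matters because a full override presumably bypasses the producers list that a stock MockClientSupplier maintains (see Example 12). The sketch below reuses the fixture fields from Example 11 (cluster, byteArraySerializer) and is illustrative only.

// Hedged sketch: capture the MockProducer built inside the anonymous supplier so that
// later assertions can inspect it directly. Requires java.util.concurrent.atomic.AtomicReference;
// cluster and byteArraySerializer are assumed to be the same fixture fields Example 11 uses.
final AtomicReference<MockProducer<byte[], byte[]>> createdProducer = new AtomicReference<>();
final MockClientSupplier capturingSupplier = new MockClientSupplier() {

    @Override
    public Producer<byte[], byte[]> getProducer(final Map<String, Object> config) {
        final MockProducer<byte[], byte[]> producer =
            new MockProducer<>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer);
        createdProducer.set(producer);
        return producer;
    }
};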

Example 12 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in the Apache Kafka project.

From class RecordCollectorTest, method setup.

@Before
public void setup() {
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    clientSupplier.setCluster(cluster);
    streamsProducer = new StreamsProducer(config, processId + "-StreamThread-1", clientSupplier, null, processId, logContext, Time.SYSTEM);
    // MockClientSupplier records every producer it creates, so the MockProducer backing
    // the StreamsProducer above can be captured here for later assertions
    mockProducer = clientSupplier.producers.get(0);
    collector = new RecordCollectorImpl(logContext, taskId, streamsProducer, productionExceptionHandler, streamsMetrics);
}
Also used: MockClientSupplier (org.apache.kafka.test.MockClientSupplier), Before (org.junit.Before)
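
The setup above wires a plain MockClientSupplier into the StreamsProducer and then pulls the MockProducer out of the supplier's producers list. A minimal sketch of how a test body might use that fixture follows; it is an assumption for illustration, with the send(...) signature mirroring Example 13 and the topic, serializers and partitioner taken to be fields of the surrounding test class.

// Minimal sketch, assuming the fixture from setup() above plus topic, stringSerializer
// and streamPartitioner fields from the surrounding test class; not an actual test
// from RecordCollectorTest.
@Test
public void shouldRecordSendsInCapturedMockProducerSketch() {
    collector.send(topic, "key", "value", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.flush();
    // MockProducer keeps every record handed to send(), so the test can inspect it
    assertEquals(1, mockProducer.history().size());
}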

Example 13 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in the Apache Kafka project.

From class RecordCollectorTest, method shouldThrowIfTopicIsUnknownOnSendWithPartitioner.

@Test
public void shouldThrowIfTopicIsUnknownOnSendWithPartitioner() {
    final RecordCollector collector = new RecordCollectorImpl(logContext, taskId, new StreamsProducer(config, processId + "-StreamThread-1", new MockClientSupplier() {

        @Override
        public Producer<byte[], byte[]> getProducer(final Map<String, Object> config) {
            return new MockProducer<byte[], byte[]>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

                @Override
                public List<PartitionInfo> partitionsFor(final String topic) {
                    // report no partition metadata, as if the topic did not exist
                    return Collections.emptyList();
                }
            };
        }
    }, null, null, logContext, Time.SYSTEM), productionExceptionHandler, streamsMetrics);
    collector.initialize();
    final StreamsException thrown = assertThrows(StreamsException.class, () -> collector.send(topic, "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner));
    assertThat(thrown.getMessage(), equalTo("Could not get partition information for topic topic for task 0_0." + " This can happen if the topic does not exist."));
}
Also used: MockProducer (org.apache.kafka.clients.producer.MockProducer), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), StreamsException (org.apache.kafka.streams.errors.StreamsException), PartitionInfo (org.apache.kafka.common.PartitionInfo), Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap), Map (java.util.Map), Test (org.junit.Test)
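
Here the anonymous MockClientSupplier returns a MockProducer whose partitionsFor is stubbed to return an empty list, so the collector cannot resolve any partitions for the topic and reports that as a StreamsException. The same hook can stand in for other metadata failures; the sketch below simulates a metadata timeout instead of a missing topic. It only shows the stubbing side, makes no claim about how RecordCollectorImpl reacts to it, and assumes org.apache.kafka.common.errors.TimeoutException.

// Hedged sketch of the same stubbing pattern, simulating a metadata timeout rather than
// a missing topic. What RecordCollectorImpl does with the thrown exception is not asserted
// here; this only illustrates the MockClientSupplier/MockProducer hook.
final MockClientSupplier timeoutSupplier = new MockClientSupplier() {

    @Override
    public Producer<byte[], byte[]> getProducer(final Map<String, Object> config) {
        return new MockProducer<byte[], byte[]>(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

            @Override
            public List<PartitionInfo> partitionsFor(final String topic) {
                // TimeoutException is unchecked, so it can be thrown from the Producer API
                throw new TimeoutException("simulated metadata timeout");
            }
        };
    }
};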

Example 14 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in the Apache Kafka project.

From class StreamThreadTest, method shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks.

@Test
public void shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream(Pattern.compile("t.*")).to("out");
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final Map<Collection<TopicPartition>, TestStreamTask> createdTasks = new HashMap<>();
    final StreamThread thread = new StreamThread(builder, config, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            final ProcessorTopology topology = builder.build(id.topicGroupId);
            final TestStreamTask task = new TestStreamTask(id, applicationId, partitions, topology, consumer, producer, restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
            createdTasks.put(partitions, task);
            return task;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TopicPartition t1 = new TopicPartition("t1", 0);
    final Set<TopicPartition> task00Partitions = new HashSet<>();
    task00Partitions.add(t1);
    final TaskId taskId = new TaskId(0, 0);
    activeTasks.put(taskId, task00Partitions);
    thread.partitionAssignor(new StreamPartitionAssignor() {

        @Override
        Map<TaskId, Set<TopicPartition>> activeTasks() {
            return activeTasks;
        }
    });
    // should create task for id 0_0 with a single partition
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);
    final TestStreamTask firstTask = createdTasks.get(task00Partitions);
    assertThat(firstTask.id(), is(taskId));
    // update assignment for the task 0_0 so it now has 2 partitions
    task00Partitions.add(new TopicPartition("t2", 0));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(task00Partitions);
    // should close the first task as the assignment has changed
    assertTrue("task should have been closed as assignment has changed", firstTask.closed);
    assertTrue("tasks state manager should have been closed as assignment has changed", firstTask.closedStateManager);
    // should have created a new task for 00
    assertThat(createdTasks.get(task00Partitions).id(), is(taskId));
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), TaskId (org.apache.kafka.streams.processor.TaskId), Set (java.util.Set), HashSet (java.util.HashSet), HashMap (java.util.HashMap), Metrics (org.apache.kafka.common.metrics.Metrics), StreamsMetrics (org.apache.kafka.streams.StreamsMetrics), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), Collection (java.util.Collection), Map (java.util.Map), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)
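
Example 15 below drives the assignment through a MockStreamsPartitionAssignor helper instead of the inline anonymous override used here. Based on the anonymous activeTasks() override above and the constructor call in Example 15, that helper presumably looks roughly like the following sketch; the real test class may differ.

// Hedged sketch of a MockStreamsPartitionAssignor-style helper, inferred from the
// anonymous StreamPartitionAssignor override in Example 14; not the actual test class.
class MockStreamsPartitionAssignor extends StreamPartitionAssignor {

    private final Map<TaskId, Set<TopicPartition>> activeTasks;

    MockStreamsPartitionAssignor(final Map<TaskId, Set<TopicPartition>> activeTasks) {
        this.activeTasks = activeTasks;
    }

    @Override
    Map<TaskId, Set<TopicPartition>> activeTasks() {
        return activeTasks;
    }
}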

Example 15 with MockClientSupplier

Use of org.apache.kafka.test.MockClientSupplier in the Apache Kafka project.

From class StreamThreadTest, method shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskCloseDuringShutdown.

@Test
public void shouldNotViolateAtLeastOnceWhenAnExceptionOccursOnTaskCloseDuringShutdown() throws Exception {
    final KStreamBuilder builder = new KStreamBuilder();
    builder.setApplicationId(applicationId);
    builder.stream("t1").groupByKey();
    final StreamsConfig config = new StreamsConfig(configProps());
    final MockClientSupplier clientSupplier = new MockClientSupplier();
    final TestStreamTask testStreamTask = new TestStreamTask(new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.producer, clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), time)) {

        @Override
        public void close() {
            throw new RuntimeException("KABOOM!");
        }
    };
    final StreamsConfig config1 = new StreamsConfig(configProps());
    final StreamThread thread = new StreamThread(builder, config1, clientSupplier, applicationId, clientId, processId, new Metrics(), new MockTime(), new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) {

        @Override
        protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) {
            return testStreamTask;
        }
    };
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    activeTasks.put(testStreamTask.id, testStreamTask.partitions);
    thread.partitionAssignor(new MockStreamsPartitionAssignor(activeTasks));
    thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList());
    thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions);
    thread.start();
    thread.close();
    thread.join();
    assertFalse("task shouldn't have been committed as there was an exception during shutdown", testStreamTask.committed);
}
Also used: KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder), TaskId (org.apache.kafka.streams.processor.TaskId), Set (java.util.Set), HashSet (java.util.HashSet), HashMap (java.util.HashMap), Metrics (org.apache.kafka.common.metrics.Metrics), StreamsMetrics (org.apache.kafka.streams.StreamsMetrics), MockClientSupplier (org.apache.kafka.test.MockClientSupplier), TopicPartition (org.apache.kafka.common.TopicPartition), Collection (java.util.Collection), MockTime (org.apache.kafka.common.utils.MockTime), StreamsConfig (org.apache.kafka.streams.StreamsConfig), Test (org.junit.Test)

Aggregations

MockClientSupplier (org.apache.kafka.test.MockClientSupplier): 28 usages
Test (org.junit.Test): 23 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 19 usages
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 17 usages
HashMap (java.util.HashMap): 16 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 16 usages
MockTime (org.apache.kafka.common.utils.MockTime): 16 usages
StreamsMetrics (org.apache.kafka.streams.StreamsMetrics): 14 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 13 usages
HashSet (java.util.HashSet): 12 usages
Map (java.util.Map): 10 usages
Set (java.util.Set): 9 usages
Collection (java.util.Collection): 8 usages
TopologyBuilder (org.apache.kafka.streams.processor.TopologyBuilder): 8 usages
KStreamBuilder (org.apache.kafka.streams.kstream.KStreamBuilder): 7 usages
Properties (java.util.Properties): 5 usages
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 5 usages
PartitionInfo (org.apache.kafka.common.PartitionInfo): 4 usages
MockProcessorSupplier (org.apache.kafka.test.MockProcessorSupplier): 4 usages
Before (org.junit.Before): 4 usages