Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StreamThreadTest, method shouldInjectProducerPerThreadUsingClientSupplierOnCreateIfEosV2Enabled.
@Test
public void shouldInjectProducerPerThreadUsingClientSupplierOnCreateIfEosV2Enabled() {
internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
final Properties props = configProps(true);
final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(props), true);
thread.setState(StreamThread.State.STARTING);
thread.rebalanceListener().onPartitionsRevoked(Collections.emptyList());
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
final List<TopicPartition> assignedPartitions = new ArrayList<>();
// assign one partition per task
assignedPartitions.add(t1p1);
assignedPartitions.add(t1p2);
activeTasks.put(task1, Collections.singleton(t1p1));
activeTasks.put(task2, Collections.singleton(t1p2));
thread.taskManager().handleAssignment(activeTasks, emptyMap());
final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
mockConsumer.assign(assignedPartitions);
final Map<TopicPartition, Long> beginOffsets = new HashMap<>();
beginOffsets.put(t1p1, 0L);
beginOffsets.put(t1p2, 0L);
mockConsumer.updateBeginningOffsets(beginOffsets);
thread.rebalanceListener().onPartitionsAssigned(new HashSet<>(assignedPartitions));
thread.runOnce();
assertThat(clientSupplier.producers.size(), is(1));
assertSame(clientSupplier.consumer, thread.mainConsumer());
assertSame(clientSupplier.restoreConsumer, thread.restoreConsumer());
}
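The configuration is built through the test's configProps(true) helper, which is not shown in this snippet. A minimal sketch of an equivalent StreamsConfig with exactly-once v2 enabled follows; the application id and bootstrap servers are placeholder assumptions, not values taken from the test.
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
// Hedged stand-in for the configProps(true) helper used above; the property that
// matters for this test is the processing guarantee, which makes the StreamThread
// obtain one producer per thread from the client supplier.
static StreamsConfig eosV2Config() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "stream-thread-test");   // assumed id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed broker
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
    return new StreamsConfig(props);
}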
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StreamThreadTest, method shouldOnlyCompleteShutdownAfterRebalanceNotInProgress.
@Test
public void shouldOnlyCompleteShutdownAfterRebalanceNotInProgress() throws InterruptedException {
internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
thread.start();
TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.STARTING, 10 * 1000, "Thread never started.");
thread.rebalanceListener().onPartitionsRevoked(Collections.emptyList());
thread.taskManager().handleRebalanceStart(Collections.singleton(topic1));
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
final List<TopicPartition> assignedPartitions = new ArrayList<>();
// assign one partition per task
assignedPartitions.add(t1p1);
assignedPartitions.add(t1p2);
activeTasks.put(task1, Collections.singleton(t1p1));
activeTasks.put(task2, Collections.singleton(t1p2));
thread.taskManager().handleAssignment(activeTasks, emptyMap());
thread.shutdown();
// even if the thread is no longer running, it should still be polling
// as long as the rebalance is still ongoing
assertFalse(thread.isRunning());
Thread.sleep(1000);
assertEquals(Utils.mkSet(task1, task2), thread.taskManager().activeTaskIds());
assertEquals(StreamThread.State.PENDING_SHUTDOWN, thread.state());
thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.DEAD, 10 * 1000, "Thread never shut down.");
assertEquals(Collections.emptySet(), thread.taskManager().activeTaskIds());
}
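For comparison, the same shutdown-after-rebalance guarantee surfaces at the application level through KafkaStreams.close(), which blocks until the stream threads have reached DEAD. A minimal sketch follows; the topic name and connection settings are assumptions, not taken from the test.
import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
// Application-level analogue of the behaviour tested above: close() returns only
// once the stream threads have completed shutdown (or the timeout expires).
static void runAndShutDown() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "shutdown-example");     // assumed id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed broker

    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("topic1");  // trivial single-source topology, mirroring the test

    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.start();
    // ... process for a while ...
    streams.close(Duration.ofSeconds(10));  // blocks until threads are DEAD or the timeout elapses
}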
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StreamsPartitionAssignorTest, method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount.
@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
builder = new CorruptedInternalTopologyBuilder();
topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);
final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
inputTopic.groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
    .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
    .aggregate(() -> "", (k, v, a) -> a + k)
    .leftJoin(inputTable, v -> v, (x, y) -> x + y);
streamsBuilder.buildAndOptimizeTopology();
configureDefault();
subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));
final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
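The final assertion decodes the Streams-specific user data carried in each member's Assignment: the assignor reports the unresolvable partition count as an error code rather than throwing to the consumer. A sketch of that decode step, using the same internal AssignmentInfo and AssignorError classes the test uses:
import java.nio.ByteBuffer;
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Assignment;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.processor.internals.assignment.AssignorError;
// Returns true if the Streams partition assignor flagged this member's assignment
// with the generic ASSIGNMENT_ERROR code, as asserted in the test above.
static boolean assignmentFailed(final Assignment assignment) {
    final ByteBuffer userData = assignment.userData();
    final AssignmentInfo info = AssignmentInfo.decode(userData);
    return info.errCode() == AssignorError.ASSIGNMENT_ERROR.code();
}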
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StoreChangelogReaderTest, method shouldRestoreToLimitInStandbyState.
@Test
public void shouldRestoreToLimitInStandbyState() {
final Map<TaskId, Task> mockTasks = mock(Map.class);
EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
final AtomicLong offset = new AtomicLong(7L);
final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
    @Override
    public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
        return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
    }
};
final long now = time.milliseconds();
final Properties properties = new Properties();
properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
changelogReader.setMainConsumer(consumer);
changelogReader.transitToUpdateStandby();
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
changelogReader.register(tp, standbyStateManager);
assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
changelogReader.restore(mockTasks);
assertNull(callback.restoreTopicPartition);
assertNull(callback.storeNameCalledStates.get(RESTORE_START));
assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
// null key should be ignored
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
changelogReader.restore(mockTasks);
assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
assertNull(callback.storeNameCalledStates.get(RESTORE_END));
assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
offset.set(10L);
time.setCurrentTimeMs(now + 100L);
// should not try to read committed offsets if the commit interval has not yet elapsed
changelogReader.restore(mockTasks);
assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
time.setCurrentTimeMs(now + 101L);
// the first restore call only updates the limit offset; the next call restores up to it (same below)
changelogReader.restore(mockTasks);
assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
changelogReader.restore(mockTasks);
assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
offset.set(15L);
// after we've updated once, the timer is reset and we should not try again until the next interval has elapsed
time.setCurrentTimeMs(now + 201L);
changelogReader.restore(mockTasks);
assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
// once we are in restore-active mode, we should not try to update the limit offset
time.setCurrentTimeMs(now + 202L);
changelogReader.enforceRestoreActive();
changelogReader.restore(mockTasks);
assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
changelogReader.transitToUpdateStandby();
changelogReader.restore(mockTasks);
assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
changelogReader.restore(mockTasks);
assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
changelogReader.restore(mockTasks);
assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
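The pacing of the limit-offset updates above comes from StreamsConfig.COMMIT_INTERVAL_MS_CONFIG: the changelog reader asks the main consumer for committed offsets at most once per commit interval. A minimal sketch of the relevant configuration follows; the bootstrap servers are an assumed placeholder, and the test builds its config via StreamsTestUtils.getStreamsConfig instead.
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;
// Same 100 ms commit interval as the test: standby restoration of a source-topic
// changelog is capped at the committed offset, refreshed at most every 100 ms.
static StreamsConfig standbyLimitConfig() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-reader");          // matches the test's id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed broker
    props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    return new StreamsConfig(props);
}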
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StoreChangelogReaderTest, method shouldNotUpdateLimitForNonSourceStandbyChangelog.
@Test
public void shouldNotUpdateLimitForNonSourceStandbyChangelog() {
final Map<TaskId, Task> mockTasks = mock(Map.class);
EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(false).anyTimes();
EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
    @Override
    public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
        throw new AssertionError("Should not try to fetch committed offsets");
    }
};
final Properties properties = new Properties();
properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
changelogReader.setMainConsumer(consumer);
changelogReader.transitToUpdateStandby();
consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
changelogReader.register(tp, standbyStateManager);
assertNull(changelogReader.changelogMetadata(tp).endOffset());
assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
// if there are no fetchable records, nothing gets restored
changelogReader.restore(mockTasks);
assertNull(callback.restoreTopicPartition);
assertNull(callback.storeNameCalledStates.get(RESTORE_START));
assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
assertNull(changelogReader.changelogMetadata(tp).endOffset());
assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
// null key should be ignored
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
// we should be able to restore to the log end offsets since there's no limit
changelogReader.restore(mockTasks);
assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
assertNull(changelogReader.changelogMetadata(tp).endOffset());
assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
assertNull(callback.storeNameCalledStates.get(RESTORE_END));
assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
}
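The two standby tests differ only in whether the changelog is the table's own source topic. One common way that situation arises in an application is the source-topic-reuse optimization, under which a KTable's input topic doubles as its changelog, so standby restoration is limited to the committed offset; with a dedicated -changelog topic no such limit applies. A hedged sketch follows; the topic and application names are assumptions.
import java.util.Properties;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Materialized;
// With topology optimization enabled, the table built from "topic2" may reuse that
// topic as its changelog (the "source changelog" case of the previous test); without
// it, Streams creates a separate <app-id>-store-changelog topic (this test's case).
static Topology sourceChangelogTopology() {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-example");      // assumed id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // assumed broker
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table("topic2", Materialized.as("store"));  // assumed topic and store names
    return builder.build(props);                        // pass props so the optimization is applied
}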