Example usage of org.apache.kafka.streams.StreamsConfig from the Apache Kafka project:
class StreamsPartitionAssignorTest, method shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut.
@Test
public void shouldThrowTimeoutExceptionWhenCreatingChangelogTopicsTimesOut() {
    // Build a topology containing a single materialized table so that a
    // changelog topic has to be created during assignment.
    final StreamsConfig streamsConfig = new StreamsConfig(configProps());
    final StreamsBuilder topologyBuilder = new StreamsBuilder();
    topologyBuilder.table("topic1", Materialized.as("store"));

    builder = TopologyWrapper.getInternalTopologyBuilder(topologyBuilder.build());
    topologyMetadata = new TopologyMetadata(builder, streamsConfig);

    createDefaultMockTaskManager();
    EasyMock.replay(taskManager);
    partitionAssignor.configure(configProps());

    // Topic manager that times out whenever it is actually asked to create topics.
    final MockInternalTopicManager timeoutTopicManager =
        new MockInternalTopicManager(time, streamsConfig, mockClientSupplier.restoreConsumer, false) {
            @Override
            public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
                if (topics.isEmpty()) {
                    return emptySet();
                }
                throw new TimeoutException("KABOOM!");
            }
        };
    partitionAssignor.setInternalTopicManager(timeoutTopicManager);

    subscriptions.put("client1", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));

    // The timeout raised during changelog-topic creation must propagate out of assign().
    assertThrows(TimeoutException.class,
        () -> partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)));
}
Example usage of org.apache.kafka.streams.StreamsConfig from the Apache Kafka project:
class StreamThreadTest, method shouldNotCloseTaskProducerWhenSuspending.
@Test
public void shouldNotCloseTaskProducerWhenSuspending() {
// Creates the thread with configProps(true) — presumably this enables EOS,
// since a transaction commit is asserted below; TODO confirm against the helper.
final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
internalTopologyBuilder.addSink("out", "output", null, null, null, "name");
// Drive the thread's rebalance lifecycle by hand: start, then an initial
// (empty) revocation before the first assignment.
thread.setState(StreamThread.State.STARTING);
thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
final List<TopicPartition> assignedPartitions = new ArrayList<>();
// assign single partition
assignedPartitions.add(t1p1);
activeTasks.put(task1, Collections.singleton(t1p1));
thread.taskManager().handleAssignment(activeTasks, emptyMap());
final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
mockConsumer.assign(assignedPartitions);
mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
thread.runOnce();
assertThat(thread.activeTasks().size(), equalTo(1));
// need to process a record to enable committing
addRecord(mockConsumer, 0L);
thread.runOnce();
// Suspend the task by revoking its partition...
thread.rebalanceListener().onPartitionsRevoked(assignedPartitions);
// ...the in-flight transaction is committed, but the task producer must stay
// open and the task must remain owned by the thread (suspend != close).
assertTrue(clientSupplier.producers.get(0).transactionCommitted());
assertFalse(clientSupplier.producers.get(0).closed());
assertEquals(1, thread.activeTasks().size());
}
Example usage of org.apache.kafka.streams.StreamsConfig from the Apache Kafka project:
class StreamsProducerTest, method shouldNotSetTransactionIdIfEosDisabled.
// non-EOS tests
// functional tests
@Test
public void shouldNotSetTransactionIdIfEosDisabled() {
    final StreamsConfig config = mock(StreamsConfig.class);
    // With at-least-once processing the producer configs are used as-is:
    // nothing is expected to be written into the returned (mock) map.
    expect(config.getProducerConfigs("threadId-producer")).andReturn(mock(Map.class));
    expect(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG))
        .andReturn(StreamsConfig.AT_LEAST_ONCE)
        .anyTimes();
    replay(config);

    // Constructing the producer must not set any transactional id.
    new StreamsProducer(config, "threadId", mockClientSupplier, null, null, logContext, mockTime);
}
Example usage of org.apache.kafka.streams.StreamsConfig from the Apache Kafka project:
class StreamsProducerTest, method shouldSetTransactionIdUsingTaskIdIfEosAlphaEnabled.
@SuppressWarnings("deprecation")
@Test
public void shouldSetTransactionIdUsingTaskIdIfEosAlphaEnabled() {
    // Under EOS-alpha the transactional id is derived from the application id
    // plus the task id ("appId-0_0"); the mock map records that exact put/get.
    final Map<String, Object> producerConfigs = mock(Map.class);
    expect(producerConfigs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "appId-0_0")).andReturn(null);
    expect(producerConfigs.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG)).andReturn("appId-0_0");

    final StreamsConfig config = mock(StreamsConfig.class);
    expect(config.getProducerConfigs("threadId-0_0-producer")).andReturn(producerConfigs);
    expect(config.getString(StreamsConfig.APPLICATION_ID_CONFIG)).andReturn("appId");
    expect(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)).andReturn(StreamsConfig.EXACTLY_ONCE);

    replay(producerConfigs, config);

    new StreamsProducer(config, "threadId", eosAlphaMockClientSupplier, new TaskId(0, 0), null, logContext, mockTime);

    // Confirm the transactional id was actually written into the producer configs.
    verify(producerConfigs);
}
Example usage of org.apache.kafka.streams.StreamsConfig from the Apache Kafka project:
class KTableKTableForeignKeyJoinMaterializationIntegrationTest, method getTopology.
/**
 * Builds the KTable-KTable foreign-key-join topology under test, varying the
 * materialization of the join result according to the test's {@code queryable}
 * and {@code materialized} flags.
 */
private Topology getTopology(final Properties streamsConfig, final String queryableStoreName) {
    final StreamsBuilder builder = new StreamsBuilder();

    final KTable<String, String> left =
        builder.table(LEFT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));
    final KTable<String, String> right =
        builder.table(RIGHT_TABLE, Consumed.with(Serdes.String(), Serdes.String()));

    // Foreign key: the second '|'-separated token of the left-hand value.
    final Function<String, String> foreignKeyExtractor = value -> value.split("\\|")[1];
    final ValueJoiner<String, String, String> resultJoiner =
        (leftValue, rightValue) -> "(" + leftValue + "," + rightValue + ")";

    // Queryable runs name the result store; otherwise only the value serde is pinned.
    final Materialized<String, String, KeyValueStore<Bytes, byte[]>> storeSpec;
    if (queryable) {
        storeSpec = Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(queryableStoreName)
            .withValueSerde(Serdes.String());
    } else {
        storeSpec = Materialized.with(null, Serdes.String());
    }

    final KTable<String, String> joined = this.materialized
        ? left.join(right, foreignKeyExtractor, resultJoiner, storeSpec)
        : left.join(right, foreignKeyExtractor, resultJoiner);

    joined.toStream().to(OUTPUT, Produced.with(null, Serdes.String()));
    return builder.build(streamsConfig);
}
Aggregations