Use of org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder in project apache-kafka-on-k8s by banzaicloud.
From the class StandbyTaskTest, method shouldNotThrowUnsupportedOperationExceptionWhenInitializingStateStores:
@Test
public void shouldNotThrowUnsupportedOperationExceptionWhenInitializingStateStores() throws IOException {
    final String changelogName = "test-application-my-store-changelog";
    final List<TopicPartition> partitions = Utils.mkList(new TopicPartition(changelogName, 0));
    consumer.assign(partitions);
    final Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(changelogName, 0), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions(
        changelogName,
        Utils.mkList(new PartitionInfo(changelogName, 0, Node.noNode(), new Node[0], new Node[0])));

    final InternalStreamsBuilder builder = new InternalStreamsBuilder(new InternalTopologyBuilder());
    builder.stream(Collections.singleton("topic"), new ConsumedInternal<>()).groupByKey().count();

    final StreamsConfig config = createConfig(baseDir);
    final InternalTopologyBuilder internalTopologyBuilder = InternalStreamsBuilderTest.internalTopologyBuilder(builder);
    final ProcessorTopology topology = internalTopologyBuilder.setApplicationId(applicationId).build(0);

    new StandbyTask(taskId, partitions, topology, consumer, changelogReader, config, new MockStreamsMetrics(new Metrics()), stateDirectory);
}
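For orientation, here is a minimal sketch (not part of the excerpt above) of the same count topology written against the public StreamsBuilder facade, which delegates to an InternalStreamsBuilder internally; the class name and the topic name "topic" are placeholders:

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;

public class CountTopologySketch {
    public static Topology build() {
        // public facade; internally backed by an InternalStreamsBuilder
        final StreamsBuilder builder = new StreamsBuilder();
        // same DSL shape as the test: stream -> groupByKey -> count
        builder.stream("topic").groupByKey().count();
        return builder.build();
    }
}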
Use of org.apache.kafka.streams.kstream.internals.InternalStreamsBuilder in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount:
@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));

    // build a windowed aggregation joined to a table on top of the corrupted builder
    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);
    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
    inputTopic
        .groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();

    configureDefault();
    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));

    // the corrupted builder prevents resolving the partition count, so the assignment carries an error code
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
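For comparison, here is a hypothetical sketch of how such an assignment error would surface outside a test, assuming the default error handling in recent Kafka Streams versions moves the client to the ERROR state; the class name, application id, bootstrap address, and topics are placeholders:

import java.util.Properties;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class AssignmentErrorListenerSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "assignment-error-demo"); // placeholder application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");     // placeholder broker address

        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("topic1").to("output"); // placeholder topology

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.setStateListener((newState, oldState) -> {
            if (newState == KafkaStreams.State.ERROR) {
                // a fatal rebalance problem, e.g. an unresolvable partition count,
                // is expected to end up here rather than in decoded assignor userData
                System.err.println("Streams client entered ERROR state");
            }
        });
        streams.start();
    }
}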