Use of org.apache.kafka.streams.processor.internals.ProcessorContextImpl in project ksql by confluentinc.
The class TransientQuerySinkProcessor, method getCurrentPositions:
@SuppressWarnings("unchecked")
private Map<TopicPartition, OffsetAndMetadata> getCurrentPositions() {
  // The Processor API does not expose the task's current consumer positions, so we
  // read them by reflection. Obviously, this code is pretty brittle.
  try {
    if (context.getClass().equals(ProcessorContextImpl.class)) {
      final Field streamTask;
      streamTask = ProcessorContextImpl.class.getDeclaredField("streamTask");
      streamTask.setAccessible(true);
      final StreamTask task = (StreamTask) streamTask.get(context);
      final Method committableOffsetsAndMetadata =
          StreamTask.class.getDeclaredMethod("committableOffsetsAndMetadata");
      committableOffsetsAndMetadata.setAccessible(true);
      return (Map<TopicPartition, OffsetAndMetadata>)
          committableOffsetsAndMetadata.invoke(task);
    } else {
      // Both KafkaStreams and TopologyTestDriver use a ProcessorContextImpl,
      // which should be the only way this processor gets run.
      throw new IllegalStateException(
          "Expected only to run in the KafkaStreams or TopologyTestDriver runtimes.");
    }
  } catch (final NoSuchFieldException | IllegalAccessException
      | NoSuchMethodException | InvocationTargetException e) {
    throw new RuntimeException(e);
  }
}
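The map returned above pairs each input TopicPartition with the task's committable OffsetAndMetadata. As a minimal sketch of how a caller might consume it, here is a hypothetical helper (not part of the ksql source; its name and purpose are illustrative) that flattens the positions for logging or progress reporting, assuming the usual java.util and org.apache.kafka.common imports:

private static Map<String, Long> toOffsetsByPartition(
    final Map<TopicPartition, OffsetAndMetadata> positions) {
  // Hypothetical helper, not part of TransientQuerySinkProcessor: turns the
  // reflectively obtained positions into a plain map keyed by "topic-partition".
  final Map<String, Long> offsets = new HashMap<>();
  for (final Map.Entry<TopicPartition, OffsetAndMetadata> entry : positions.entrySet()) {
    offsets.put(
        entry.getKey().topic() + "-" + entry.getKey().partition(),
        entry.getValue().offset());
  }
  return offsets;
}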
Use of org.apache.kafka.streams.processor.internals.ProcessorContextImpl in project kafka by apache.
The class GlobalStateStoreProviderTest, method before:
@Before
public void before() {
    stores.put("kv-store", Stores.keyValueStoreBuilder(
        Stores.inMemoryKeyValueStore("kv-store"), Serdes.String(), Serdes.String()).build());
    stores.put("ts-kv-store", Stores.timestampedKeyValueStoreBuilder(
        Stores.inMemoryKeyValueStore("ts-kv-store"), Serdes.String(), Serdes.String()).build());
    stores.put("w-store", Stores.windowStoreBuilder(
        Stores.inMemoryWindowStore("w-store", Duration.ofMillis(10L), Duration.ofMillis(2L), false),
        Serdes.String(), Serdes.String()).build());
    stores.put("ts-w-store", Stores.timestampedWindowStoreBuilder(
        Stores.inMemoryWindowStore("ts-w-store", Duration.ofMillis(10L), Duration.ofMillis(2L), false),
        Serdes.String(), Serdes.String()).build());
    stores.put("s-store", Stores.sessionStoreBuilder(
        Stores.inMemorySessionStore("s-store", Duration.ofMillis(10L)),
        Serdes.String(), Serdes.String()).build());
    final ProcessorContextImpl mockContext = niceMock(ProcessorContextImpl.class);
    expect(mockContext.applicationId()).andStubReturn("appId");
    expect(mockContext.metrics()).andStubReturn(
        new StreamsMetricsImpl(new Metrics(), "threadName", StreamsConfig.METRICS_LATEST, new MockTime()));
    expect(mockContext.taskId()).andStubReturn(new TaskId(0, 0));
    expect(mockContext.recordCollector()).andStubReturn(null);
    expect(mockContext.appConfigs()).andStubReturn(CONFIGS);
    expectSerdes(mockContext);
    replay(mockContext);
    for (final StateStore store : stores.values()) {
        store.init((StateStoreContext) mockContext, null);
    }
}
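With the mock context replayed and every store initialized, the test can wrap the stores map in a GlobalStateStoreProvider and look stores up by name. A minimal sketch of that step follows; the local variable names and the assertion are illustrative rather than quoted from the test:

final GlobalStateStoreProvider provider = new GlobalStateStoreProvider(stores);
final List<ReadOnlyKeyValueStore<String, String>> kvStores =
    provider.stores("kv-store", QueryableStoreTypes.<String, String>keyValueStore());
assertEquals(1, kvStores.size());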
Use of org.apache.kafka.streams.processor.internals.ProcessorContextImpl in project kafka by apache.
The class TopologyTestDriver, method setupTask:
@SuppressWarnings("deprecation")
private void setupTask(final StreamsConfig streamsConfig, final StreamsMetricsImpl streamsMetrics,
                       final ThreadCache cache, final TaskConfig taskConfig) {
    if (!partitionsByInputTopic.isEmpty()) {
        consumer.assign(partitionsByInputTopic.values());
        final Map<TopicPartition, Long> startOffsets = new HashMap<>();
        for (final TopicPartition topicPartition : partitionsByInputTopic.values()) {
            startOffsets.put(topicPartition, 0L);
        }
        consumer.updateBeginningOffsets(startOffsets);
        final ProcessorStateManager stateManager = new ProcessorStateManager(
            TASK_ID, Task.TaskType.ACTIVE,
            StreamsConfig.EXACTLY_ONCE.equals(streamsConfig.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG)),
            logContext, stateDirectory, new MockChangelogRegister(),
            processorTopology.storeToChangelogTopic(), new HashSet<>(partitionsByInputTopic.values()));
        final RecordCollector recordCollector = new RecordCollectorImpl(
            logContext, TASK_ID, testDriverProducer,
            streamsConfig.defaultProductionExceptionHandler(), streamsMetrics);
        final InternalProcessorContext context = new ProcessorContextImpl(
            TASK_ID, streamsConfig, stateManager, streamsMetrics, cache);
        task = new StreamTask(
            TASK_ID, new HashSet<>(partitionsByInputTopic.values()), processorTopology, consumer,
            taskConfig, streamsMetrics, stateDirectory, cache, mockWallClockTime,
            stateManager, recordCollector, context, logContext);
        task.initializeIfNeeded();
        task.completeRestoration(noOpResetter -> { });
        task.processorContext().setRecordContext(null);
    } else {
        task = null;
    }
}
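The task built here is the internal engine behind the driver's public test API. For orientation, a minimal sketch of that public-facing side; the topic names, serdes, and assertion are illustrative and not taken from this class, and the topology variable plus the JUnit assertEquals import are assumed to exist in the test:

final Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "test-app");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, props)) {
    final TestInputTopic<String, String> input =
        driver.createInputTopic("input-topic", new StringSerializer(), new StringSerializer());
    final TestOutputTopic<String, String> output =
        driver.createOutputTopic("output-topic", new StringDeserializer(), new StringDeserializer());
    input.pipeInput("key", "value");
    assertEquals("value", output.readValue());
}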
Use of org.apache.kafka.streams.processor.internals.ProcessorContextImpl in project kafka by apache.
The class StreamThreadStateStoreProviderTest, method createStreamsTask:
private StreamTask createStreamsTask(final StreamsConfig streamsConfig, final MockClientSupplier clientSupplier,
                                     final ProcessorTopology topology, final TaskId taskId) {
    final Metrics metrics = new Metrics();
    final LogContext logContext = new LogContext("test-stream-task ");
    final Set<TopicPartition> partitions = Collections.singleton(new TopicPartition(topicName, taskId.partition()));
    final ProcessorStateManager stateManager = new ProcessorStateManager(
        taskId, Task.TaskType.ACTIVE, StreamsConfigUtils.eosEnabled(streamsConfig), logContext, stateDirectory,
        new StoreChangelogReader(new MockTime(), streamsConfig, logContext, clientSupplier.adminClient,
            clientSupplier.restoreConsumer, new MockStateRestoreListener()),
        topology.storeToChangelogTopic(), partitions);
    final RecordCollector recordCollector = new RecordCollectorImpl(logContext, taskId,
        new StreamsProducer(streamsConfig, "threadId", clientSupplier, new TaskId(0, 0), UUID.randomUUID(), logContext, Time.SYSTEM),
        streamsConfig.defaultProductionExceptionHandler(), new MockStreamsMetrics(metrics));
    final StreamsMetricsImpl streamsMetrics = new MockStreamsMetrics(metrics);
    final InternalProcessorContext context = new ProcessorContextImpl(taskId, streamsConfig, stateManager, streamsMetrics, null);
    return new StreamTask(taskId, partitions, topology, clientSupplier.consumer,
        new TopologyConfig(null, streamsConfig, new Properties()).getTaskConfig(),
        streamsMetrics, stateDirectory, EasyMock.createNiceMock(ThreadCache.class), new MockTime(),
        stateManager, recordCollector, context, logContext);
}
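A sketch of how a test might drive the task returned by this helper before querying its stores through a StreamThreadStateStoreProvider; the initialization calls mirror the TopologyTestDriver snippet above, while the provider wiring and the streamsConfig, clientSupplier, and topology variables are assumed to be set up elsewhere in the test class:

final StreamTask streamsTask = createStreamsTask(streamsConfig, clientSupplier, topology, new TaskId(0, 0));
streamsTask.initializeIfNeeded();
streamsTask.completeRestoration(noOpResetter -> { });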