Use of io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer in project ksql by confluentinc.
The class ScalablePushRegistry, method close.
/**
 * Called when the server is shutting down.
 */
public synchronized void close() {
  if (closed) {
    LOG.warn("Already closed registry");
    return;
  }
  LOG.info("Closing scalable push registry for topic " + ksqlTopic.getKafkaTopicName());
  final LatestConsumer latestConsumer = this.latestConsumer.get();
  if (latestConsumer != null) {
    latestConsumer.closeAsync();
  }
  for (CatchupConsumer catchupConsumer : catchupConsumers.values()) {
    catchupConsumer.closeAsync();
  }
  catchupConsumers.clear();
  MoreExecutors.shutdownAndAwaitTermination(executorService, 5000, TimeUnit.MILLISECONDS);
  MoreExecutors.shutdownAndAwaitTermination(executorServiceCatchup, 5000, TimeUnit.MILLISECONDS);
  closed = true;
}
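The pattern worth noting here: each consumer is only asked to close asynchronously, and the two executor services are then given a bounded grace period through Guava's MoreExecutors.shutdownAndAwaitTermination, which is what actually waits for (and, past the timeout, interrupts) the worker threads. Below is a minimal, self-contained sketch of that shutdown sequence; the Worker class and the timeout are illustrative stand-ins, not ksql code.

import com.google.common.util.concurrent.MoreExecutors;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class ShutdownSketch {

  // Hypothetical stand-in for LatestConsumer/CatchupConsumer: closeAsync() only
  // signals, and the worker thread notices the flag and exits on its own.
  static final class Worker implements Runnable {
    private volatile boolean closed = false;

    @Override
    public void run() {
      while (!closed) {
        // poll / process records here
      }
    }

    void closeAsync() {
      closed = true;
    }
  }

  public static void main(String[] args) {
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    final Map<String, Worker> workers = new ConcurrentHashMap<>();
    workers.put("q1", new Worker());
    workers.put("q2", new Worker());
    workers.values().forEach(executor::submit);

    // Mirror of the close() flow: signal each worker, clear the map, then shut
    // down the executor with a bounded wait.
    workers.values().forEach(Worker::closeAsync);
    workers.clear();
    MoreExecutors.shutdownAndAwaitTermination(executor, 5000, TimeUnit.MILLISECONDS);
  }
}

Because closeAsync only sets a flag, close() itself never blocks on a consumer thread; the bounded executor shutdown at the end is the only place the caller waits.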
Use of io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer in project ksql by confluentinc.
The class ScalablePushRegistry, method createCatchupConsumer.
/**
 * Creates the catchup consumer and its underlying kafka consumer.
 * @param processingQueue The queue on which to send an error if anything goes wrong
 * @return The new CatchupConsumer
 */
private CatchupConsumer createCatchupConsumer(
    final ProcessingQueue processingQueue,
    final PushOffsetRange offsetRange,
    final String consumerGroup
) {
  KafkaConsumer<Object, GenericRow> consumer = null;
  CatchupConsumer catchupConsumer = null;
  try {
    consumer = kafkaConsumerFactory.create(
        ksqlTopic, logicalSchema, serviceContext, consumerProperties, ksqlConfig, consumerGroup);
    catchupConsumer = catchupConsumerFactory.create(
        ksqlTopic.getKafkaTopicName(),
        isWindowed(),
        logicalSchema,
        consumer,
        latestConsumer::get,
        catchupCoordinator,
        offsetRange,
        Clock.systemUTC(),
        ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_PUSH_V2_CATCHUP_CONSUMER_MSG_WINDOW),
        this::unregisterCatchup);
    return catchupConsumer;
  } catch (Exception e) {
    LOG.error("Couldn't create catchup consumer", e);
    processingQueue.onError();
    // We're not supposed to block here, but if it fails here, hopefully it can immediately close.
    if (consumer != null) {
      consumer.close();
    }
    throw e;
  }
}
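The try/catch here follows a common acquire-or-clean-up shape: if building the wrapping CatchupConsumer fails after the raw KafkaConsumer has already been created, the raw consumer is closed before the exception propagates, so it does not leak. A generic sketch of the same shape follows; Resource, Wrapper and Factory are hypothetical placeholders for the kafka consumer, the catchup consumer and the registry, not ksql APIs.

// Generic acquire-or-clean-up sketch.
final class Resource implements AutoCloseable {
  @Override
  public void close() {
    // release underlying handles
  }
}

final class Wrapper {
  Wrapper(final Resource resource) {
    // may throw on bad configuration
  }
}

final class Factory {
  Wrapper createWrapper() {
    Resource resource = null;
    try {
      resource = new Resource();      // step 1: acquire the raw resource
      return new Wrapper(resource);   // step 2: hand ownership to the wrapper
    } catch (RuntimeException e) {
      // Step 2 failed, so nothing else owns the resource yet; close it here
      // before rethrowing so it does not leak.
      if (resource != null) {
        resource.close();
      }
      throw e;
    }
  }
}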
Use of io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer in project ksql by confluentinc.
The class ScalablePushRegistry, method runCatchup.
private void runCatchup(final CatchupConsumer catchupConsumerToRun, final ProcessingQueue processingQueue) {
  try (CatchupConsumer catchupConsumer = catchupConsumerToRun) {
    catchupConsumer.run();
  } catch (Throwable t) {
    LOG.error("Got error while running catchup", t);
    catchupConsumerToRun.onError();
  } finally {
    catchupConsumerToRun.unregister(processingQueue);
    catchupConsumers.remove(processingQueue.getQueryId());
    // If things ended exceptionally, stop latest
    stopLatestConsumerOnLastRequest();
  }
}
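The control flow here has three layers: try-with-resources closes the consumer whether run() returns or throws, the catch reports the failure without rethrowing (the task runs on an executor thread), and the finally block does the registry bookkeeping either way. A stripped-down sketch of the same flow, with a hypothetical Task class and map key standing in for the consumer and the query id:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class RunAndCleanupSketch {

  // Hypothetical stand-in for CatchupConsumer: runnable and closeable.
  static final class Task implements AutoCloseable {
    void run() {
      // do the work; may throw
    }

    void onError() {
      // notify registered listeners of the failure
    }

    @Override
    public void close() {
      // release resources
    }
  }

  private final Map<String, Task> running = new ConcurrentHashMap<>();

  void runTask(final String id, final Task taskToRun) {
    // try-with-resources closes the task on both the normal and the error path
    try (Task task = taskToRun) {
      task.run();
    } catch (Throwable t) {
      // report, but do not rethrow: the executor thread should end quietly
      taskToRun.onError();
    } finally {
      // bookkeeping happens regardless of outcome
      running.remove(id);
    }
  }
}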
Use of io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer in project ksql by confluentinc.
The class ScalablePushRegistry, method startCatchup.
/**
 * Starts the catchup consumer
 * @param processingQueue The queue to register with the catchup consumer.
 * @param catchupMetadata The catchup metadata
 */
private synchronized void startCatchup(
    final ProcessingQueue processingQueue,
    final CatchupMetadata catchupMetadata
) {
  final CatchupConsumer catchupConsumer = createCatchupConsumer(
      processingQueue, catchupMetadata.getPushOffsetRange(), catchupMetadata.getCatchupConsumerGroup());
  catchupConsumer.register(processingQueue);
  catchupConsumers.put(processingQueue.getQueryId(), catchupConsumer);
  executorServiceCatchup.submit(() -> runCatchup(catchupConsumer, processingQueue));
}
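startCatchup is synchronized, so registering the queue and recording the consumer in catchupConsumers happen atomically with respect to the other synchronized registry methods (such as close); only the long-running runCatchup call is handed to the executor. A minimal sketch of that start-under-lock, run-off-thread split; the names are illustrative, not the ksql API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class StartUnderLockSketch {

  // Hypothetical stand-in for CatchupConsumer.
  static final class Task {
    void register(final String queueId) {
      // attach the caller's queue
    }

    void run() {
      // long-running loop
    }
  }

  private final Map<String, Task> tasks = new ConcurrentHashMap<>();
  private final ExecutorService executor = Executors.newSingleThreadExecutor();
  private boolean closed = false;

  // Bookkeeping happens under the lock so a concurrent close() either sees the
  // task in the map or prevents it from starting; only run() goes async.
  synchronized void start(final String queueId, final Task task) {
    if (closed) {
      throw new IllegalStateException("registry already closed");
    }
    task.register(queueId);
    tasks.put(queueId, task);
    executor.submit(task::run);
  }

  synchronized void close() {
    closed = true;
    tasks.clear();
    executor.shutdownNow();
  }
}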
Use of io.confluent.ksql.physical.scalablepush.consumer.CatchupConsumer in project ksql by confluentinc.
The class ScalablePushRegistryTest, method setUp.
@Before
public void setUp() {
  when(ksqlTopic.getKafkaTopicName()).thenReturn(TOPIC);
  when(kafkaConsumerFactory.create(any(), any(), any(), any(), any(), any())).thenReturn(kafkaConsumer);
  catchupCoordinator = new TestCatchupCoordinator();
  latestConsumer = new TestLatestConsumer(
      TOPIC, false, SCHEMA, kafkaConsumer, catchupCoordinator,
      assignment -> { }, ksqlConfig, Clock.systemUTC());
  latestConsumer2 = new TestLatestConsumer(
      TOPIC, false, SCHEMA, kafkaConsumer, catchupCoordinator,
      assignment -> { }, ksqlConfig, Clock.systemUTC());
  catchupConsumer = new TestCatchupConsumer(
      TOPIC, false, SCHEMA, kafkaConsumer, () -> latestConsumer, catchupCoordinator,
      pushOffsetRange, Clock.systemUTC(), pq -> { });
  when(latestConsumerFactory.create(any(), anyBoolean(), any(), any(), any(), any(), any(), any()))
      .thenReturn(latestConsumer, latestConsumer2);
  when(catchupConsumerFactory.create(any(), anyBoolean(), any(), any(), any(), any(), any(), any(), anyLong(), any()))
      .thenReturn(catchupConsumer);
  when(ksqlTopic.getKeyFormat()).thenReturn(keyFormat);
  when(keyFormat.isWindowed()).thenReturn(false);
  realExecutorService = Executors.newFixedThreadPool(2);
  doAnswer(a -> {
    final Runnable runnable = a.getArgument(0);
    startLatestRunnable.set(runnable);
    realExecutorService.submit(runnable);
    return null;
  }).when(executorService).submit(any(Runnable.class));
  doAnswer(a -> {
    final Runnable runnable = a.getArgument(0);
    realExecutorService.submit(runnable);
    return null;
  }).when(catchupService).submit(any(Runnable.class));
  when(processingQueue.getQueryId()).thenReturn(new QueryId("q1"));
  when(processingQueue2.getQueryId()).thenReturn(new QueryId("q2"));
  registry = new ScalablePushRegistry(
      locator, SCHEMA, false, ImmutableMap.of(), ksqlTopic, serviceContext, ksqlConfig, SOURCE_APP_ID,
      kafkaConsumerFactory, latestConsumerFactory, catchupConsumerFactory, executorService, catchupService);
  when(ksqlConfig.getInt(KsqlConfig.KSQL_QUERY_PUSH_V2_MAX_CATCHUP_CONSUMERS)).thenReturn(10);
}
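The most interesting part of this setup is the doAnswer stubbing: the mocked ExecutorService captures each submitted Runnable and forwards it to a real thread pool, so the test both runs the work and keeps a handle on it (startLatestRunnable). A self-contained sketch of that Mockito technique follows; MyService and the test class are made up for illustration, and it assumes Mockito and JUnit 4 on the classpath.

import static org.junit.Assert.assertNotNull;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import org.junit.Test;

public class ExecutorStubbingSketchTest {

  // Made-up class under test: it hands its work to whatever executor it is given.
  static final class MyService {
    private final ExecutorService executor;

    MyService(final ExecutorService executor) {
      this.executor = executor;
    }

    void start(final Runnable work) {
      executor.submit(work);
    }
  }

  @Test
  public void capturesAndRunsSubmittedWork() throws Exception {
    final ExecutorService mockExecutor = mock(ExecutorService.class);
    final ExecutorService realExecutor = Executors.newSingleThreadExecutor();
    final AtomicReference<Runnable> captured = new AtomicReference<>();

    // Capture each submitted Runnable and delegate it to a real thread pool,
    // mirroring the doAnswer stubbing in ScalablePushRegistryTest.setUp().
    doAnswer(invocation -> {
      final Runnable runnable = invocation.getArgument(0);
      captured.set(runnable);
      realExecutor.submit(runnable);
      return null;
    }).when(mockExecutor).submit(any(Runnable.class));

    new MyService(mockExecutor).start(() -> { });

    realExecutor.shutdown();
    realExecutor.awaitTermination(1, TimeUnit.SECONDS);
    // The test now holds the Runnable for later assertions or manual re-runs.
    assertNotNull(captured.get());
  }
}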