use of org.apache.kafka.common.errors.WakeupException in project incubator-gobblin by apache.
the class KafkaSimpleStreamingTest method testThreadedExtractor.
/**
 * testThreadedExtractor verifies that it is safe to call close() from a different thread while the
 * original thread is blocked in poll(). We create a topic and, in a side thread, wait for the
 * extractor to return a record (which it never does). The original thread then calls close() on the
 * extractor and verifies that the waiting thread gets an expected exception and exits cleanly.
 */
@Test(timeOut = 30000)
public void testThreadedExtractor() {
  final String topic = "testThreadedExtractor";
  final KafkaSimpleStreamingExtractor<String, byte[]> kSSE = getStreamingExtractor(topic);
  Thread waitingThread = new Thread() {
    public void run() {
      TopicPartition tP = new TopicPartition(topic, 0);
      KafkaSimpleStreamingExtractor.KafkaWatermark kwm =
          new KafkaSimpleStreamingExtractor.KafkaWatermark(tP, new LongWatermark(0));
      byte[] reuse = new byte[1];
      RecordEnvelope<byte[]> oldRecord = new RecordEnvelope<>(reuse, kwm);
      try {
        RecordEnvelope<byte[]> record = kSSE.readRecordEnvelope();
      } catch (Exception e) {
        Assert.assertTrue((e instanceof WakeupException) || (e instanceof ClosedChannelException));
      }
    }
  };
  waitingThread.start();
  try {
    kSSE.close();
    waitingThread.join();
  } catch (Exception e) {
    // should never come here
    throw new Error(e);
  }
}
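The test above leans on standard KafkaConsumer semantics: wakeup() (which close() triggers internally) is the only consumer method documented as safe to call from another thread, and it forces a concurrently blocked poll() to throw WakeupException. A minimal sketch of the same shutdown handshake against a plain consumer, assuming a modern client with the Duration-based poll(); the topic name and properties are placeholders:

final Properties props = new Properties(); // bootstrap servers, deserializers, group id, ...
final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Collections.singletonList("some-topic")); // placeholder topic

Thread poller = new Thread(() -> {
  try {
    while (true) {
      consumer.poll(Duration.ofSeconds(1)); // blocks; unblocked by wakeup()
    }
  } catch (WakeupException expected) {
    // thrown inside poll() when another thread calls consumer.wakeup()
  } finally {
    consumer.close(); // close on the thread that owns the consumer
  }
});
poller.start();

// ... later, from the original thread, to shut down:
consumer.wakeup(); // the one KafkaConsumer method that is thread-safe
poller.join();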
use of org.apache.kafka.common.errors.WakeupException in project kafka by apache.
the class AbstractTask method initializeOffsetLimits.
protected void initializeOffsetLimits() {
  for (TopicPartition partition : partitions) {
    try {
      // TODO: batch API?
      OffsetAndMetadata metadata = consumer.committed(partition);
      stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
    } catch (AuthorizationException e) {
      throw new ProcessorStateException(
          String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
    } catch (WakeupException e) {
      throw e;
    } catch (KafkaException e) {
      throw new ProcessorStateException(
          String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
    }
  }
}
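The catch ordering above is deliberate: WakeupException is the consumer's shutdown signal rather than a failure, so it is rethrown untouched for the thread's outer run loop to handle, while other KafkaExceptions are wrapped in a ProcessorStateException. A hedged sketch of what such an outer loop typically looks like; the running flag and process() helper are illustrative, not Streams internals:

try {
  initializeOffsetLimits(); // may rethrow WakeupException if shutdown begins here
  while (running.get()) {
    process(consumer.poll(Duration.ofMillis(100))); // process() is an illustrative handler
  }
} catch (WakeupException e) {
  if (running.get())
    throw e; // woken without a shutdown request: surface as a real failure
  // otherwise: orderly shutdown triggered by consumer.wakeup() from another thread
} finally {
  consumer.close();
}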
use of org.apache.kafka.common.errors.WakeupException in project ignite by apache.
the class IgniteSourceConnectorTest method checkDataDelivered.
/**
 * Checks that the expected events were delivered to the Kafka server.
 *
 * @param expectedEventsCnt Expected events count.
 * @throws Exception If failed.
 */
private void checkDataDelivered(final int expectedEventsCnt) throws Exception {
  Properties props = new Properties();
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBroker.getBrokerAddress());
  props.put(ConsumerConfig.GROUP_ID_CONFIG, "test-grp");
  props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1);
  props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 10000);
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
      "org.apache.kafka.common.serialization.StringDeserializer");
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
      "org.apache.ignite.stream.kafka.connect.serialization.CacheEventDeserializer");

  final KafkaConsumer<String, CacheEvent> consumer = new KafkaConsumer<>(props);
  consumer.subscribe(Arrays.asList(TOPICS));

  final AtomicInteger evtCnt = new AtomicInteger();

  try {
    // Wait for expected events count.
    GridTestUtils.waitForCondition(new GridAbsPredicate() {
      @Override
      public boolean apply() {
        ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
        for (ConsumerRecord<String, CacheEvent> record : records) {
          info("Record: " + record);
          evtCnt.getAndIncrement();
        }
        return evtCnt.get() >= expectedEventsCnt;
      }
    }, 20_000);

    info("Waiting for unexpected records for 5 secs.");

    assertFalse(GridTestUtils.waitForCondition(new GridAbsPredicate() {
      @Override
      public boolean apply() {
        ConsumerRecords<String, CacheEvent> records = consumer.poll(10);
        for (ConsumerRecord<String, CacheEvent> record : records) {
          error("Unexpected record: " + record);
          evtCnt.getAndIncrement();
        }
        return evtCnt.get() > expectedEventsCnt;
      }
    }, 5_000));
  } catch (WakeupException ignored) {
    // ignore for shutdown.
  } finally {
    consumer.close();
    assertEquals(expectedEventsCnt, evtCnt.get());
  }
}
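The catch (WakeupException ignored) clause exists so that test teardown can abort a long-running poll via consumer.wakeup() without tripping the assertions; the finally block still closes the consumer and verifies the count. Reduced to its poll-until-count essentials, and using the Duration-based poll() of newer clients, the pattern looks like this (a sketch; variable names are placeholders):

int seen = 0;
try {
  while (seen < expectedEventsCnt) {
    ConsumerRecords<String, CacheEvent> recs = consumer.poll(Duration.ofMillis(100));
    seen += recs.count();
  }
} catch (WakeupException ignored) {
  // another thread requested shutdown via consumer.wakeup()
} finally {
  consumer.close();
}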
use of org.apache.kafka.common.errors.WakeupException in project ksql by confluentinc.
the class StatementExecutor method handleStatementWithTerminatedQueries.
/**
 * Attempts to execute a single statement.
 *
 * @param command the command containing the statement to be executed
 * @param commandId the ID used to track the status of the command
 * @param terminatedQueries an optional map from terminated query IDs to the commands that
 *     requested their termination
 * @param wasDropped whether this table/stream was subsequently dropped
 * @throws Exception TODO: Refine this.
 */
private void handleStatementWithTerminatedQueries(
    Command command,
    CommandId commandId,
    Map<QueryId, CommandId> terminatedQueries,
    boolean wasDropped) throws Exception {
  try {
    String statementString = command.getStatement();
    statusStore.put(commandId, new CommandStatus(CommandStatus.Status.PARSING, "Parsing statement"));
    Statement statement = statementParser.parseSingleStatement(statementString);
    statusStore.put(commandId, new CommandStatus(CommandStatus.Status.EXECUTING, "Executing statement"));
    executeStatement(statement, command, commandId, terminatedQueries, wasDropped);
  } catch (WakeupException exception) {
    throw exception;
  } catch (Exception exception) {
    log.error("Failed to handle: " + command, exception);
    CommandStatus errorStatus =
        new CommandStatus(CommandStatus.Status.ERROR, ExceptionUtil.stackTraceToString(exception));
    statusStore.put(commandId, errorStatus);
    completeStatusFuture(commandId, errorStatus);
  }
}
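Because WakeupException is a RuntimeException (it extends KafkaException), the dedicated catch clause must precede the generic catch (Exception); otherwise the shutdown signal would be swallowed and misrecorded as a command error. The ordering constraint in miniature (doWork and recordFailure are illustrative placeholders):

try {
  doWork(); // may throw WakeupException during shutdown, or anything else on failure
} catch (WakeupException e) {
  throw e; // must come first: propagate shutdown to the consumer's poll loop
} catch (Exception e) {
  recordFailure(e); // everything else is reported as an ERROR status
}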
use of org.apache.kafka.common.errors.WakeupException in project apache-kafka-on-k8s by banzaicloud.
the class KafkaConsumerTest method testWakeupWithFetchDataAvailable.
@Test
public void testWakeupWithFetchDataAvailable() throws Exception {
  final Time time = new MockTime();
  Cluster cluster = TestUtils.singletonCluster(topic, 1);
  Node node = cluster.nodes().get(0);

  Metadata metadata = createMetadata();
  metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

  MockClient client = new MockClient(time, metadata);
  client.setNode(node);
  PartitionAssignor assignor = new RoundRobinAssignor();

  KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, true);
  consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
  prepareRebalance(client, node, assignor, singletonList(tp0), null);
  consumer.poll(0);

  // respond to the outstanding fetch so that we have data available on the next poll
  client.respondFrom(fetchResponse(tp0, 0, 5), node);
  client.poll(0, time.milliseconds());

  consumer.wakeup();

  try {
    consumer.poll(0);
    fail();
  } catch (WakeupException e) {
    // expected: wakeup() interrupts the pending poll
  }

  // make sure the position hasn't been updated
  assertEquals(0, consumer.position(tp0));

  // the next poll should return the completed fetch
  ConsumerRecords<String, String> records = consumer.poll(0);
  assertEquals(5, records.count());

  // Increment time asynchronously to clear timeouts in closing the consumer
  final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor();
  exec.scheduleAtFixedRate(new Runnable() {
    @Override
    public void run() {
      time.sleep(sessionTimeoutMs);
    }
  }, 0L, 10L, TimeUnit.MILLISECONDS);
  consumer.close();
  exec.shutdownNow();
  exec.awaitTermination(5L, TimeUnit.SECONDS);
}
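The property this test pins down is that wakeup() interrupts poll() without losing data: the interrupted poll() throws, the consumer's position is unchanged, and the already-completed fetch is delivered by the next poll(). Application code can therefore treat WakeupException purely as a control signal, as in this sketch (the closed flag and process() handler are illustrative):

while (true) {
  try {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
    process(records); // illustrative handler; no records are lost across a wakeup
  } catch (WakeupException e) {
    if (closed.get())
      break; // deliberate shutdown requested via consumer.wakeup()
    // woken without a shutdown request: buffered fetches remain intact, simply poll again
  }
}
consumer.close();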