Usage of java.util.concurrent.TimeoutException in the Apache Kafka project — class KafkaConsumerTest, method consumerCloseTest:
/**
 * Harness for KafkaConsumer.close() tests: builds a consumer backed by a MockClient,
 * subscribes and polls once, then invokes close() from a second thread while the broker
 * responses it is waiting on (commit / leave-group) are fed in one at a time.
 *
 * @param closeTimeoutMs timeout passed to consumer.close(); 0 means close must not block
 * @param responses      broker responses to deliver, in order, while close() is pending
 * @param waitMs         virtual time (MockTime) to advance after all responses are delivered
 * @param interrupt      if true, interrupt the closing thread via Future.cancel(true) and
 *                       expect close() to throw InterruptException
 */
private void consumerCloseTest(final long closeTimeoutMs, List<? extends AbstractResponse> responses, long waitMs, boolean interrupt) throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 5000;
    // MockTime lets the test advance the clock deterministically instead of sleeping.
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 1000);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    // Poll with responses: two fetch responses are queued so the first poll() completes
    // the rebalance and fetches data, leaving the consumer with committed work pending.
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(0);
    // Initiate close() after a commit request on another thread.
    // Kafka consumer is single-threaded, but the implementation allows calls on a
    // different thread as long as the calls are not executed concurrently. So this is safe.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<Exception>();
    try {
        Future<?> future = executor.submit(new Runnable() {
            @Override
            public void run() {
                consumer.commitAsync();
                try {
                    consumer.close(closeTimeoutMs, TimeUnit.MILLISECONDS);
                } catch (Exception e) {
                    // Captured for assertion on the main thread; expected only when interrupted.
                    closeException.set(e);
                }
            }
        });
        // close() should remain blocked while the commit/leave responses are outstanding,
        // if close timeout is not zero.
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            if (closeTimeoutMs != 0)
                fail("Close completed without waiting for commit or leave response");
        } catch (TimeoutException e) {
            // Expected exception
        }
        // Ensure close has started and queued at least one more request after commitAsync
        client.waitForRequests(2, 1000);
        // In non-graceful mode, close() times out without an exception even though commit response is pending
        for (int i = 0; i < responses.size(); i++) {
            client.waitForRequests(1, 1000);
            client.respondFrom(responses.get(i), coordinator);
            if (i != responses.size() - 1) {
                // Every response except the last: close() must still be blocked waiting for more.
                try {
                    future.get(100, TimeUnit.MILLISECONDS);
                    fail("Close completed without waiting for response");
                } catch (TimeoutException e) {
                    // Expected exception
                }
            }
        }
        // Advance virtual time so a pending close timeout (if any) can expire.
        if (waitMs > 0)
            time.sleep(waitMs);
        if (interrupt)
            assertTrue("Close terminated prematurely", future.cancel(true));
        // Make sure that close task completes and another task can be run on the single threaded executor
        executor.submit(new Runnable() {
            @Override
            public void run() {
            }
        }).get(500, TimeUnit.MILLISECONDS);
        if (!interrupt) {
            // Should succeed without TimeoutException or ExecutionException
            future.get(500, TimeUnit.MILLISECONDS);
            assertNull("Unexpected exception during close", closeException.get());
        } else
            assertTrue("Expected exception not thrown " + closeException, closeException.get() instanceof InterruptException);
    } finally {
        executor.shutdownNow();
    }
}
Usage of java.util.concurrent.TimeoutException in the Apache Kafka project — class KafkaConfigBackingStore, method removeConnectorConfig:
/**
 * Remove configuration for a given connector by writing tombstone (null) records for both
 * its connector config and its target state, then blocking until those records are readable
 * from the config log (so the removal is durable and visible).
 *
 * @param connector name of the connector to remove
 * @throws ConnectException if the removal could not be confirmed within the read-to-end timeout
 */
@Override
public void removeConnectorConfig(String connector) {
    log.debug("Removing connector configuration for connector {}", connector);
    try {
        // Null values are tombstones: they delete the connector config and its target state.
        configLog.send(CONNECTOR_KEY(connector), null);
        configLog.send(TARGET_STATE_KEY(connector), null);
        // Block until the tombstones have been written and read back, bounded by the timeout.
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers higher up the stack can observe it.
        Thread.currentThread().interrupt();
        log.error("Failed to remove connector configuration from Kafka: ", e);
        throw new ConnectException("Error removing connector configuration from Kafka", e);
    } catch (ExecutionException | TimeoutException e) {
        log.error("Failed to remove connector configuration from Kafka: ", e);
        throw new ConnectException("Error removing connector configuration from Kafka", e);
    }
}
Usage of java.util.concurrent.TimeoutException in the Apache Kafka project — class KafkaConfigBackingStore, method putTaskConfigs:
/**
 * Write these task configurations and associated commit messages, unless an inconsistency is found that indicates
 * that we would be leaving one of the referenced connectors with an inconsistent state.
 *
 * @param connector the connector to write task configuration
 * @param configs list of task configurations for the connector
 * @throws ConnectException if the task configurations do not resolve inconsistencies found in the existing root
 * and task configurations.
 */
@Override
public void putTaskConfigs(String connector, List<Map<String, String>> configs) {
    // Read to the end of the config log first so we start from a consistent view and consume
    // any outstanding lagging data.
    try {
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers higher up the stack can observe it.
        Thread.currentThread().interrupt();
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    } catch (ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }

    int taskCount = configs.size();
    // Start sending all the individual updates
    int index = 0;
    for (Map<String, String> taskConfig : configs) {
        Struct connectConfig = new Struct(TASK_CONFIGURATION_V0);
        connectConfig.put("properties", taskConfig);
        byte[] serializedConfig = converter.fromConnectData(topic, TASK_CONFIGURATION_V0, connectConfig);
        // Parameterized logging avoids string concatenation when debug is disabled.
        log.debug("Writing configuration for task {} configuration: {}", index, taskConfig);
        ConnectorTaskId connectorTaskId = new ConnectorTaskId(connector, index);
        configLog.send(TASK_KEY(connectorTaskId), serializedConfig);
        index++;
    }

    // Flush the task configs, write the commit marker, and confirm it reached
    // the end of the log.
    try {
        // Read to end to ensure all the task configs have been written
        if (taskCount > 0) {
            configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        }
        // Write the commit message recording how many tasks belong to this connector; readers
        // treat the task configs as valid only once this commit record is observed.
        Struct connectConfig = new Struct(CONNECTOR_TASKS_COMMIT_V0);
        connectConfig.put("tasks", taskCount);
        byte[] serializedConfig = converter.fromConnectData(topic, CONNECTOR_TASKS_COMMIT_V0, connectConfig);
        log.debug("Writing commit for connector {} with {} tasks.", connector, taskCount);
        configLog.send(COMMIT_TASKS_KEY(connector), serializedConfig);
        // Read to end to ensure all the commit messages have been written
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers higher up the stack can observe it.
        Thread.currentThread().interrupt();
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    } catch (ExecutionException | TimeoutException e) {
        log.error("Failed to write root configuration to Kafka: ", e);
        throw new ConnectException("Error writing root configuration to Kafka", e);
    }
}
Usage of java.util.concurrent.TimeoutException in the Apache Kafka project — class KafkaConfigBackingStore, method updateConnectorConfig:
/**
 * Write a (pre-serialized) connector configuration to the config log and block until the
 * record is readable from the end of the log, bounded by the read-to-end timeout.
 *
 * @param connector        name of the connector being updated
 * @param serializedConfig serialized connector configuration to write
 * @throws ConnectException if the write could not be confirmed within the timeout
 */
private void updateConnectorConfig(String connector, byte[] serializedConfig) {
    try {
        configLog.send(CONNECTOR_KEY(connector), serializedConfig);
        // Wait for the record to be written and read back so the update is durable and visible.
        configLog.readToEnd().get(READ_TO_END_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    } catch (InterruptedException e) {
        // Restore the interrupt status so callers higher up the stack can observe it.
        Thread.currentThread().interrupt();
        log.error("Failed to write connector configuration to Kafka: ", e);
        throw new ConnectException("Error writing connector configuration to Kafka", e);
    } catch (ExecutionException | TimeoutException e) {
        log.error("Failed to write connector configuration to Kafka: ", e);
        throw new ConnectException("Error writing connector configuration to Kafka", e);
    }
}
Usage of java.util.concurrent.TimeoutException in the Apache Kafka project — class WorkerSourceTaskTest, method expectOffsetFlush:
/**
 * Records EasyMock/PowerMock expectations for one offset-flush cycle of the worker task.
 *
 * @param succeed if true, expect the flush future to resolve and the task's commit() hook
 *                to run; if false, expect the future to time out and the flush to be cancelled
 */
@SuppressWarnings("unchecked")
private void expectOffsetFlush(boolean succeed) throws Exception {
    EasyMock.expect(offsetWriter.beginFlush()).andReturn(true);
    // Mocked future returned by doFlush(); its get() outcome drives the success/failure path.
    Future<Void> flushFuture = PowerMock.createMock(Future.class);
    EasyMock.expect(offsetWriter.doFlush(EasyMock.anyObject(Callback.class))).andReturn(flushFuture);
    // Should throw for failure
    IExpectationSetters<Void> futureGetExpect = EasyMock.expect(flushFuture.get(EasyMock.anyLong(), EasyMock.anyObject(TimeUnit.class)));
    if (succeed) {
        // Successful flush: commit() is invoked on the source task and get() returns normally.
        sourceTask.commit();
        EasyMock.expectLastCall();
        futureGetExpect.andReturn(null);
    } else {
        // Failed flush: get() throws TimeoutException and the pending flush must be cancelled.
        futureGetExpect.andThrow(new TimeoutException());
        offsetWriter.cancelFlush();
        PowerMock.expectLastCall();
    }
}
Aggregations