Use of kafka.common.KafkaException in project flink by apache.
Class KafkaTestEnvironmentImpl, method getKafkaServer.
/**
* Copied from com.github.sakserv.minicluster.KafkaLocalBrokerIntegrationTest (ASL licensed)
*/
protected KafkaServer getKafkaServer(int brokerId, File tmpFolder) throws Exception {
    Properties kafkaProperties = new Properties();
    // properties have to be Strings
    kafkaProperties.put("advertised.host.name", KAFKA_HOST);
    kafkaProperties.put("broker.id", Integer.toString(brokerId));
    kafkaProperties.put("log.dir", tmpFolder.toString());
    kafkaProperties.put("zookeeper.connect", zookeeperConnectionString);
    kafkaProperties.put("message.max.bytes", String.valueOf(50 * 1024 * 1024));
    kafkaProperties.put("replica.fetch.max.bytes", String.valueOf(50 * 1024 * 1024));
    // for CI stability, increase the ZooKeeper session timeout
    kafkaProperties.put("zookeeper.session.timeout.ms", zkTimeout);
    kafkaProperties.put("zookeeper.connection.timeout.ms", zkTimeout);
    if (additionalServerProperties != null) {
        kafkaProperties.putAll(additionalServerProperties);
    }
    final int numTries = 5;
    for (int i = 1; i <= numTries; i++) {
        int kafkaPort = NetUtils.getAvailablePort();
        kafkaProperties.put("port", Integer.toString(kafkaPort));
        // to support a secure Kafka cluster
        if (secureMode) {
            LOG.info("Adding Kafka secure configurations");
            kafkaProperties.put("listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
            kafkaProperties.put("advertised.listeners", "SASL_PLAINTEXT://" + KAFKA_HOST + ":" + kafkaPort);
            kafkaProperties.putAll(getSecureProperties());
        }
        KafkaConfig kafkaConfig = new KafkaConfig(kafkaProperties);
        try {
            scala.Option<String> stringNone = scala.Option.apply(null);
            KafkaServer server = new KafkaServer(kafkaConfig, SystemTime$.MODULE$, stringNone);
            server.startup();
            return server;
        } catch (KafkaException e) {
            if (e.getCause() instanceof BindException) {
                // port conflict, retry with a fresh port
                LOG.info("Port conflict when starting Kafka Broker. Retrying...");
            } else {
                throw e;
            }
        }
    }
    throw new Exception("Could not start Kafka after " + numTries + " retries due to port conflicts.");
}
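KafkaServer.startup() surfaces a port collision as a KafkaException whose cause is a java.net.BindException, which is what makes the retry loop above work. Below is a minimal, self-contained sketch of the same retry-on-bind-conflict pattern, with a plain ServerSocket standing in for the broker; the port numbers and the deliberately pre-bound blocker socket are illustrative only.

import java.io.IOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class PortRetryExample {

    public static void main(String[] args) throws Exception {
        final int numTries = 5;
        // Occupy a port so the first attempt collides, forcing one retry.
        try (ServerSocket blocker = new ServerSocket()) {
            blocker.bind(new InetSocketAddress("localhost", 50050));
            for (int i = 1; i <= numTries; i++) {
                int port = 50049 + i; // first candidate collides with the blocker
                try (ServerSocket server = new ServerSocket()) {
                    server.bind(new InetSocketAddress("localhost", port));
                    System.out.println("Started on port " + server.getLocalPort());
                    return;
                } catch (BindException e) {
                    // same shape as the KafkaException/BindException branch above
                    System.out.println("Port conflict on " + port + ", retrying...");
                }
            }
        }
        throw new IOException("Could not bind after " + numTries + " retries.");
    }
}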
Use of kafka.common.KafkaException in project drill by axbaretto.
Class MessageIterator, method hasNext.
@Override
public boolean hasNext() {
    if (recordIter != null && recordIter.hasNext()) {
        return true;
    }
    long nextPosition = kafkaConsumer.position(topicPartition);
    if (nextPosition >= endOffset) {
        return false;
    }
    ConsumerRecords<byte[], byte[]> consumerRecords = null;
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        consumerRecords = kafkaConsumer.poll(kafkaPollTimeOut);
    } catch (KafkaException ke) {
        logger.error(ke.getMessage(), ke);
        throw UserException.dataReadError(ke).message(ke.getMessage()).build(logger);
    }
    stopwatch.stop();
    if (consumerRecords.isEmpty()) {
        String errorMsg = new StringBuilder()
                .append("Failed to fetch messages within ")
                .append(kafkaPollTimeOut)
                .append(" milliseconds. Consider increasing the value of the property: ")
                .append(ExecConstants.KAFKA_POLL_TIMEOUT)
                .toString();
        throw UserException.dataReadError().message(errorMsg).build(logger);
    }
    long lastFetchTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    logger.debug("Total number of messages fetched : {}", consumerRecords.count());
    logger.debug("Time taken to fetch : {} milliseconds", lastFetchTime);
    totalFetchTime += lastFetchTime;
    recordIter = consumerRecords.iterator();
    return recordIter.hasNext();
}
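hasNext here doubles as the fetch trigger: it polls only when the current batch is exhausted and the partition position is still below endOffset. Below is a minimal sketch of that lazy-batch-iterator shape, with successive poll() results replaced by a hypothetical list of pre-built batches; note that, unlike the Drill code, an empty batch here simply ends iteration rather than raising an error.

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class LazyBatchIterator implements Iterator<Integer> {
    private final List<List<Integer>> batches; // stand-in for successive poll() results
    private int nextBatch = 0;                 // stand-in for the partition position
    private Iterator<Integer> recordIter;      // current batch, like recordIter above

    LazyBatchIterator(List<List<Integer>> batches) {
        this.batches = batches;
    }

    @Override
    public boolean hasNext() {
        if (recordIter != null && recordIter.hasNext()) {
            return true; // current batch not exhausted; no fetch needed
        }
        if (nextBatch >= batches.size()) {
            return false; // reached the "endOffset"
        }
        recordIter = batches.get(nextBatch++).iterator(); // the "poll"
        return recordIter.hasNext();
    }

    @Override
    public Integer next() {
        return recordIter.next();
    }

    public static void main(String[] args) {
        LazyBatchIterator it = new LazyBatchIterator(
                Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3, 4)));
        while (it.hasNext()) {
            System.out.println(it.next()); // prints 1 2 3 4, fetching lazily
        }
    }
}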
Use of kafka.common.KafkaException in project jstorm by alibaba.
Class KafkaConsumer, method fetchMessages.
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder()
            .clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs)
            .build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            // no leader available for this partition
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException
                || e instanceof SocketTimeoutException
                || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                String host = simpleConsumer.host();
                int port = simpleConsumer.port();
                simpleConsumer = null;
                throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse == null) {
        // a network error occurred before a leader was found; nothing to read
        return null;
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            long startOffset = getOffset(topic, partition, config.startOffsetTime);
            offset = startOffset;
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        return fetchResponse.messageSet(topic, partition);
    }
}
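The catch block above classifies failures: transient network errors are wrapped in a KafkaException that carries the broker's host and port (so the caller can refresh the leader), while everything else is rethrown as a RuntimeException. Below is a minimal sketch of that classification pattern, with a hypothetical BrokerException standing in for kafka.common.KafkaException.

import java.io.IOException;
import java.net.ConnectException;
import java.net.SocketTimeoutException;
import java.nio.channels.UnresolvedAddressException;

public class ErrorClassification {

    // Hypothetical stand-in for kafka.common.KafkaException.
    static class BrokerException extends RuntimeException {
        BrokerException(String msg, Throwable cause) {
            super(msg, cause);
        }
    }

    // Mirrors the instanceof chain in fetchMessages above.
    static boolean isNetworkError(Exception e) {
        return e instanceof ConnectException
                || e instanceof SocketTimeoutException
                || e instanceof IOException
                || e instanceof UnresolvedAddressException;
    }

    // Wraps network errors with the broker address so the caller can refresh
    // the leader; rethrows everything else unchanged.
    static void classify(Exception e, String host, int port) {
        if (isNetworkError(e)) {
            throw new BrokerException(
                    "Network error when fetching messages: " + host + ":" + port
                            + " , " + e.getMessage(), e);
        }
        throw new RuntimeException(e);
    }

    public static void main(String[] args) {
        try {
            classify(new ConnectException("connection refused"), "broker-1", 9092);
        } catch (BrokerException be) {
            System.out.println("caught: " + be.getMessage());
        }
    }
}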
Use of kafka.common.KafkaException in project drill by apache.
Class MessageIterator, method hasNext.
@Override
public boolean hasNext() {
    if (recordIter != null && recordIter.hasNext()) {
        return true;
    }
    long nextPosition = kafkaConsumer.position(topicPartition);
    if (nextPosition >= endOffset) {
        return false;
    }
    ConsumerRecords<byte[], byte[]> consumerRecords;
    Stopwatch stopwatch = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    try {
        consumerRecords = kafkaConsumer.poll(Duration.ofMillis(kafkaPollTimeOut));
    } catch (KafkaException ke) {
        throw UserException.dataReadError(ke).message(ke.getMessage()).build(logger);
    } finally {
        if (stopwatch != null) {
            stopwatch.stop();
        }
    }
    if (consumerRecords.isEmpty()) {
        throw UserException.dataReadError()
                .message("Failed to fetch messages within %s milliseconds. "
                        + "Consider increasing the value of the property: %s",
                        kafkaPollTimeOut, ExecConstants.KAFKA_POLL_TIMEOUT)
                .build(logger);
    }
    if (stopwatch != null) {
        long lastFetchTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        logger.debug("Time taken to fetch : {} milliseconds", lastFetchTime);
        totalFetchTime += lastFetchTime;
        logger.debug("Total number of messages fetched : {}", consumerRecords.count());
    }
    recordIter = consumerRecords.iterator();
    return recordIter.hasNext();
}
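Compared with the axbaretto version above, this one polls with a java.time.Duration and creates the Stopwatch only when debug logging is enabled, so production runs skip the timing overhead entirely. Below is a minimal sketch of that gating pattern, with System.nanoTime() standing in for Guava's Stopwatch and a system property standing in for logger.isDebugEnabled().

import java.time.Duration;

public class GatedTiming {
    // Stand-in for logger.isDebugEnabled(); enable with -Dgatedtiming.debug=true.
    private static final boolean DEBUG = Boolean.getBoolean("gatedtiming.debug");

    public static void main(String[] args) throws InterruptedException {
        Duration pollTimeout = Duration.ofMillis(200);
        Long start = DEBUG ? System.nanoTime() : null; // timer exists only when debugging
        Thread.sleep(pollTimeout.toMillis());          // stand-in for kafkaConsumer.poll(...)
        if (start != null) {
            long elapsedMs = (System.nanoTime() - start) / 1_000_000;
            System.out.println("Time taken to fetch : " + elapsedMs + " milliseconds");
        }
    }
}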