Use of kafka.javaapi.consumer.SimpleConsumer in project heron by twitter.
The class TestUtils, method getKafkaConsumer.
public static SimpleConsumer getKafkaConsumer(KafkaTestBroker broker) {
  BrokerHosts brokerHosts = getBrokerHosts(broker);
  // Note: this KafkaConfig is constructed but not used by the consumer below.
  KafkaConfig kafkaConfig = new KafkaConfig(brokerHosts, TOPIC);
  SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
  return simpleConsumer;
}
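For context, a minimal sketch of how a test might exercise the returned consumer against the embedded broker. The topic name, partition, and starting offset are illustrative assumptions, not part of the Heron test; FetchRequestBuilder, FetchResponse, and MessageAndOffset come from the kafka.api, kafka.javaapi, and kafka.message packages.

SimpleConsumer consumer = TestUtils.getKafkaConsumer(broker);
FetchRequest request = new FetchRequestBuilder()
  .clientId("testClient")
  .addFetch("testTopic", 0, 0L, 1024)  // assumed topic, partition, and offset
  .build();
FetchResponse response = consumer.fetch(request);
for (MessageAndOffset messageAndOffset : response.messageSet("testTopic", 0)) {
  // each entry carries the message payload and its offset in the log
  System.out.println("fetched message at offset " + messageAndOffset.offset());
}
consumer.close();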
Use of kafka.javaapi.consumer.SimpleConsumer in project heron by twitter.
The class ZkCoordinatorTest, method setUp.
@Before
public void setUp() throws Exception {
  MockitoAnnotations.initMocks(this);
  server = new TestingServer();
  String connectionString = server.getConnectString();
  ZkHosts hosts = new ZkHosts(connectionString);
  hosts.refreshFreqSecs = 1;
  spoutConfig = new SpoutConfig(hosts, "topic", "/test", "id");
  Map<String, Object> conf = buildZookeeperConfig(server);
  state = new ZkState(conf);
  simpleConsumer = new SimpleConsumer("localhost", broker.getPort(), 60000, 1024, "testClient");
  // Stub the partition registry so every registration hands back the local test consumer.
  when(dynamicPartitionConnections.register(any(Broker.class), any(String.class), anyInt()))
      .thenReturn(simpleConsumer);
}
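The buildZookeeperConfig helper is not shown on this page. A plausible sketch, based on the storm-kafka test this class derives from; the exact config keys and timeout values are assumptions:

private Map<String, Object> buildZookeeperConfig(TestingServer server) {
  Map<String, Object> conf = new HashMap<>();
  // Point the transactional ZooKeeper state at the in-process test server.
  conf.put(Config.TRANSACTIONAL_ZOOKEEPER_PORT, server.getPort());
  conf.put(Config.TRANSACTIONAL_ZOOKEEPER_SERVERS, Arrays.asList("localhost"));
  conf.put(Config.STORM_ZOOKEEPER_SESSION_TIMEOUT, 20000);
  conf.put(Config.STORM_ZOOKEEPER_RETRY_TIMES, 3);
  conf.put(Config.STORM_ZOOKEEPER_RETRY_INTERVAL, 30);
  return conf;
}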
Use of kafka.javaapi.consumer.SimpleConsumer in project cdap by caskdata.
The class KafkaLogProcessorPipeline, method shutDown.
@Override
protected void shutDown() throws Exception {
  LOG.debug("Shutting down log processor pipeline for {}", name);
  fetchExecutor.shutdownNow();
  try {
    context.stop();
    // Persist the checkpoints. This can only be done after the appenders have been
    // stopped successfully. Since persistCheckpoints() never throws, putting it
    // inside the try block is safe.
    persistCheckpoints();
  } catch (Exception e) {
    // Just log; don't fail the shutdown.
    LOG.warn("Exception raised when stopping pipeline {}", name, e);
  }
  for (SimpleConsumer consumer : kafkaConsumers.values()) {
    try {
      consumer.close();
    } catch (Exception e) {
      // Just log; don't fail the shutdown.
      LOG.warn("Exception raised when closing Kafka consumer.", e);
    }
  }
  LOG.info("Log processor pipeline for {} stopped with latest checkpoints {}", name, checkpoints);
}
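The kafkaConsumers map drained above is populated elsewhere in the pipeline. A hypothetical sketch of the usual caching pattern with the old SimpleConsumer API, one consumer per broker leader; the method name, map key, and constants are assumptions, not the CDAP code:

private SimpleConsumer getOrCreateConsumer(String topic, int partition) {
  BrokerInfo leader = brokerService.getLeader(topic, partition);
  if (leader == null) {
    return null;  // no leader available right now; the caller retries later
  }
  // Cache one SimpleConsumer per broker so shutDown() can close them all.
  return kafkaConsumers.computeIfAbsent(leader, info ->
    new SimpleConsumer(info.getHost(), info.getPort(), SO_TIMEOUT_MILLIS, BUFFER_SIZE, "pipeline-" + name));
}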
Use of kafka.javaapi.consumer.SimpleConsumer in project cdap by caskdata.
The class KafkaConsumer, method fetchMessageSet.
private ByteBufferMessageSet fetchMessageSet(long fetchOffset) throws OffsetOutOfRangeException {
  Preconditions.checkArgument(fetchOffset >= 0, String.format("Illegal fetch offset %d", fetchOffset));
  int failureCount = 0;
  while (true) {
    SimpleConsumer consumer = getConsumer();
    FetchRequest req = new FetchRequestBuilder()
      .clientId(clientName)
      .addFetch(topic, partition, fetchOffset, BUFFER_SIZE_BYTES)
      .maxWait(fetchTimeoutMs)
      .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (!fetchResponse.hasError()) {
      return fetchResponse.messageSet(topic, partition);
    }
    short errorCode = fetchResponse.errorCode(topic, partition);
    if (++failureCount >= MAX_KAFKA_FETCH_RETRIES) {
      throw new RuntimeException(String.format(
        "Error fetching data from broker %s:%d for topic %s, partition %d. Error code: %d",
        consumer.host(), consumer.port(), topic, partition, errorCode));
    }
    LOG.warn("Error fetching data from broker {}:{} for topic {}, partition {}. Error code: {}",
             consumer.host(), consumer.port(), topic, partition, errorCode);
    if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
        "Requested offset %d is out of range for topic %s partition %d", fetchOffset, topic, partition));
    }
    // Close the consumer so the next iteration re-resolves the broker before retrying.
    closeConsumer();
  }
}
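A minimal sketch of how the returned ByteBufferMessageSet is typically consumed with this API; the starting offset and variable names are illustrative:

long offset = 0L;  // assumed starting offset
for (MessageAndOffset messageAndOffset : fetchMessageSet(offset)) {
  ByteBuffer payload = messageAndOffset.message().payload();
  byte[] bytes = new byte[payload.remaining()];
  payload.get(bytes);
  // process bytes, then remember where the next fetch should start
  offset = messageAndOffset.nextOffset();
}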
Use of kafka.javaapi.consumer.SimpleConsumer in project cdap by caskdata.
The class KafkaOffsetResolver, method getStartOffset.
/**
 * Checks whether the message fetched with the offset {@code checkpoint.getNextOffset() - 1} contains the
 * same timestamp as the given checkpoint. If they match, directly returns {@code checkpoint.getNextOffset()}.
 * If they don't, searches for the smallest offset of the message with the same log event time
 * as {@code checkpoint.getNextEventTime()}.
 *
 * @param checkpoint a {@link Checkpoint} containing the next offset of a message and its log event timestamp.
 *                   {@link Checkpoint#getNextOffset()}, {@link Checkpoint#getNextEventTime()}
 *                   and {@link Checkpoint#getMaxEventTime()} must all return a non-negative long
 * @param partition the partition in the topic to search for the matching offset
 * @return the next offset of the message with the smallest offset and a log event time equal to
 *         {@code checkpoint.getNextEventTime()}, or {@code -1} if no such offset can be found or
 *         {@code checkpoint.getNextOffset()} is negative
 * @throws LeaderNotAvailableException if there is no Kafka broker to talk to
 * @throws OffsetOutOfRangeException if the given offset is out of range
 * @throws NotLeaderForPartitionException if the broker that the consumer is talking to is not the leader
 *                                        for the given topic and partition
 * @throws UnknownTopicOrPartitionException if the topic or partition is not known by the Kafka server
 * @throws UnknownServerException if the Kafka server responded with an error
 */
long getStartOffset(final Checkpoint checkpoint, final int partition) {
  // This should never happen
  Preconditions.checkArgument(checkpoint.getNextOffset() > 0, "Invalid checkpoint offset");
  // Get the BrokerInfo needed to construct a SimpleConsumer
  String topic = config.getTopic();
  BrokerInfo brokerInfo = brokerService.getLeader(topic, partition);
  if (brokerInfo == null) {
    throw new LeaderNotAvailableException(String.format(
      "BrokerInfo from BrokerService is null for topic %s partition %d. Will retry in next run.",
      topic, partition));
  }
  SimpleConsumer consumer = new SimpleConsumer(brokerInfo.getHost(), brokerInfo.getPort(),
                                               SO_TIMEOUT_MILLIS, BUFFER_SIZE,
                                               "offset-finder-" + topic + "-" + partition);
  // Fetch the message at offset checkpoint.getNextOffset() - 1 and compare its timestamp
  // with the event time recorded in the checkpoint.
  long offset = checkpoint.getNextOffset() - 1;
  try {
    long timestamp = getEventTimeByOffset(consumer, partition, offset);
    if (timestamp == checkpoint.getNextEventTime()) {
      return checkpoint.getNextOffset();
    }
    // This can happen in a replicated cluster.
    LOG.debug("Event timestamp in {}:{} at offset {} is {}. It doesn't match the checkpoint timestamp {}",
              topic, partition, offset, timestamp, checkpoint.getNextEventTime());
  } catch (NotFoundException | OffsetOutOfRangeException e) {
    // The timestamp cannot be found at that offset. This can also happen in a replicated cluster.
    LOG.debug("Cannot get valid log event in {}:{} at offset {}", topic, partition, offset);
  }
  // Find the smallest offset whose event matches the checkpoint timestamp.
  long nextOffset = findStartOffset(consumer, partition, checkpoint.getNextEventTime());
  LOG.debug("Found new nextOffset {} for topic {} partition {} with existing checkpoint {}.",
            nextOffset, topic, partition, checkpoint);
  return nextOffset;
}
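The getEventTimeByOffset helper used above is defined elsewhere in KafkaOffsetResolver. A hypothetical sketch of what it does: fetch the single message at the given offset and decode its log event time. The decodeEventTime step and the exception details are assumptions, not the CDAP implementation:

private long getEventTimeByOffset(SimpleConsumer consumer, int partition, long offset) throws NotFoundException {
  String topic = config.getTopic();
  FetchRequest req = new FetchRequestBuilder()
    .clientId("offset-finder-" + topic + "-" + partition)
    .addFetch(topic, partition, offset, BUFFER_SIZE)
    .build();
  FetchResponse fetchResponse = consumer.fetch(req);
  if (fetchResponse.hasError()) {
    if (fetchResponse.errorCode(topic, partition) == ErrorMapping.OffsetOutOfRangeCode()) {
      throw new OffsetOutOfRangeException(String.format(
        "Offset %d is out of range for topic %s partition %d", offset, topic, partition));
    }
    throw new NotFoundException(String.format(
      "No message found in %s:%d at offset %d", topic, partition, offset));
  }
  for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    if (messageAndOffset.offset() == offset) {
      // decodeEventTime is an assumed helper that reads the log event timestamp from the payload.
      return decodeEventTime(messageAndOffset.message().payload());
    }
  }
  throw new NotFoundException(String.format(
    "No message found in %s:%d at offset %d", topic, partition, offset));
}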