Use of com.continuuity.weave.kafka.client.FetchException in project weave by continuuity.
From the class SimpleKafkaClient, method getOffset. The method asynchronously requests up to maxOffsets offsets for a topic partition and completes the returned future either with the decoded offsets or with a FetchException carrying the broker's error code.
@Override
public ListenableFuture<long[]> getOffset(final String topic, final int partition, long time, int maxOffsets) {
  final SettableFuture<long[]> resultFuture = SettableFuture.create();
  final ChannelBuffer body = ChannelBuffers.buffer(Longs.BYTES + Ints.BYTES);
  body.writeLong(time);
  body.writeInt(maxOffsets);

  connectionPool.connect(getTopicBroker(topic, partition).getAddress()).getChannelFuture().addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
      if (checkFailure(future)) {
        return;
      }

      future.getChannel().write(KafkaRequest.createOffsets(topic, partition, body, new ResponseHandler() {
        @Override
        public void received(KafkaResponse response) {
          if (response.getErrorCode() != FetchException.ErrorCode.OK) {
            resultFuture.setException(new FetchException("Failed to fetch offset.", response.getErrorCode()));
          } else {
            // Decode the offset response, which contains a 4-byte count of offsets, followed by
            // that many offsets, each 8 bytes in size.
            ChannelBuffer resultBuffer = response.getBody();
            int size = resultBuffer.readInt();
            long[] result = new long[size];
            for (int i = 0; i < size; i++) {
              result[i] = resultBuffer.readLong();
            }
            resultFuture.set(result);
          }
        }
      })).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
          checkFailure(future);
        }
      });
    }

    // Propagate a failed or cancelled channel operation to the result future.
    private boolean checkFailure(ChannelFuture future) {
      if (!future.isSuccess()) {
        if (future.isCancelled()) {
          resultFuture.cancel(true);
        } else {
          resultFuture.setException(future.getCause());
        }
        return true;
      }
      return false;
    }
  });

  return resultFuture;
}
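For reference, a minimal caller-side sketch of how the future returned by getOffset and the FetchException set on it might be consumed. The client variable, topic name, partition number, and the getErrorCode() accessor are illustrative assumptions, not part of the snippet above; imports are omitted as in the snippet above.

// Hedged usage sketch: block on the future returned by getOffset and surface a FetchException.
// "kafkaClient", "logs" and partition 0 are hypothetical; getErrorCode() is assumed to be the
// accessor matching the (message, errorCode) constructor used in the snippet above.
public void printOffsets(SimpleKafkaClient kafkaClient) throws InterruptedException {
  ListenableFuture<long[]> offsetsFuture =
    kafkaClient.getOffset("logs", 0, System.currentTimeMillis(), 10);
  try {
    // Blocks until the response handler above sets the decoded offsets or an exception.
    for (long offset : offsetsFuture.get()) {
      System.out.println("offset: " + offset);
    }
  } catch (ExecutionException e) {
    if (e.getCause() instanceof FetchException) {
      FetchException fetchException = (FetchException) e.getCause();
      System.err.println("Fetch failed, error code: " + fetchException.getErrorCode());
    } else {
      throw new RuntimeException(e.getCause());
    }
  }
}

A non-blocking caller could instead attach a callback to the ListenableFuture with Guava's Futures.addCallback, which is how the asynchronous style of getOffset is normally consumed.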