Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class KafkaConsumerTest, method testOsDefaultSocketBufferSizes.
@Test
public void testOsDefaultSocketBufferSizes() {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE);
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
    consumer.close();
}
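For readers who want to try the same constructor overload outside a test harness, here is a minimal sketch: a consumer built from a config map plus two ByteArrayDeserializer instances, which then subscribes and polls once. The broker address localhost:9092, the group id raw-readers, and the topic raw-events are placeholder assumptions, not values taken from the test above.

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class RawByteConsumerSketch {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        // Placeholder broker address and group id; adjust for a real cluster.
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "raw-readers");
        // Same constructor overload as the test: the deserializers are passed directly,
        // so no key/value deserializer entries are needed in the config map.
        try (KafkaConsumer<byte[], byte[]> consumer =
                 new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("raw-events"));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                System.out.printf("offset=%d valueBytes=%d%n", record.offset(), record.value().length);
            }
        }
    }
}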
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class FetcherTest, method testListOffsetsSendsIsolationLevel.
private void testListOffsetsSendsIsolationLevel(IsolationLevel isolationLevel) {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, isolationLevel);
    assignFromUser(singleton(tp0));
    subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
    client.prepareResponse(body -> {
        ListOffsetsRequest request = (ListOffsetsRequest) body;
        return request.isolationLevel() == isolationLevel;
    }, listOffsetResponse(Errors.NONE, 1L, 5L));
    fetcher.resetOffsetsIfNeeded();
    consumerClient.pollNoWakeup();
    assertFalse(subscriptions.isOffsetResetNeeded(tp0));
    assertTrue(subscriptions.isFetchable(tp0));
    assertEquals(5, subscriptions.position(tp0).offset);
}
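The isolation level in this test is injected through the fetcher test harness; on a regular KafkaConsumer the equivalent choice is made with the isolation.level setting. A small sketch follows, assuming the same Kafka client imports as the snippets above and a reachable broker at a placeholder address.

// Sketch: build a consumer that only returns committed transactional records.
// The broker address "localhost:9092" is a placeholder, not a value from the test.
private static KafkaConsumer<byte[], byte[]> newReadCommittedConsumer() {
    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // "read_committed" is the config-level counterpart of IsolationLevel.READ_COMMITTED;
    // the default is "read_uncommitted".
    config.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    return new KafkaConsumer<>(config, new ByteArrayDeserializer(), new ByteArrayDeserializer());
}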
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project kafka by apache.
The class FetcherTest, method testEmptyControlBatch.
@Test
public void testEmptyControlBatch() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 1;
    // Empty control batch should not cause an exception
    DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.MAGIC_VALUE_V2, 1L, (short) 0, -1, 0, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH, TimestampType.CREATE_TIME, time.milliseconds(), true, true);
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset, new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()), new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(body -> {
        FetchRequest request = (FetchRequest) body;
        assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
        return true;
    }, fullFetchResponseWithAbortedTransactions(records, Collections.emptyList(), Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
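The final assertion shows that the two transactional data records come through while the empty control batch is skipped. As an illustration of what the buffer built above contains, here is a hypothetical helper (not part of FetcherTest) that walks the batches of a MemoryRecords buffer via the org.apache.kafka.common.record API.

// Hypothetical helper: list every batch in a MemoryRecords buffer and flag the
// control batches (control batches carry transaction markers rather than user data).
private static void describeBatches(MemoryRecords records) {
    for (RecordBatch batch : records.batches()) {
        System.out.printf("baseOffset=%d lastOffset=%d control=%b%n",
            batch.baseOffset(), batch.lastOffset(), batch.isControlBatch());
    }
}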
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project drill by apache.
The class KafkaGroupScan, method init.
/**
 * Computes work per topic partition, based on the start and end offsets of each
 * corresponding topic partition.
 */
private void init() {
    partitionWorkMap = Maps.newHashMap();
    Collection<DrillbitEndpoint> endpoints = kafkaStoragePlugin.getContext().getBits();
    Map<String, DrillbitEndpoint> endpointMap = endpoints.stream().collect(Collectors.toMap(DrillbitEndpoint::getAddress, Function.identity(), (o, n) -> n));
    Map<TopicPartition, Long> startOffsetsMap = Maps.newHashMap();
    Map<TopicPartition, Long> endOffsetsMap = Maps.newHashMap();
    List<PartitionInfo> topicPartitions;
    String topicName = kafkaScanSpec.getTopicName();
    KafkaConsumer<?, ?> kafkaConsumer = null;
    try {
        kafkaConsumer = new KafkaConsumer<>(kafkaStoragePlugin.getConfig().getKafkaConsumerProps(), new ByteArrayDeserializer(), new ByteArrayDeserializer());
        if (!kafkaConsumer.listTopics().containsKey(topicName)) {
            throw UserException.dataReadError().message("Table '%s' does not exist", topicName).build(logger);
        }
        kafkaConsumer.subscribe(Collections.singletonList(topicName));
        // Per the KafkaConsumer JavaDoc, seekToBeginning/seekToEnd evaluate lazily,
        // seeking to the first/last offset in all partitions only when poll(long) or
        // position(TopicPartition) is called
        kafkaConsumer.poll(Duration.ofSeconds(5));
        Set<TopicPartition> assignments = waitForConsumerAssignment(kafkaConsumer);
        topicPartitions = kafkaConsumer.partitionsFor(topicName);
        // fetch start offsets for each topic partition
        kafkaConsumer.seekToBeginning(assignments);
        for (TopicPartition topicPartition : assignments) {
            startOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
        // fetch end offsets for each topic partition
        kafkaConsumer.seekToEnd(assignments);
        for (TopicPartition topicPartition : assignments) {
            endOffsetsMap.put(topicPartition, kafkaConsumer.position(topicPartition));
        }
    } catch (Exception e) {
        throw UserException.dataReadError(e).message("Failed to fetch start/end offsets of the topic %s", topicName).addContext(e.getMessage()).build(logger);
    } finally {
        kafkaStoragePlugin.registerToClose(kafkaConsumer);
    }
    // compute the work assigned to each endpoint
    for (PartitionInfo partitionInfo : topicPartitions) {
        TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
        long lastCommittedOffset = startOffsetsMap.get(topicPartition);
        long latestOffset = endOffsetsMap.get(topicPartition);
        logger.debug("Latest offset of {} is {}", topicPartition, latestOffset);
        logger.debug("Last committed offset of {} is {}", topicPartition, lastCommittedOffset);
        KafkaPartitionScanSpec partitionScanSpec = new KafkaPartitionScanSpec(topicPartition.topic(), topicPartition.partition(), lastCommittedOffset, latestOffset);
        PartitionScanWork work = new PartitionScanWork(new EndpointByteMapImpl(), partitionScanSpec);
        Node[] inSyncReplicas = partitionInfo.inSyncReplicas();
        for (Node isr : inSyncReplicas) {
            String host = isr.host();
            DrillbitEndpoint ep = endpointMap.get(host);
            if (ep != null) {
                work.getByteMap().add(ep, work.getTotalBytes());
            }
        }
        partitionWorkMap.put(topicPartition, work);
    }
}
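As an aside, the same start and end offsets can be obtained without the seek-and-position round trips by using the consumer's bulk offset methods beginningOffsets and endOffsets. A minimal sketch, assuming the usual Kafka client and java.util imports, where props stands for a valid consumer configuration and "events" is a placeholder topic name rather than anything from the Drill plugin.

// Sketch: fetch per-partition start and end offsets with beginningOffsets/endOffsets.
// "events" and props are placeholder assumptions.
try (KafkaConsumer<byte[], byte[]> consumer =
         new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
    List<TopicPartition> partitions = consumer.partitionsFor("events").stream()
        .map(p -> new TopicPartition(p.topic(), p.partition()))
        .collect(Collectors.toList());
    Map<TopicPartition, Long> startOffsets = consumer.beginningOffsets(partitions);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    for (TopicPartition tp : partitions) {
        System.out.printf("%s: start=%d end=%d%n", tp, startOffsets.get(tp), endOffsets.get(tp));
    }
}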