Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project apache-kafka-on-k8s by banzaicloud.
Class FetcherTest, method testReturnCommittedTransactions.
@Test
public void testReturnCommittedTransactions() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "key".getBytes(), "value".getBytes()));
    currentOffset += commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            FetchRequest request = (FetchRequest) body;
            assertEquals(IsolationLevel.READ_COMMITTED, request.isolationLevel());
            return true;
        }
    }, fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
}
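For context, the READ_COMMITTED isolation level checked by the request matcher above maps to the consumer's isolation.level setting in application code. A minimal sketch, not part of the project, assuming a Kafka client 2.0+ (for poll(Duration)) and a hypothetical broker, group id, and topic:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ReadCommittedConsumerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "tx-reader");               // hypothetical group id
        // Maps to IsolationLevel.READ_COMMITTED in the test above: poll() only
        // returns records from committed transactions.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<byte[], byte[]> consumer =
                     new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer())) {
            consumer.subscribe(Collections.singletonList("tx-topic")); // hypothetical topic
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<byte[], byte[]> record : records)
                System.out.printf("offset=%d, key=%s%n", record.offset(), new String(record.key()));
        }
    }
}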
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project apache-kafka-on-k8s by banzaicloud.
Class FetcherTest, method testListOffsetsSendsIsolationLevel.
@Test
public void testListOffsetsSendsIsolationLevel() {
    for (final IsolationLevel isolationLevel : IsolationLevel.values()) {
        Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(),
                new ByteArrayDeserializer(), Integer.MAX_VALUE, isolationLevel);
        subscriptions.assignFromUser(singleton(tp0));
        subscriptions.requestOffsetReset(tp0, OffsetResetStrategy.LATEST);
        client.prepareResponse(new MockClient.RequestMatcher() {
            @Override
            public boolean matches(AbstractRequest body) {
                ListOffsetRequest request = (ListOffsetRequest) body;
                return request.isolationLevel() == isolationLevel;
            }
        }, listOffsetResponse(Errors.NONE, 1L, 5L));
        fetcher.resetOffsetsIfNeeded();
        consumerClient.pollNoWakeup();
        assertFalse(subscriptions.isOffsetResetNeeded(tp0));
        assertTrue(subscriptions.isFetchable(tp0));
        assertEquals(5, subscriptions.position(tp0).longValue());
    }
}
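On a real consumer, the isolation level also shapes offset lookups: under read_committed, endOffsets() returns the last stable offset rather than the high watermark. A rough fragment, assuming a consumer configured as in the sketch above and an assigned TopicPartition tp (both hypothetical):

// Fragment only: 'consumer' and 'tp' are assumed from a hypothetical surrounding setup.
Map<TopicPartition, Long> endOffsets = consumer.endOffsets(Collections.singleton(tp));
long end = endOffsets.get(tp);
// Under read_committed this is the last stable offset, so seeking here mirrors
// the OffsetResetStrategy.LATEST reset exercised by the test above.
consumer.seek(tp, end);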
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project apache-kafka-on-k8s by banzaicloud.
Class FetcherTest, method testMultipleAbortMarkers.
@Test
public void testMultipleAbortMarkers() {
    Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(),
            new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
            new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
            new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponse.AbortedTransaction> abortedTransactions = new ArrayList<>();
    abortedTransactions.add(new FetchResponse.AbortedTransaction(1, 0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(0);
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetcher.fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords)
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    assertEquals(committedKeys, actuallyCommittedKeys);
}
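The aborted and committed batches this test writes by hand correspond to the producer-side transactional API. A minimal sketch of how such a log segment could arise, not taken from either project, with a hypothetical broker, transactional id, and topic:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionMarkersSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "tx-1");            // hypothetical transactional id
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) {
            producer.initTransactions();

            producer.beginTransaction(); // first transaction, rolled back
            producer.send(new ProducerRecord<>("tx-topic", "abort1-1", "value"));
            producer.send(new ProducerRecord<>("tx-topic", "abort1-2", "value"));
            producer.abortTransaction();  // writes an abort marker to the log

            producer.beginTransaction(); // second transaction, committed
            producer.send(new ProducerRecord<>("tx-topic", "commit1-1", "value"));
            producer.send(new ProducerRecord<>("tx-topic", "commit1-2", "value"));
            producer.commitTransaction(); // writes a commit marker to the log
        }
        // A read_committed consumer then sees only commit1-1 and commit1-2,
        // which is exactly what the test above asserts.
    }
}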
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project core-ng-project by neowu.
Class Kafka, method consumer.
public Consumer<String, byte[]> consumer(String group, Set<String> topics) {
    if (uri == null)
        throw new Error("uri must not be null");
    StopWatch watch = new StopWatch();
    try {
        Map<String, Object> config = Maps.newHashMap();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, uri);
        config.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        config.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, (int) maxProcessTime.toMillis());
        config.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, (int) maxProcessTime.plusSeconds(5).toMillis());
        config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        config.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, maxPollBytes);
        config.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, minPollBytes);
        config.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, (int) minPollMaxWaitTime.toMillis());
        Consumer<String, byte[]> consumer = new KafkaConsumer<>(config, new StringDeserializer(), new ByteArrayDeserializer());
        consumer.subscribe(topics);
        consumerMetrics.add(consumer.metrics());
        return consumer;
    } finally {
        logger.info("create kafka consumer, uri={}, name={}, topics={}, elapsedTime={}", uri, name, topics, watch.elapsedTime());
    }
}
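Because ENABLE_AUTO_COMMIT_CONFIG is false here, whoever drives the returned consumer must commit offsets after processing. A minimal sketch of such a loop, not from the project; handle(...) is a hypothetical callback, and poll(Duration) assumes a 2.0+ client (older clients use the deprecated poll(long)):

// Fragment only: 'consumer' is the result of the consumer(...) factory method above.
while (running) {
    ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
    for (ConsumerRecord<String, byte[]> record : records)
        handle(record.topic(), record.key(), record.value()); // hypothetical message handler
    if (!records.isEmpty())
        consumer.commitSync(); // manual commit, matching ENABLE_AUTO_COMMIT_CONFIG=false above
}
consumer.close();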
Use of org.apache.kafka.common.serialization.ByteArrayDeserializer in project core-ng-project by neowu.
Class KafkaConsumerFactory, method create.
public Consumer<String, byte[]> create() {
    Map<String, Object> config = Maps.newHashMap();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, uri);
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "log-processor");
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2000);
    // fetch at most 3 MB of messages per request
    config.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 3 * 1024 * 1024);
    // wait until at least 1 MB of messages is available ...
    config.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 1024 * 1024);
    // ... but no longer than 500 ms if there is not enough data
    config.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 500);
    return new KafkaConsumer<>(config, new StringDeserializer(), new ByteArrayDeserializer());
}
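One design note: poll() blocks, so stopping such a consumer cleanly from another thread is usually done with wakeup(), which makes the blocked poll() throw WakeupException. A rough sketch under the same assumptions as above; 'factory', the topic name, and process(...) are hypothetical:

Consumer<String, byte[]> consumer = factory.create();
Runtime.getRuntime().addShutdownHook(new Thread(consumer::wakeup)); // interrupt a blocking poll() on shutdown
try {
    consumer.subscribe(Collections.singleton("log-events")); // hypothetical topic
    while (true) {
        ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
        for (ConsumerRecord<String, byte[]> record : records)
            process(record); // hypothetical handler; at most 2000 records per batch, per MAX_POLL_RECORDS_CONFIG
        consumer.commitSync();
    }
} catch (WakeupException e) {
    // expected during shutdown
} finally {
    consumer.close();
}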