Use of java.util.Collections.singletonList in project kafka by apache.
From the class FetcherTest, method testMultipleAbortMarkers.
@Test
public void testMultipleAbortMarkers() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
        new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
        new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
        new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
        new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(committedKeys, actuallyCommittedKeys);
}
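The aborted-transactions list above has exactly one entry, which is why Collections.singletonList fits: it allocates an immutable, fixed-size one-element list. A minimal standalone sketch of those semantics, using only plain JDK types (no Kafka classes):

import java.util.Collections;
import java.util.List;

public class SingletonListDemo {
    public static void main(String[] args) {
        // One immutable element; cheaper than new ArrayList<>() for a fixed single value.
        List<String> aborted = Collections.singletonList("producerId=1, firstOffset=0");
        System.out.println(aborted.size()); // prints 1
        try {
            aborted.add("another"); // fixed-size: mutation is rejected
        } catch (UnsupportedOperationException e) {
            System.out.println("singletonList is immutable");
        }
    }
}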
Use of java.util.Collections.singletonList in project kafka by apache.
From the class FetcherTest, method testGetTopicMetadataOfflinePartitions.
@Test
public void testGetTopicMetadataOfflinePartitions() {
    buildFetcher();
    assignFromUser(singleton(tp0));
    // baseline ok response
    MetadataResponse originalResponse = newMetadataResponse(topicName, Errors.NONE);
    // create a response based on the above one with all partitions being leaderless
    List<MetadataResponse.TopicMetadata> altTopics = new ArrayList<>();
    for (MetadataResponse.TopicMetadata item : originalResponse.topicMetadata()) {
        List<MetadataResponse.PartitionMetadata> partitions = item.partitionMetadata();
        List<MetadataResponse.PartitionMetadata> altPartitions = new ArrayList<>();
        for (MetadataResponse.PartitionMetadata p : partitions) {
            altPartitions.add(new MetadataResponse.PartitionMetadata(p.error, p.topicPartition,
                // no leader id, no leader epoch
                Optional.empty(), Optional.empty(),
                p.replicaIds, p.inSyncReplicaIds, p.offlineReplicaIds));
        }
        MetadataResponse.TopicMetadata alteredTopic = new MetadataResponse.TopicMetadata(
            item.error(), item.topic(), item.isInternal(), altPartitions);
        altTopics.add(alteredTopic);
    }
    Node controller = originalResponse.controller();
    MetadataResponse altered = RequestTestUtils.metadataResponse(
        originalResponse.brokers(), originalResponse.clusterId(),
        controller != null ? controller.id() : MetadataResponse.NO_CONTROLLER_ID, altTopics);
    client.prepareResponse(altered);
    Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata(
        new MetadataRequest.Builder(Collections.singletonList(topicName), false), time.timer(5000L));
    assertNotNull(topicMetadata);
    assertNotNull(topicMetadata.get(topicName));
    //noinspection ConstantConditions
    assertEquals(metadata.fetch().partitionCountForTopic(topicName).longValue(), topicMetadata.get(topicName).size());
}
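The altered partition metadata above models an offline partition with two Optional.empty() values, one for the leader id and one for the leader epoch. A minimal sketch of that pattern with plain JDK types; the variable names here are illustrative, not Kafka's:

import java.util.Optional;

public class LeaderlessPartitionDemo {
    public static void main(String[] args) {
        // An offline partition carries neither a leader id nor a leader epoch.
        Optional<Integer> leaderId = Optional.empty();
        Optional<Integer> leaderEpoch = Optional.empty();
        System.out.println(leaderId.map(id -> "leader=" + id).orElse("no leader"));
        System.out.println(leaderEpoch.isPresent() ? "epoch known" : "epoch unknown");
    }
}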
Use of java.util.Collections.singletonList in project kafka by apache.
From the class FetcherTest, method testReadCommittedAbortMarkerWithNoData.
@Test
public void testReadCommittedAbortMarkerWithNoData() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(),
        Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long producerId = 1L;
    abortTransaction(buffer, producerId, 5L);
    appendTransactionalRecords(buffer, producerId, 6L,
        new SimpleRecord("6".getBytes(), null),
        new SimpleRecord("7".getBytes(), null),
        new SimpleRecord("8".getBytes(), null));
    commitTransaction(buffer, producerId, 9L);
    buffer.flip();
    // send the fetch
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // prepare the response. the aborted transactions begin at offsets which are no longer in the log
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(producerId).setFirstOffset(0L));
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(
        MemoryRecords.readableRecords(buffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    assertEquals(Arrays.asList(6L, 7L, 8L), collectRecordOffsets(fetchedRecords));
}
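Outside the test harness, the READ_COMMITTED filtering exercised here is enabled through plain consumer configuration. A minimal sketch against the public consumer API; the broker address, group id, and topic name are placeholders:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ReadCommittedConsumerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");              // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // read_committed tells the consumer to drop records from aborted transactions,
        // the same filtering the mocked fetch response exercises above.
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic1")); // singletonList again: one topic
        }
    }
}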
Use of java.util.Collections.singletonList in project kafka by apache.
From the class TransactionManagerTest, method testAllowDrainInAbortableErrorState.
@Test
public void testAllowDrainInAbortableErrorState() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp1);
    prepareAddPartitionsToTxn(tp1, Errors.NONE);
    runUntil(() -> transactionManager.transactionContainsPartition(tp1));
    transactionManager.maybeAddPartition(tp0);
    prepareAddPartitionsToTxn(tp0, Errors.TOPIC_AUTHORIZATION_FAILED);
    runUntil(transactionManager::hasAbortableError);
    assertTrue(transactionManager.isSendToPartitionAllowed(tp1));
    // Try to drain a message destined for tp1; it should get drained.
    Node node1 = new Node(1, "localhost", 1112);
    PartitionInfo part1 = new PartitionInfo(topic, 1, node1, null, null);
    Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1),
        Collections.emptySet(), Collections.emptySet());
    appendToAccumulator(tp1);
    Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(
        cluster, Collections.singleton(node1), Integer.MAX_VALUE, time.milliseconds());
    // We should drain the appended record since we are in an abortable state and the partition
    // has already been added to the transaction.
    assertTrue(drainedBatches.containsKey(node1.id()));
    assertEquals(1, drainedBatches.get(node1.id()).size());
    assertTrue(transactionManager.hasAbortableError());
}
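Note that this test uses both Collections.singletonList (for the cluster's node and partition lists) and Collections.singleton (for the set of ready nodes passed to drain). A quick standalone sketch of the difference, with plain JDK types:

import java.util.Collections;
import java.util.List;
import java.util.Set;

public class SingletonVariantsDemo {
    public static void main(String[] args) {
        // singletonList yields an immutable one-element List (ordered, indexable);
        // singleton yields an immutable one-element Set (membership only).
        List<Integer> nodeIds = Collections.singletonList(1);
        Set<Integer> readyNodeIds = Collections.singleton(1);
        System.out.println(nodeIds.get(0));           // 1
        System.out.println(readyNodeIds.contains(1)); // true
    }
}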
Use of java.util.Collections.singletonList in project kafka by apache.
From the class StreamsPartitionAssignorTest, method shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks.
@Test
public void shouldNotLoopInfinitelyOnMissingMetadataAndShouldNotCreateRelatedTasks() {
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    final KStream<Object, Object> stream1 = streamsBuilder.stream("topic1")
        .selectKey((key, value) -> null)
        .groupByKey()
        .count(Materialized.as("count"))
        .toStream()
        .map((KeyValueMapper<Object, Long, KeyValue<Object, Object>>) (key, value) -> null);
    streamsBuilder.stream("unknownTopic")
        .selectKey((key, value) -> null)
        .join(stream1, (ValueJoiner<Object, Object, Void>) (value1, value2) -> null, JoinWindows.of(ofMillis(0)));
    final String client = "client1";
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build());
    final MockInternalTopicManager mockInternalTopicManager = configureDefault();
    subscriptions.put(client, new Subscription(Collections.singletonList("unknownTopic"), defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignment =
        partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(mockInternalTopicManager.readyTopics.isEmpty(), equalTo(true));
    assertThat(assignment.get(client).partitions().isEmpty(), equalTo(true));
}
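The topology under test joins a stream sourced from a topic that is missing from the cluster metadata, and the assertion is that assignment completes without spinning and without creating the join's internal topics. A minimal sketch of the same topology shape against the public Streams API; the topic names are placeholders, and no serdes or runtime config are wired up:

import static java.time.Duration.ofMillis;

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.JoinWindows;
import org.apache.kafka.streams.kstream.KStream;

public class MissingTopicTopologyDemo {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<Object, Object> known = builder.stream("topic1");         // placeholder topic
        KStream<Object, Object> unknown = builder.stream("unknownTopic"); // may not exist in the cluster
        // Windowed join between the two streams; the assignor must not loop forever
        // waiting for metadata for "unknownTopic", which is what the test above asserts.
        unknown.join(known, (v1, v2) -> v1, JoinWindows.of(ofMillis(0)));
        Topology topology = builder.build();
        System.out.println(topology.describe());
    }
}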