Use of java.util.Arrays.asList in the Apache Kafka project.
From the class RequestResponseTest, method testSerialization.
@Test
public void testSerialization() {
    Map<ApiKeys, List<Short>> toSkip = new HashMap<>();
    // It's not possible to create a MetadataRequest v0 via the builder
    toSkip.put(METADATA, singletonList((short) 0));
    // DescribeLogDirsResponse v0, v1 and v2 don't have a top-level error field
    toSkip.put(DESCRIBE_LOG_DIRS, Arrays.asList((short) 0, (short) 1, (short) 2));
    // ElectLeaders v0 does not have a top-level error field; when accessed, it defaults to NONE
    toSkip.put(ELECT_LEADERS, singletonList((short) 0));
    for (ApiKeys apikey : ApiKeys.values()) {
        for (short version : apikey.allVersions()) {
            if (toSkip.containsKey(apikey) && toSkip.get(apikey).contains(version))
                continue;
            AbstractRequest request = getRequest(apikey, version);
            checkRequest(request);
            checkErrorResponse(request, unknownServerException);
            checkResponse(getResponse(apikey, version), version);
        }
    }
}
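The pattern to note here is the skip table: Arrays.asList builds the list of versions to exclude in one expression, and List.contains drives the guard in the loop. Below is a minimal, self-contained sketch of the same idiom outside the Kafka test harness; the class name and the API string are hypothetical stand-ins.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class VersionSkipSketch {
    public static void main(String[] args) {
        // Hypothetical skip table: API name -> versions to exclude, seeded with Arrays.asList.
        Map<String, List<Short>> toSkip = new HashMap<>();
        toSkip.put("DESCRIBE_LOG_DIRS", Arrays.asList((short) 0, (short) 1, (short) 2));
        for (short version = 0; version < 4; version++) {
            // contains(version) autoboxes the short to Short, matching the list's element type.
            if (toSkip.containsKey("DESCRIBE_LOG_DIRS") && toSkip.get("DESCRIBE_LOG_DIRS").contains(version)) {
                continue;
            }
            System.out.println("would test version " + version); // only version 3 prints
        }
    }
}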
Use of java.util.Arrays.asList in the Apache Kafka project.
From the class KStreamImplTest, method testNumProcesses.
// specifically testing the deprecated variant
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testNumProcesses() {
    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> source1 = builder.stream(Arrays.asList("topic-1", "topic-2"), stringConsumed);
    final KStream<String, String> source2 = builder.stream(Arrays.asList("topic-3", "topic-4"), stringConsumed);
    final KStream<String, String> stream1 = source1.filter((key, value) -> true).filterNot((key, value) -> false);
    final KStream<String, Integer> stream2 = stream1.mapValues((ValueMapper<String, Integer>) Integer::valueOf);
    final KStream<String, Integer> stream3 = source2.flatMapValues((ValueMapper<String, Iterable<Integer>>) value -> Collections.singletonList(Integer.valueOf(value)));
    final KStream<String, Integer>[] streams2 = stream2.branch((key, value) -> (value % 2) == 0, (key, value) -> true);
    final KStream<String, Integer>[] streams3 = stream3.branch((key, value) -> (value % 2) == 0, (key, value) -> true);
    final int anyWindowSize = 1;
    final StreamJoined<String, Integer, Integer> joined = StreamJoined.with(Serdes.String(), Serdes.Integer(), Serdes.Integer());
    final KStream<String, Integer> stream4 = streams2[0].join(streams3[0], Integer::sum, JoinWindows.of(ofMillis(anyWindowSize)), joined);
    streams2[1].join(streams3[1], Integer::sum, JoinWindows.of(ofMillis(anyWindowSize)), joined);
    stream4.to("topic-5");
    streams2[1].through("topic-6").process(new MockProcessorSupplier<>());
    streams2[1].repartition().process(new MockProcessorSupplier<>());
    assertEquals(2 + // sources
        2 + // stream1
        1 + // stream2
        1 + // stream3
        1 + 2 + // streams2
        1 + 2 + // streams3
        5 * 2 + // stream2-stream3 joins
        1 + // to
        2 + // through
        1 + // process
        3 + // repartition
        1, // process
        TopologyWrapper.getInternalTopologyBuilder(builder.build()).setApplicationId("X").buildTopology().processors().size());
}
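Here Arrays.asList feeds builder.stream(...), which subscribes a single source node to several topics at once. The sketch below isolates that multi-topic subscription, assuming the standard kafka-streams dependency; the class and topic names are invented for illustration.

import java.util.Arrays;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class MultiTopicSourceSketch {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Arrays.asList lets one source node consume from several topics at once.
        KStream<String, String> merged = builder.stream(
            Arrays.asList("topic-a", "topic-b"),
            Consumed.with(Serdes.String(), Serdes.String()));
        merged.to("merged-output");
        // Printing the topology shows both topics attached to a single source processor.
        System.out.println(builder.build().describe());
    }
}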
Use of java.util.Arrays.asList in the Apache Kafka project.
From the class FetcherTest, method testMultipleAbortMarkers.
@Test
public void testMultipleAbortMarkers() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    int currentOffset = 0;
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
        new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
        new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Duplicate abort -- should be ignored.
    currentOffset += abortTransaction(buffer, 1L, currentOffset);
    // Now commit a transaction.
    currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
        new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
        new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
    commitTransaction(buffer, 1L, currentOffset);
    buffer.flip();
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0));
    MemoryRecords records = MemoryRecords.readableRecords(buffer);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.containsKey(tp0));
    assertEquals(2, fetchedRecords.get(tp0).size());
    List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
    Set<String> committedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
    Set<String> actuallyCommittedKeys = new HashSet<>();
    for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
        actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
    }
    assertEquals(committedKeys, actuallyCommittedKeys);
}
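Wrapping Arrays.asList in a HashSet, as the test does for committedKeys, turns an inline list of expected values into an order-insensitive set for comparison. A tiny JDK-only sketch of that comparison, with hypothetical key names:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ExpectedSetSketch {
    public static void main(String[] args) {
        // Arrays.asList seeds a HashSet of expected keys in one expression.
        Set<String> expected = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
        Set<String> actual = new HashSet<>();
        actual.add("commit1-2");
        actual.add("commit1-1");
        // Set equality ignores insertion order, which is exactly what the test relies on.
        System.out.println(expected.equals(actual)); // prints true
    }
}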
Use of java.util.Arrays.asList in the Apache Kafka project.
From the class FetcherTest, method testReadCommittedAbortMarkerWithNoData.
@Test
public void testReadCommittedAbortMarkerWithNoData() {
    buildFetcher(OffsetResetStrategy.EARLIEST, new StringDeserializer(), new StringDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    long producerId = 1L;
    abortTransaction(buffer, producerId, 5L);
    appendTransactionalRecords(buffer, producerId, 6L,
        new SimpleRecord("6".getBytes(), null),
        new SimpleRecord("7".getBytes(), null),
        new SimpleRecord("8".getBytes(), null));
    commitTransaction(buffer, producerId, 9L);
    buffer.flip();
    // send the fetch
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    assertEquals(1, fetcher.sendFetches());
    // Prepare the response. The aborted transactions begin at offsets which are no longer in the log.
    List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(producerId).setFirstOffset(0L));
    client.prepareResponse(fullFetchResponseWithAbortedTransactions(MemoryRecords.readableRecords(buffer), abortedTransactions, Errors.NONE, 100L, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<String, String>>> allFetchedRecords = fetchedRecords();
    assertTrue(allFetchedRecords.containsKey(tp0));
    List<ConsumerRecord<String, String>> fetchedRecords = allFetchedRecords.get(tp0);
    assertEquals(3, fetchedRecords.size());
    assertEquals(Arrays.asList(6L, 7L, 8L), collectRecordOffsets(fetchedRecords));
}
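In this test Arrays.asList supplies the expected offsets directly to assertEquals; because List.equals is order-sensitive, the assertion also checks that records come back in offset order, unlike the Set comparison in the previous example. A small JDK-only sketch of the same check, printed rather than asserted:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.LongStream;

public class OffsetOrderSketch {
    public static void main(String[] args) {
        // Arrays.asList builds the expected offsets inline.
        List<Long> expected = Arrays.asList(6L, 7L, 8L);
        // Stand-in for collectRecordOffsets: collect offsets 6..8 in arrival order.
        List<Long> collected = LongStream.rangeClosed(6L, 8L).boxed().collect(Collectors.toList());
        // List.equals compares both elements and their order.
        System.out.println(expected.equals(collected)); // prints true
    }
}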
Use of java.util.Arrays.asList in the Apache Kafka project.
From the class WorkerSinkTaskTest, method testPollRedeliveryWithConsumerRebalance.
@Test
public void testPollRedeliveryWithConsumerRebalance() throws Exception {
    createTask(initialState);
    expectInitializeTask();
    expectTaskGetTopic(true);
    expectPollInitialAssignment();
    // If a retriable exception is thrown, we should redeliver the same batch, pausing the consumer in the meantime
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Pause
    EasyMock.expect(consumer.assignment()).andReturn(INITIAL_ASSIGNMENT);
    consumer.pause(INITIAL_ASSIGNMENT);
    PowerMock.expectLastCall();
    // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet());
        rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3));
        return ConsumerRecords.empty();
    });
    Set<TopicPartition> newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3));
    EasyMock.expect(consumer.assignment()).andReturn(newAssignment).times(3);
    EasyMock.expect(consumer.position(TOPIC_PARTITION3)).andReturn(FIRST_OFFSET);
    sinkTask.open(Collections.singleton(TOPIC_PARTITION3));
    EasyMock.expectLastCall();
    // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue
    consumer.pause(newAssignment);
    EasyMock.expectLastCall();
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Next delivery attempt fails again
    expectConsumerPoll(0);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new RetriableException("retry"));
    // Non-empty consumer poll; all initially-assigned partitions are revoked in rebalance, and new partitions are allowed to resume
    ConsumerRecord<byte[], byte[]> newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE);
    EasyMock.expect(consumer.poll(Duration.ofMillis(EasyMock.anyLong()))).andAnswer(() -> {
        rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT);
        rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList());
        return new ConsumerRecords<>(Collections.singletonMap(TOPIC_PARTITION3, Collections.singletonList(newRecord)));
    });
    newAssignment = Collections.singleton(TOPIC_PARTITION3);
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(newAssignment)).times(3);
    final Map<TopicPartition, OffsetAndMetadata> offsets = INITIAL_ASSIGNMENT.stream()
        .collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET)));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    sinkTask.close(INITIAL_ASSIGNMENT);
    EasyMock.expectLastCall();
    // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked
    newAssignment.forEach(tp -> {
        consumer.resume(Collections.singleton(tp));
        EasyMock.expectLastCall();
    });
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.anyObject());
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    workerTask.iteration();
    PowerMock.verifyAll();
}
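The new HashSet<>(Arrays.asList(...)) idiom used for newAssignment matters because Arrays.asList returns a fixed-size list backed by the given array: copying into a HashSet gives the test a real, mutable set of partitions. A short sketch of that idiom using Kafka's TopicPartition; the topic name and partition numbers are invented for illustration.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

public class AssignmentSetSketch {
    public static void main(String[] args) {
        // new HashSet<>(Arrays.asList(...)) yields a mutable copy; the fixed-size
        // list returned by Arrays.asList alone cannot grow or shrink.
        Set<TopicPartition> assignment = new HashSet<>(Arrays.asList(
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1)));
        assignment.add(new TopicPartition("topic", 2)); // simulate a newly assigned partition
        System.out.println(assignment);
    }
}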