Use of java.util.Collections.singletonMap in project kafka by apache.
The class StoreChangelogReaderTest, method shouldRestoreToLimitInStandbyState.
@SuppressWarnings("unchecked")
@Test
public void shouldRestoreToLimitInStandbyState() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    final AtomicLong offset = new AtomicLong(7L);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };
    final long now = time.milliseconds();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // a record with a null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if the commit interval has not yet elapsed
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    time.setCurrentTimeMs(now + 101L);
    // the first restore only updates the limit; same below
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    offset.set(15L);
    // after we've updated once, the timer is reset, so we should not try again until the next interval has elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // once we are restoring active tasks, we should not try to update the limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
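The singletonMap call worth noting is consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L)): with exactly one partition involved, singletonMap supplies a one-entry, immutable map without allocating a full HashMap. Below is a minimal, self-contained sketch of that pattern; the class name, topic name, and offset are illustrative, and it assumes Kafka's MockConsumer test utility is on the classpath.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class SingletonBeginningOffsets {
    public static void main(String[] args) {
        // Illustrative partition; the test above uses its own tp field.
        TopicPartition tp = new TopicPartition("store-changelog", 0);
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        // One-entry, immutable offset map for a single changelog partition.
        Map<TopicPartition, Long> beginningOffsets = Collections.singletonMap(tp, 5L);
        consumer.updateBeginningOffsets(beginningOffsets);
        // beginningOffsets.put(tp, 6L); // would throw UnsupportedOperationException
    }
}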
Use of java.util.Collections.singletonMap in project lucene-solr by apache.
The class RequestParams, method createParamSet.
public static ParamSet createParamSet(Map map, Long version) {
    Map copy = getDeepCopy(map, 3);
    Map meta = (Map) copy.remove("");
    if (meta == null && version != null) {
        meta = Collections.singletonMap("v", version);
    }
    Map invariants = (Map) copy.remove(INVARIANTS);
    Map appends = (Map) copy.remove(APPENDS);
    return new ParamSet(copy, invariants, appends, meta);
}
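One caveat for code consuming the resulting ParamSet: the metadata map produced by the singletonMap fallback is immutable, so recording a new version requires building a new map rather than mutating the existing one. A JDK-only sketch (class name and values are illustrative):

import java.util.Collections;
import java.util.Map;

public class SingletonMapIsImmutable {
    public static void main(String[] args) {
        // Same shape as the "v" -> version metadata built above.
        Map<String, Object> meta = Collections.singletonMap("v", 3L);
        System.out.println(meta); // prints {v=3}
        try {
            meta.put("v", 4L); // singletonMap rejects all mutation
        } catch (UnsupportedOperationException expected) {
            System.out.println("version metadata must be replaced, not mutated in place");
        }
    }
}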
Use of java.util.Collections.singletonMap in project apache-kafka-on-k8s by banzaicloud.
The class KafkaConsumerTest, method testFetchProgressWithMissingPartitionPosition.
@Test
public void testFetchProgressWithMissingPartitionPosition() {
    // Verifies that we can make progress on one partition while we are awaiting
    // a reset on another partition.
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 2);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    KafkaConsumer<String, String> consumer = newConsumerNoAutoCommit(time, client, metadata);
    consumer.assign(Arrays.asList(tp0, tp1));
    consumer.seekToEnd(singleton(tp0));
    consumer.seekToBeginning(singleton(tp1));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            ListOffsetRequest request = (ListOffsetRequest) body;
            Map<TopicPartition, Long> expectedTimestamps = new HashMap<>();
            expectedTimestamps.put(tp0, ListOffsetRequest.LATEST_TIMESTAMP);
            expectedTimestamps.put(tp1, ListOffsetRequest.EARLIEST_TIMESTAMP);
            return expectedTimestamps.equals(request.partitionTimestamps());
        }
    }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_FOR_PARTITION)));
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            FetchRequest request = (FetchRequest) body;
            return request.fetchData().keySet().equals(singleton(tp0)) && request.fetchData().get(tp0).fetchOffset == 50L;
        }
    }, fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(5);
    assertEquals(5, records.count());
    assertEquals(singleton(tp0), records.partitions());
}
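This test uses two sibling JDK helpers side by side: the statically imported singleton(tp0) builds the immutable one-element Set passed to seekToEnd and seekToBeginning, while Collections.singletonMap builds the one-entry maps fed to the listOffsetsResponse helper. A short sketch contrasting the two (the class and topic names are illustrative):

import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

public class SingletonVsSingletonMap {
    public static void main(String[] args) {
        TopicPartition tp0 = new TopicPartition("topic", 0);
        // Immutable one-element Set, as used for the seek calls.
        Set<TopicPartition> partitions = Collections.singleton(tp0);
        // Immutable one-entry Map, as used for the offsets in the response.
        Map<TopicPartition, Long> offsets = Collections.singletonMap(tp0, 50L);
        System.out.println(partitions + " " + offsets); // [topic-0] {topic-0=50}
    }
}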
Use of java.util.Collections.singletonMap in project kafka by apache.
The class ConsumerCoordinatorTest, method testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfNeeded.
@SuppressWarnings("unchecked")
@Test
public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfNeeded() {
    SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
    // the consumer is subscribed only to "topic1"
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
    for (Map.Entry<String, List<String>> subscriptionEntry : memberSubscriptions.entrySet()) {
        ConsumerPartitionAssignor.Subscription subscription = new ConsumerPartitionAssignor.Subscription(subscriptionEntry.getValue());
        ByteBuffer buf = ConsumerProtocol.serializeSubscription(subscription);
        metadata.add(new JoinGroupResponseData.JoinGroupResponseMember().setMemberId(subscriptionEntry.getKey()).setMetadata(buf.array()));
    }
    // normal case: the assignment result has partitions only for the subscribed topic "topic1"
    partitionAssignor.prepare(Collections.singletonMap(consumerId, singletonList(t1p)));
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
        coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
        ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
        // groupSubscribe should be called only once, before assignment,
        // because the assigned topics are the same as the subscribed topics
        Mockito.verify(mockSubscriptionState, Mockito.times(1)).groupSubscribe(topicsCaptor.capture());
        List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
        // expect the final group subscription to contain only "topic1"
        Set<String> expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1));
        assertEquals(expectedTopicsGotCalled, capturedTopics.get(0));
    }
    Mockito.clearInvocations(mockSubscriptionState);
    // unsubscribed-topic case: the assignment result has partitions for (1) the subscribed topic "topic1"
    // and (2) an additional unsubscribed topic "topic2"; "topic2" should be added to the group subscription
    partitionAssignor.prepare(Collections.singletonMap(consumerId, Arrays.asList(t1p, t2p)));
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
        coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
        ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
        // groupSubscribe should be called twice: once before assignment and once after,
        // because the assigned topics differ from the subscribed topics
        Mockito.verify(mockSubscriptionState, Mockito.times(2)).groupSubscribe(topicsCaptor.capture());
        List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
        // expect the final group subscription to be updated to "topic1" and "topic2"
        Set<String> expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1, topic2));
        assertEquals(expectedTopicsGotCalled, capturedTopics.get(1));
    }
}
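Both spellings of the call appear in this test: the unqualified singletonMap(consumerId, singletonList(topic1)) relies on static imports, while the later Collections.singletonMap calls are fully qualified; they are the same method. A sketch of the static-import form (member and topic names are illustrative):

import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import java.util.List;
import java.util.Map;

public class StaticImportForm {
    public static void main(String[] args) {
        // With static imports, the call reads almost like a map literal.
        Map<String, List<String>> memberSubscriptions = singletonMap("consumer-1", singletonList("topic1"));
        System.out.println(memberSubscriptions); // prints {consumer-1=[topic1]}
    }
}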
Use of java.util.Collections.singletonMap in project kafka by apache.
The class FetcherTest, method testEpochSetInFetchRequest.
@Test
public void testEpochSetInFetchRequest() {
    buildFetcher();
    subscriptions.assignFromUser(singleton(tp0));
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap(topicName, 4), tp -> 99, topicIds);
    client.updateMetadata(metadataResponse);
    subscriptions.seek(tp0, 10);
    assertEquals(1, fetcher.sendFetches());
    // Check for epoch in outgoing request
    MockClient.RequestMatcher matcher = body -> {
        if (body instanceof FetchRequest) {
            FetchRequest fetchRequest = (FetchRequest) body;
            fetchRequest.fetchData(topicNames).values().forEach(partitionData -> {
                assertTrue(partitionData.currentLeaderEpoch.isPresent(), "Expected Fetcher to set leader epoch in request");
                assertEquals(99, partitionData.currentLeaderEpoch.get().longValue(), "Expected leader epoch to match epoch from metadata update");
            });
            return true;
        } else {
            fail("Should have seen FetchRequest");
            return false;
        }
    };
    client.prepareResponse(matcher, fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.pollNoWakeup();
}
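Here Collections.singletonMap(topicName, 4) supplies the single-topic partition count for the metadata update. On Java 9 and later, Map.of covers the same one-entry case (and larger maps); one behavioral difference is that singletonMap accepts a null value while Map.of does not. A brief comparison (the class and topic names are illustrative):

import java.util.Collections;
import java.util.Map;

public class SingletonMapVsMapOf {
    public static void main(String[] args) {
        Map<String, Integer> legacy = Collections.singletonMap("dummy-topic", 4);
        Map<String, Integer> modern = Map.of("dummy-topic", 4); // Java 9+
        System.out.println(legacy.equals(modern)); // true: both hold {dummy-topic=4}
        // Collections.singletonMap("t", null) is legal; Map.of("t", null) throws NullPointerException.
    }
}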