Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
In the class TopicBasedRemoteLogMetadataManagerRestartTest, the method testRLMMAPIsAfterRestart:
@Test
public void testRLMMAPIsAfterRestart() throws Exception {
    // Create topics.
    String leaderTopic = "new-leader";
    HashMap<Object, Seq<Object>> assignedLeaderTopicReplicas = new HashMap<>();
    List<Object> leaderTopicReplicas = new ArrayList<>();
    // Set broker id 0 as the first entry, which is taken as the leader.
    leaderTopicReplicas.add(0);
    leaderTopicReplicas.add(1);
    leaderTopicReplicas.add(2);
    assignedLeaderTopicReplicas.put(0, JavaConverters.asScalaBuffer(leaderTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(leaderTopic,
            JavaConverters.mapAsScalaMap(assignedLeaderTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());
    String followerTopic = "new-follower";
    HashMap<Object, Seq<Object>> assignedFollowerTopicReplicas = new HashMap<>();
    List<Object> followerTopicReplicas = new ArrayList<>();
    // Set broker id 1 as the first entry, which is taken as the leader.
    followerTopicReplicas.add(1);
    followerTopicReplicas.add(2);
    followerTopicReplicas.add(0);
    assignedFollowerTopicReplicas.put(0, JavaConverters.asScalaBuffer(followerTopicReplicas));
    remoteLogMetadataManagerHarness.createTopicWithAssignment(followerTopic,
            JavaConverters.mapAsScalaMap(assignedFollowerTopicReplicas),
            remoteLogMetadataManagerHarness.listenerName());
    final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(leaderTopic, 0));
    final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition(followerTopic, 0));
    // Register these partitions to RLMM.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Add segments for these partitions, but they are not available as they have not yet been subscribed.
    RemoteLogSegmentMetadata leaderSegmentMetadata = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()),
            0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata).get();
    RemoteLogSegmentMetadata followerSegmentMetadata = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(followerTopicIdPartition, Uuid.randomUuid()),
            0, 100, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 0L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(followerSegmentMetadata).get();
    // Stop TopicBasedRemoteLogMetadataManager only.
    stopTopicBasedRemoteLogMetadataManagerHarness();
    // Start TopicBasedRemoteLogMetadataManager, but do not start the consumer thread, to check whether the
    // stored metadata is loaded successfully.
    startTopicBasedRemoteLogMetadataManagerHarness(false);
    // Register these partitions to RLMM, which loads the respective metadata snapshots.
    topicBasedRlmm().onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition));
    // Check for the stored entries from the earlier run.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(leaderSegmentMetadata).iterator(),
            topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Collections.singleton(followerSegmentMetadata).iterator(),
            topicBasedRlmm().listRemoteLogSegments(followerTopicIdPartition)));
    // Check whether the check-pointed consumer offsets are stored or not.
    Path committedOffsetsPath = new File(logDir, COMMITTED_OFFSETS_FILE_NAME).toPath();
    Assertions.assertTrue(committedOffsetsPath.toFile().exists());
    CommittedOffsetsFile committedOffsetsFile = new CommittedOffsetsFile(committedOffsetsPath.toFile());
    int metadataPartition1 = topicBasedRlmm().metadataPartition(leaderTopicIdPartition);
    int metadataPartition2 = topicBasedRlmm().metadataPartition(followerTopicIdPartition);
    Optional<Long> receivedOffsetForPartition1 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition1);
    Optional<Long> receivedOffsetForPartition2 = topicBasedRlmm().receivedOffsetForPartition(metadataPartition2);
    Assertions.assertTrue(receivedOffsetForPartition1.isPresent());
    Assertions.assertTrue(receivedOffsetForPartition2.isPresent());
    // Make sure these offsets are at least 0.
    Assertions.assertTrue(receivedOffsetForPartition1.get() >= 0);
    Assertions.assertTrue(receivedOffsetForPartition2.get() >= 0);
    // Check that the stored entries and the offsets that were set on the consumer are the same.
    Map<Integer, Long> partitionToOffset = committedOffsetsFile.readEntries();
    Assertions.assertEquals(partitionToOffset.get(metadataPartition1), receivedOffsetForPartition1.get());
    Assertions.assertEquals(partitionToOffset.get(metadataPartition2), receivedOffsetForPartition2.get());
    // Start the consumer thread.
    topicBasedRlmm().startConsumerThread();
    // Add one more segment.
    RemoteLogSegmentMetadata leaderSegmentMetadata2 = new RemoteLogSegmentMetadata(
            new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()),
            101, 200, -1L, 0, time.milliseconds(), SEG_SIZE, Collections.singletonMap(0, 101L));
    topicBasedRlmm().addRemoteLogSegmentMetadata(leaderSegmentMetadata2).get();
    // Check that both the stored segment and the recently added segment are available.
    Assertions.assertTrue(TestUtils.sameElementsWithoutOrder(Arrays.asList(leaderSegmentMetadata, leaderSegmentMetadata2).iterator(),
            topicBasedRlmm().listRemoteLogSegments(leaderTopicIdPartition)));
}
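
The restart test above hinges on TopicIdPartition behaving as a value-based key: the same topic id and partition must resolve to the same stored metadata before and after the harness restart. Below is a minimal sketch of that behavior using only constructors and accessors already seen in the test; the class name TopicIdPartitionKeyExample and the "segment-metadata" placeholder value are illustrative, not part of the Kafka code base.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionKeyExample {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        TopicIdPartition tidp = new TopicIdPartition(topicId, new TopicPartition("new-leader", 0));

        // Two instances built from the same id and partition are equal, which is what lets the
        // metadata manager look up the same key again after a restart.
        TopicIdPartition sameKey = new TopicIdPartition(topicId, new TopicPartition("new-leader", 0));
        Map<TopicIdPartition, String> metadataByPartition = new HashMap<>();
        metadataByPartition.put(tidp, "segment-metadata");
        System.out.println(metadataByPartition.get(sameKey)); // prints "segment-metadata"

        // Accessors used throughout the examples on this page.
        System.out.println(tidp.topicId());
        System.out.println(tidp.topicPartition());
    }
}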
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
In the class InmemoryRemoteLogMetadataManager, the method putRemotePartitionDeleteMetadata:
@Override
public CompletableFuture<Void> putRemotePartitionDeleteMetadata(RemotePartitionDeleteMetadata remotePartitionDeleteMetadata) throws RemoteStorageException {
    log.debug("Adding delete state with: [{}]", remotePartitionDeleteMetadata);
    Objects.requireNonNull(remotePartitionDeleteMetadata, "remotePartitionDeleteMetadata can not be null");
    TopicIdPartition topicIdPartition = remotePartitionDeleteMetadata.topicIdPartition();
    RemotePartitionDeleteState targetState = remotePartitionDeleteMetadata.state();
    RemotePartitionDeleteMetadata existingMetadata = idToPartitionDeleteMetadata.get(topicIdPartition);
    RemotePartitionDeleteState existingState = existingMetadata != null ? existingMetadata.state() : null;
    if (!RemotePartitionDeleteState.isValidTransition(existingState, targetState)) {
        throw new IllegalStateException("Current state: " + existingState + ", target state: " + targetState);
    }
    idToPartitionDeleteMetadata.put(topicIdPartition, remotePartitionDeleteMetadata);
    if (targetState == RemotePartitionDeleteState.DELETE_PARTITION_FINISHED) {
        // Remove the association for the partition.
        idToRemoteLogMetadataCache.remove(topicIdPartition);
        idToPartitionDeleteMetadata.remove(topicIdPartition);
    }
    return COMPLETED_FUTURE;
}
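
The transition guard in putRemotePartitionDeleteMetadata is easier to follow in isolation. The sketch below walks the delete lifecycle in the order the examples above imply (DELETE_PARTITION_MARKED, then DELETE_PARTITION_STARTED, then DELETE_PARTITION_FINISHED), applying the same isValidTransition check; the class name DeleteStateTransitionSketch is invented, and the import path assumes Kafka's storage API module.

import org.apache.kafka.server.log.remote.storage.RemotePartitionDeleteState;

public class DeleteStateTransitionSketch {
    public static void main(String[] args) {
        // No prior state is recorded for a new partition, so start from null as the manager does.
        RemotePartitionDeleteState current = null;
        RemotePartitionDeleteState[] lifecycle = {
            RemotePartitionDeleteState.DELETE_PARTITION_MARKED,
            RemotePartitionDeleteState.DELETE_PARTITION_STARTED,
            RemotePartitionDeleteState.DELETE_PARTITION_FINISHED
        };
        for (RemotePartitionDeleteState target : lifecycle) {
            // Same guard as in the in-memory manager above.
            if (!RemotePartitionDeleteState.isValidTransition(current, target)) {
                throw new IllegalStateException("Current state: " + current + ", target state: " + target);
            }
            current = target;
        }
        System.out.println("Reached state: " + current);
    }
}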
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
In the class FetcherTest, the method testCompletedFetchRemoval:
@Test
public void testCompletedFetchRemoval() {
    // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);
    assertEquals(1, fetcher.sendFetches());
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setHighWatermark(100)
            .setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
            .setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp2.partition())
            .setHighWatermark(100)
            .setLastStableOffset(4)
            .setLogStartOffset(0)
            .setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp3.partition())
            .setHighWatermark(100)
            .setLastStableOffset(4)
            .setLogStartOffset(0)
            .setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
        fetchedRecords.addAll(records);
    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());
    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        recordsByPartition = fetchedRecords();
        for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
            fetchedRecords.addAll(records);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }
    // Should have received one OffsetOutOfRangeException for partition tp0.
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(1, oor.offsetOutOfRangePartitions().size());
    recordsByPartition = fetchedRecords();
    for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
        fetchedRecords.addAll(records);
    // Should not have received an Exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());
    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            recordsByPartition = fetchedRecords();
            for (List<ConsumerRecord<byte[], byte[]>> records : recordsByPartition.values())
                fetchedRecords.addAll(records);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received as many as numExceptionsExpected KafkaExceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
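
Note that tp0..tp3 and tidp0..tidp3 are fixtures defined elsewhere in FetcherTest, so they do not appear in this snippet. A hedged reconstruction of how such pairs could be built is shown below; the topic name "test-topic" and the single shared Uuid are assumptions for illustration, not the test's actual setup.

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class FetcherTestFixturesSketch {
    public static void main(String[] args) {
        // Hypothetical reconstruction: pair each TopicPartition of a topic with that topic's Uuid.
        Uuid topicId = Uuid.randomUuid();
        TopicPartition tp0 = new TopicPartition("test-topic", 0);
        TopicPartition tp1 = new TopicPartition("test-topic", 1);
        TopicIdPartition tidp0 = new TopicIdPartition(topicId, tp0);
        TopicIdPartition tidp1 = new TopicIdPartition(topicId, tp1);
        System.out.println(tidp0 + " / " + tidp1);
    }
}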
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
In the class FetcherTest, the method testFetcherConcurrency:
@Test
public void testFetcherConcurrency() throws Exception {
    int numPartitions = 20;
    Set<TopicPartition> topicPartitions = new HashSet<>();
    for (int i = 0; i < numPartitions; i++)
        topicPartitions.add(new TopicPartition(topicName, i));
    LogContext logContext = new LogContext();
    buildDependencies(new MetricConfig(), Long.MAX_VALUE, new SubscriptionState(logContext, OffsetResetStrategy.EARLIEST), logContext);
    fetcher = new Fetcher<byte[], byte[]>(new LogContext(), consumerClient, minBytes, maxBytes, maxWaitMs, fetchSize,
            2 * numPartitions, true, "", new ByteArrayDeserializer(), new ByteArrayDeserializer(), metadata, subscriptions,
            metrics, metricsRegistry, time, retryBackoffMs, requestTimeoutMs, IsolationLevel.READ_UNCOMMITTED, apiVersions) {

        @Override
        protected FetchSessionHandler sessionHandler(int id) {
            final FetchSessionHandler handler = super.sessionHandler(id);
            if (handler == null)
                return null;
            else {
                return new FetchSessionHandler(new LogContext(), id) {

                    @Override
                    public Builder newBuilder() {
                        verifySessionPartitions();
                        return handler.newBuilder();
                    }

                    @Override
                    public boolean handleResponse(FetchResponse response, short version) {
                        verifySessionPartitions();
                        return handler.handleResponse(response, version);
                    }

                    @Override
                    public void handleError(Throwable t) {
                        verifySessionPartitions();
                        handler.handleError(t);
                    }

                    // Verify that session partitions can be traversed safely.
                    private void verifySessionPartitions() {
                        try {
                            Field field = FetchSessionHandler.class.getDeclaredField("sessionPartitions");
                            field.setAccessible(true);
                            LinkedHashMap<?, ?> sessionPartitions = (LinkedHashMap<?, ?>) field.get(handler);
                            for (Map.Entry<?, ?> entry : sessionPartitions.entrySet()) {
                                // If `sessionPartitions` are modified on another thread, Thread.yield will increase the
                                // possibility of ConcurrentModificationException if appropriate synchronization is not used.
                                Thread.yield();
                            }
                        } catch (Exception e) {
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    };
    MetadataResponse initialMetadataResponse = RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, numPartitions), tp -> validLeaderEpoch, topicIds);
    client.updateMetadata(initialMetadataResponse);
    fetchSize = 10000;
    assignFromUser(topicPartitions);
    topicPartitions.forEach(tp -> subscriptions.seek(tp, 0L));
    AtomicInteger fetchesRemaining = new AtomicInteger(1000);
    executorService = Executors.newSingleThreadExecutor();
    Future<?> future = executorService.submit(() -> {
        while (fetchesRemaining.get() > 0) {
            synchronized (consumerClient) {
                if (!client.requests().isEmpty()) {
                    ClientRequest request = client.requests().peek();
                    FetchRequest fetchRequest = (FetchRequest) request.requestBuilder().build();
                    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
                    for (Map.Entry<TopicIdPartition, FetchRequest.PartitionData> entry : fetchRequest.fetchData(topicNames).entrySet()) {
                        TopicIdPartition tp = entry.getKey();
                        long offset = entry.getValue().fetchOffset;
                        responseMap.put(tp, new FetchResponseData.PartitionData()
                                .setPartitionIndex(tp.topicPartition().partition())
                                .setHighWatermark(offset + 2)
                                .setLastStableOffset(offset + 2)
                                .setLogStartOffset(0)
                                .setRecords(buildRecords(offset, 2, offset)));
                    }
                    client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap));
                    consumerClient.poll(time.timer(0));
                }
            }
        }
        return fetchesRemaining.get();
    });
    Map<TopicPartition, Long> nextFetchOffsets = topicPartitions.stream().collect(Collectors.toMap(Function.identity(), t -> 0L));
    while (fetchesRemaining.get() > 0 && !future.isDone()) {
        if (fetcher.sendFetches() == 1) {
            synchronized (consumerClient) {
                consumerClient.poll(time.timer(0));
            }
        }
        if (fetcher.hasCompletedFetches()) {
            Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
            if (!fetchedRecords.isEmpty()) {
                fetchesRemaining.decrementAndGet();
                fetchedRecords.forEach((tp, records) -> {
                    assertEquals(2, records.size());
                    long nextOffset = nextFetchOffsets.get(tp);
                    assertEquals(nextOffset, records.get(0).offset());
                    assertEquals(nextOffset + 1, records.get(1).offset());
                    nextFetchOffsets.put(tp, nextOffset + 2);
                });
            }
        }
    }
    assertEquals(0, future.get());
}
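
The heart of the responder thread above is the loop that echoes each requested TopicIdPartition back as a FetchResponseData.PartitionData. The hypothetical helper below distills that construction under simplified assumptions: the name buildEchoResponse is invented, and it returns empty record sets (MemoryRecords.EMPTY) rather than the batches the test builds with buildRecords.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.record.MemoryRecords;

public class FetchResponderSketch {
    // Hypothetical helper: echo each requested partition back at its fetch offset with empty records.
    static LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> buildEchoResponse(
            Map<TopicIdPartition, Long> fetchOffsets) {
        LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseMap = new LinkedHashMap<>();
        for (Map.Entry<TopicIdPartition, Long> entry : fetchOffsets.entrySet()) {
            long offset = entry.getValue();
            responseMap.put(entry.getKey(), new FetchResponseData.PartitionData()
                    .setPartitionIndex(entry.getKey().topicPartition().partition())
                    .setHighWatermark(offset + 2)
                    .setLastStableOffset(offset + 2)
                    .setLogStartOffset(0)
                    .setRecords(MemoryRecords.EMPTY));
        }
        return responseMap;
    }

    public static void main(String[] args) {
        Map<TopicIdPartition, Long> requested = new LinkedHashMap<>();
        requested.put(new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test-topic", 0)), 42L);
        System.out.println(buildEchoResponse(requested));
    }
}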
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
In the class FetcherTest, the method testFetchResponseMetricsWithOnePartitionError:
@Test
public void testFetchResponseMetricsWithOnePartitionError() {
    buildFetcher();
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp0.partition())
            .setHighWatermark(100)
            .setLogStartOffset(0)
            .setRecords(records));
    partitions.put(tidp1, new FetchResponseData.PartitionData()
            .setPartitionIndex(tp1.partition())
            .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
            .setHighWatermark(100)
            .setLogStartOffset(0));
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    fetcher.collectFetch();
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
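
The fetchSizeAvg assertion sums the sizes of the individual records in the successful partition; the errored partition contributes nothing. The standalone sketch below (the class name FetchSizeAccountingSketch is illustrative) rebuilds the same three-record batch and prints both the per-record sum the test uses and the full batch size, which is larger because the batch header is not attributed to any single record.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class FetchSizeAccountingSketch {
    public static void main(String[] args) {
        // Build the same three-record batch the test uses.
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();

        // The test sums individual record sizes rather than taking the whole batch size.
        int perRecordBytes = 0;
        for (Record record : records.records())
            perRecordBytes += record.sizeInBytes();
        System.out.println("sum of record sizes: " + perRecordBytes);
        System.out.println("batch sizeInBytes:   " + records.sizeInBytes());
    }
}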