use of org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState in project kafka by apache.
the class LeaderAndIsrRequestTest method testGetErrorResponse.
@Test
public void testGetErrorResponse() {
    Uuid topicId = Uuid.randomUuid();
    String topicName = "topic";
    int partition = 0;
    for (short version : LEADER_AND_ISR.allVersions()) {
        LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 0, 0, 0,
                Collections.singletonList(new LeaderAndIsrPartitionState()
                        .setTopicName(topicName)
                        .setPartitionIndex(partition)),
                Collections.singletonMap(topicName, topicId),
                Collections.emptySet()).build(version);
        LeaderAndIsrResponse response = request.getErrorResponse(0,
                new ClusterAuthorizationException("Not authorized"));
        assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED, response.error());
        if (version < 5) {
            assertEquals(Collections.singletonList(new LeaderAndIsrPartitionError()
                    .setTopicName(topicName)
                    .setPartitionIndex(partition)
                    .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())),
                response.data().partitionErrors());
            assertEquals(0, response.data().topics().size());
        } else {
            LeaderAndIsrTopicError topicState = response.topics().find(topicId);
            assertEquals(topicId, topicState.topicId());
            assertEquals(Collections.singletonList(new LeaderAndIsrPartitionError()
                    .setPartitionIndex(partition)
                    .setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())),
                topicState.partitionErrors());
            assertEquals(0, response.data().partitionErrors().size());
        }
    }
}
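The two branches above reflect the wire-format change in version 5: older versions return a flat list of partition errors keyed by topic name, while newer versions group the errors by topic ID. A caller that wants a uniform view could normalize both shapes with a small helper along these lines (a sketch, not part of the test above; the getters are assumed to mirror the generated setters used in the assertions, and topicNames is a hypothetical reverse lookup from topic ID to topic name):

private static Map<TopicPartition, Errors> flattenPartitionErrors(LeaderAndIsrResponse response,
                                                                  Map<Uuid, String> topicNames) {
    Map<TopicPartition, Errors> result = new HashMap<>();
    // Versions < 5: a flat list of partition errors that already carries the topic name.
    for (LeaderAndIsrPartitionError error : response.data().partitionErrors())
        result.put(new TopicPartition(error.topicName(), error.partitionIndex()),
            Errors.forCode(error.errorCode()));
    // Versions >= 5: partition errors are grouped by topic ID, so the name has to be looked up.
    for (LeaderAndIsrTopicError topic : response.data().topics())
        for (LeaderAndIsrPartitionError error : topic.partitionErrors())
            result.put(new TopicPartition(topicNames.get(topic.topicId()), error.partitionIndex()),
                Errors.forCode(error.errorCode()));
    return result;
}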
use of org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState in project kafka by apache.
the class UpdateFollowerFetchStateBenchmark method setUp.
@Setup(Level.Trial)
public void setUp() {
scheduler.startup();
LogConfig logConfig = createLogConfig();
logManager = new LogManagerBuilder().setLogDirs(Collections.singletonList(logDir)).setInitialOfflineDirs(Collections.emptyList()).setConfigRepository(new MockConfigRepository()).setInitialDefaultConfig(logConfig).setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5")).setRecoveryThreadsPerDataDir(1).setFlushCheckMs(1000L).setFlushRecoveryOffsetCheckpointMs(10000L).setFlushStartOffsetCheckpointMs(10000L).setRetentionCheckMs(1000L).setMaxPidExpirationMs(60000).setInterBrokerProtocolVersion(ApiVersion.latestVersion()).setScheduler(scheduler).setBrokerTopicStats(brokerTopicStats).setLogDirFailureChannel(logDirFailureChannel).setTime(Time.SYSTEM).setKeepPartitionMetadataFile(true).build();
OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), topicPartition)).thenReturn(Option.apply(0L));
DelayedOperations delayedOperations = new DelayedOperationsMock();
// one leader, plus two followers
List<Integer> replicas = new ArrayList<>();
replicas.add(0);
replicas.add(1);
replicas.add(2);
LeaderAndIsrPartitionState partitionState = new LeaderAndIsrPartitionState().setControllerEpoch(0).setLeader(0).setLeaderEpoch(0).setIsr(replicas).setZkVersion(1).setReplicas(replicas).setIsNew(true);
IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
AlterIsrManager alterIsrManager = Mockito.mock(AlterIsrManager.class);
partition = new Partition(topicPartition, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, isrChangeListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterIsrManager);
partition.makeLeader(partitionState, offsetCheckpoints, topicId);
}
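To run this benchmark in isolation, a standard JMH launcher along the following lines can be used (a sketch; Kafka's jmh-benchmarks module normally drives benchmarks through its own Gradle task, and the fork and iteration counts here are illustrative only):

import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public static void main(String[] args) throws RunnerException {
    Options options = new OptionsBuilder()
        .include(UpdateFollowerFetchStateBenchmark.class.getSimpleName())
        .forks(1)
        .warmupIterations(5)
        .measurementIterations(5)
        .build();
    new Runner(options).run();
}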
use of org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState in project kafka by apache.
the class LeaderAndIsrRequestTest method testVersionLogic.
/**
 * Verifies that the logic we have in LeaderAndIsrRequest to present a unified interface across the various versions
 * works correctly. For example, `LeaderAndIsrPartitionState.topicName` is not serialized/deserialized in
 * recent versions, but we set it manually so that we can always present the ungrouped partition states
 * independently of the version.
 */
@Test
public void testVersionLogic() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        List<LeaderAndIsrPartitionState> partitionStates = asList(
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(0)
                .setLeaderEpoch(10)
                .setIsr(asList(0, 1))
                .setZkVersion(10)
                .setReplicas(asList(0, 1, 2))
                .setAddingReplicas(asList(3))
                .setRemovingReplicas(asList(2)),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic0")
                .setPartitionIndex(1)
                .setControllerEpoch(2)
                .setLeader(1)
                .setLeaderEpoch(11)
                .setIsr(asList(1, 2, 3))
                .setZkVersion(11)
                .setReplicas(asList(1, 2, 3))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()),
            new LeaderAndIsrPartitionState()
                .setTopicName("topic1")
                .setPartitionIndex(0)
                .setControllerEpoch(2)
                .setLeader(2)
                .setLeaderEpoch(11)
                .setIsr(asList(2, 3, 4))
                .setZkVersion(11)
                .setReplicas(asList(2, 3, 4))
                .setAddingReplicas(emptyList())
                .setRemovingReplicas(emptyList()));
        List<Node> liveNodes = asList(new Node(0, "host0", 9090), new Node(1, "host1", 9091));
        Map<String, Uuid> topicIds = new HashMap<>();
        topicIds.put("topic0", Uuid.randomUuid());
        topicIds.put("topic1", Uuid.randomUuid());
        LeaderAndIsrRequest request = new LeaderAndIsrRequest.Builder(version, 1, 2, 3, partitionStates,
            topicIds, liveNodes).build();
        List<LeaderAndIsrLiveLeader> liveLeaders = liveNodes.stream()
            .map(n -> new LeaderAndIsrLiveLeader()
                .setBrokerId(n.id())
                .setHostName(n.host())
                .setPort(n.port()))
            .collect(Collectors.toList());
        assertEquals(new HashSet<>(partitionStates), iterableToSet(request.partitionStates()));
        assertEquals(liveLeaders, request.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
        ByteBuffer byteBuffer = request.serialize();
        LeaderAndIsrRequest deserializedRequest = new LeaderAndIsrRequest(
            new LeaderAndIsrRequestData(new ByteBufferAccessor(byteBuffer), version), version);
        // Adding/removing replicas are only serialized from version 3 onwards, so the deserialized
        // request won't have them for earlier versions.
        if (version < 3) {
            partitionStates.get(0).setAddingReplicas(emptyList()).setRemovingReplicas(emptyList());
        }
        // Prior to version 2 there are no TopicStates on the wire, so the deserialized topic ID map is empty.
        if (version < 2) {
            topicIds = new HashMap<>();
        }
        // Versions 2-4 do not serialize topic IDs, so the deserialized request has zero Uuids in place.
        if (version > 1 && version < 5) {
            topicIds.put("topic0", Uuid.ZERO_UUID);
            topicIds.put("topic1", Uuid.ZERO_UUID);
        }
        assertEquals(new HashSet<>(partitionStates), iterableToSet(deserializedRequest.partitionStates()));
        assertEquals(topicIds, deserializedRequest.topicIds());
        assertEquals(liveLeaders, deserializedRequest.liveLeaders());
        assertEquals(1, request.controllerId());
        assertEquals(2, request.controllerEpoch());
        assertEquals(3, request.brokerEpoch());
    }
}
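The iterableToSet helper referenced above is not part of this excerpt; presumably it is a small utility along these lines, collecting the Iterable returned by request.partitionStates() into a set so the comparison is independent of iteration order:

private static <T> Set<T> iterableToSet(Iterable<? extends T> iterable) {
    // Requires java.util.stream.StreamSupport and java.util.stream.Collectors.
    return StreamSupport.stream(iterable.spliterator(), false).collect(Collectors.toSet());
}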
use of org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState in project kafka by apache.
the class RequestResponseTest method createLeaderAndIsrRequest.
private LeaderAndIsrRequest createLeaderAndIsrRequest(short version) {
    List<LeaderAndIsrPartitionState> partitionStates = new ArrayList<>();
    List<Integer> isr = asList(1, 2);
    List<Integer> replicas = asList(1, 2, 3, 4);
    partitionStates.add(new LeaderAndIsrPartitionState()
        .setTopicName("topic5")
        .setPartitionIndex(105)
        .setControllerEpoch(0)
        .setLeader(2)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setIsNew(false));
    partitionStates.add(new LeaderAndIsrPartitionState()
        .setTopicName("topic5")
        .setPartitionIndex(1)
        .setControllerEpoch(1)
        .setLeader(1)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setIsNew(false));
    partitionStates.add(new LeaderAndIsrPartitionState()
        .setTopicName("topic20")
        .setPartitionIndex(1)
        .setControllerEpoch(1)
        .setLeader(0)
        .setLeaderEpoch(1)
        .setIsr(isr)
        .setZkVersion(2)
        .setReplicas(replicas)
        .setIsNew(false));
    Set<Node> leaders = Utils.mkSet(new Node(0, "test0", 1223), new Node(1, "test1", 1223));
    Map<String, Uuid> topicIds = new HashMap<>();
    topicIds.put("topic5", Uuid.randomUuid());
    topicIds.put("topic20", Uuid.randomUuid());
    return new LeaderAndIsrRequest.Builder(version, 1, 10, 0, partitionStates, topicIds, leaders).build();
}
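A minimal sketch (not RequestResponseTest's actual harness) of how a request built by this factory can be round-tripped through its wire format, mirroring the serialize/deserialize steps used in testVersionLogic above:

for (short version : LEADER_AND_ISR.allVersions()) {
    LeaderAndIsrRequest request = createLeaderAndIsrRequest(version);
    ByteBuffer buffer = request.serialize();
    LeaderAndIsrRequest roundTripped = new LeaderAndIsrRequest(
        new LeaderAndIsrRequestData(new ByteBufferAccessor(buffer), version), version);
    // Controller id and epoch are present in every version, so they survive the round trip unchanged.
    assertEquals(1, roundTripped.controllerId());
    assertEquals(10, roundTripped.controllerEpoch());
}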
use of org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState in project kafka by apache.
the class LeaderAndIsrRequestTest method testTopicPartitionGroupingSizeReduction.
@Test
public void testTopicPartitionGroupingSizeReduction() {
    Set<TopicPartition> tps = TestUtils.generateRandomTopicPartitions(10, 10);
    List<LeaderAndIsrPartitionState> partitionStates = new ArrayList<>();
    Map<String, Uuid> topicIds = new HashMap<>();
    for (TopicPartition tp : tps) {
        partitionStates.add(new LeaderAndIsrPartitionState()
            .setTopicName(tp.topic())
            .setPartitionIndex(tp.partition()));
        topicIds.put(tp.topic(), Uuid.randomUuid());
    }
    LeaderAndIsrRequest.Builder builder = new LeaderAndIsrRequest.Builder((short) 2, 0, 0, 0,
        partitionStates, topicIds, Collections.emptySet());
    LeaderAndIsrRequest v2 = builder.build((short) 2);
    LeaderAndIsrRequest v1 = builder.build((short) 1);
    assertTrue(v2.sizeInBytes() < v1.sizeInBytes(),
        "Expected v2 < v1: v2=" + v2.sizeInBytes() + ", v1=" + v1.sizeInBytes());
}
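The size reduction comes from version 2 grouping partition states by topic, so each topic name is written once rather than once per partition. Purely as an illustration (not part of the test), the same 10x10 partition set could be probed across every version the API supports:

LeaderAndIsrRequest.Builder probe = new LeaderAndIsrRequest.Builder(LEADER_AND_ISR.latestVersion(),
    0, 0, 0, partitionStates, topicIds, Collections.emptySet());
for (short version : LEADER_AND_ISR.allVersions()) {
    System.out.println("LeaderAndIsrRequest v" + version + ": " + probe.build(version).sizeInBytes() + " bytes");
}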