Use of org.apache.kafka.common.requests.UpdateMetadataRequest in project kafka by apache.
The class ReplicaFetcherThreadBenchmark, method setup. The setup builds a LogManager and a pool of follower Partitions, primes a ZkMetadataCache with an UpdateMetadataRequest, and wires both into a ReplicaManager and a benchmark fetcher thread.
// Note: logDir, scheduler, pool, topicId, partitionCount, metrics, logManager,
// replicaManager and fetcher are fields of the enclosing benchmark class.
@Setup(Level.Trial)
public void setup() throws IOException {
    if (!logDir.mkdir())
        throw new IOException("error creating test directory");
    scheduler.startup();
    Properties props = new Properties();
    props.put("zookeeper.connect", "127.0.0.1:9999");
    KafkaConfig config = new KafkaConfig(props);
    LogConfig logConfig = createLogConfig();
    BrokerTopicStats brokerTopicStats = new BrokerTopicStats();
    LogDirFailureChannel logDirFailureChannel = Mockito.mock(LogDirFailureChannel.class);
    List<File> logDirs = Collections.singletonList(logDir);
    logManager = new LogManagerBuilder()
        .setLogDirs(logDirs).setInitialOfflineDirs(Collections.emptyList())
        .setConfigRepository(new MockConfigRepository()).setInitialDefaultConfig(logConfig)
        .setCleanerConfig(new CleanerConfig(0, 0, 0, 0, 0, 0.0, 0, false, "MD5"))
        .setRecoveryThreadsPerDataDir(1).setFlushCheckMs(1000L)
        .setFlushRecoveryOffsetCheckpointMs(10000L).setFlushStartOffsetCheckpointMs(10000L)
        .setRetentionCheckMs(1000L).setMaxPidExpirationMs(60000)
        .setInterBrokerProtocolVersion(ApiVersion.latestVersion())
        .setScheduler(scheduler).setBrokerTopicStats(brokerTopicStats)
        .setLogDirFailureChannel(logDirFailureChannel).setTime(Time.SYSTEM)
        .setKeepPartitionMetadataFile(true).build();
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> initialFetched = new LinkedHashMap<>();
    HashMap<String, Uuid> topicIds = new HashMap<>();
    scala.collection.mutable.Map<TopicPartition, InitialFetchState> initialFetchStates =
        new scala.collection.mutable.HashMap<>();
    List<UpdateMetadataRequestData.UpdateMetadataPartitionState> updatePartitionState = new ArrayList<>();
    for (int i = 0; i < partitionCount; i++) {
        TopicPartition tp = new TopicPartition("topic", i);
        List<Integer> replicas = Arrays.asList(0, 1, 2);
        LeaderAndIsrRequestData.LeaderAndIsrPartitionState partitionState =
            new LeaderAndIsrRequestData.LeaderAndIsrPartitionState()
                .setControllerEpoch(0).setLeader(0).setLeaderEpoch(0)
                .setIsr(replicas).setZkVersion(1).setReplicas(replicas).setIsNew(true);
        IsrChangeListener isrChangeListener = Mockito.mock(IsrChangeListener.class);
        OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class);
        Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L));
        AlterIsrManager isrChannelManager = Mockito.mock(AlterIsrManager.class);
        Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM,
            isrChangeListener, new DelayedOperationsMock(tp), Mockito.mock(MetadataCache.class),
            logManager, isrChannelManager);
        partition.makeFollower(partitionState, offsetCheckpoints, topicId);
        pool.put(tp, partition);
        initialFetchStates.put(tp, new InitialFetchState(topicId, new BrokerEndPoint(3, "host", 3000), 0, 0));
        // Zero-byte records: the benchmark exercises the fetcher path, not data transfer.
        BaseRecords fetched = new BaseRecords() {
            @Override
            public int sizeInBytes() {
                return 0;
            }

            @Override
            public RecordsSend<? extends BaseRecords> toSend() {
                return null;
            }
        };
        initialFetched.put(new TopicIdPartition(topicId.get(), tp), new FetchResponseData.PartitionData()
            .setPartitionIndex(tp.partition()).setLastStableOffset(0)
            .setLogStartOffset(0).setRecords(fetched));
        updatePartitionState.add(new UpdateMetadataRequestData.UpdateMetadataPartitionState()
            .setTopicName("topic").setPartitionIndex(i)
            .setControllerEpoch(0).setLeader(0).setLeaderEpoch(0)
            .setIsr(replicas).setZkVersion(1).setReplicas(replicas));
    }
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0,
        updatePartitionState, Collections.emptyList(), topicIds).build();
    // TODO: fix to support raft
    ZkMetadataCache metadataCache = new ZkMetadataCache(0);
    metadataCache.updateMetadata(0, updateMetadataRequest);
    replicaManager = new ReplicaManagerBuilder()
        .setConfig(config).setMetrics(metrics).setTime(new MockTime())
        .setZkClient(Mockito.mock(KafkaZkClient.class)).setScheduler(scheduler)
        .setLogManager(logManager).setQuotaManagers(Mockito.mock(QuotaFactory.QuotaManagers.class))
        .setBrokerTopicStats(brokerTopicStats).setMetadataCache(metadataCache)
        .setLogDirFailureChannel(new LogDirFailureChannel(logDirs.size()))
        .setAlterIsrManager(TestUtils.createAlterIsrManager()).build();
    fetcher = new ReplicaFetcherBenchThread(config, replicaManager, pool);
    fetcher.addPartitions(initialFetchStates);
    // force a pass to move partitions to fetching state. We do this in the setup phase
    // so that we do not measure this time as part of the steady state work
    fetcher.doWork();
    // handle response to engage the incremental fetch session handler
    fetcher.fetchSessionHandler().handleResponse(
        FetchResponse.of(Errors.NONE, 0, 999, initialFetched),
        ApiKeys.FETCH.latestVersion());
}
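
Distilled from the setup above, the UpdateMetadataRequest pattern itself is small: describe each partition with an UpdateMetadataPartitionState, build the request at the latest protocol version, and hand it to a metadata cache. Below is a minimal sketch using the same calls as the benchmark; the topic name, ids, and epochs are arbitrary placeholders, and the import paths are my assumption for a Kafka 3.x source tree (ZkMetadataCache is an internal broker class, so the kafka core module must be on the classpath).

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import kafka.server.metadata.ZkMetadataCache;
import org.apache.kafka.common.message.UpdateMetadataRequestData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.UpdateMetadataRequest;

public class UpdateMetadataSketch {
    public static void main(String[] args) {
        // One placeholder partition of "demo-topic"; all ids and epochs are arbitrary.
        List<UpdateMetadataRequestData.UpdateMetadataPartitionState> states = Collections.singletonList(
            new UpdateMetadataRequestData.UpdateMetadataPartitionState()
                .setTopicName("demo-topic").setPartitionIndex(0)
                .setControllerEpoch(0).setLeader(0).setLeaderEpoch(0)
                .setIsr(Arrays.asList(0, 1, 2)).setZkVersion(1)
                .setReplicas(Arrays.asList(0, 1, 2)));
        // Builder arguments: version, controller id, controller epoch, broker epoch,
        // partition states, live brokers, topic name -> topic id map.
        UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(
            ApiKeys.UPDATE_METADATA.latestVersion(), 0, 0, 0,
            states, Collections.emptyList(), Collections.emptyMap()).build();
        // Prime a ZooKeeper-backed metadata cache for broker 0 (correlation id 0),
        // exactly as both benchmarks on this page do.
        ZkMetadataCache cache = new ZkMetadataCache(0);
        cache.updateMetadata(0, request);
    }
}
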
Use of org.apache.kafka.common.requests.UpdateMetadataRequest in project kafka by apache.
The class MetadataRequestBenchmark, method initializeMetadataCache. The method registers five live brokers and topicCount × partitionCount partition states in the benchmark's metadata cache through a single UpdateMetadataRequest.
private void initializeMetadataCache() {
    List<UpdateMetadataBroker> liveBrokers = new LinkedList<>();
    List<UpdateMetadataPartitionState> partitionStates = new LinkedList<>();
    // Five live brokers, each with the endpoints supplied by the endpoints() helper (not shown).
    IntStream.range(0, 5).forEach(brokerId -> liveBrokers.add(new UpdateMetadataBroker()
        .setId(brokerId).setEndpoints(endpoints(brokerId)).setRack("rack1")));
    // topicCount topics with partitionCount partitions each.
    IntStream.range(0, topicCount).forEach(topicId -> {
        String topicName = "topic-" + topicId;
        IntStream.range(0, partitionCount).forEach(partitionId -> {
            partitionStates.add(new UpdateMetadataPartitionState()
                .setTopicName(topicName).setPartitionIndex(partitionId)
                .setControllerEpoch(1).setLeader(partitionCount % 5).setLeaderEpoch(0)
                .setIsr(Arrays.asList(0, 1, 3)).setZkVersion(1)
                .setReplicas(Arrays.asList(0, 1, 3)));
        });
    });
    UpdateMetadataRequest updateMetadataRequest = new UpdateMetadataRequest.Builder(
        ApiKeys.UPDATE_METADATA.latestVersion(), 1, 1, 1,
        partitionStates, liveBrokers, Collections.emptyMap()).build();
    metadataCache.updateMetadata(100, updateMetadataRequest);
}
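
The endpoints(brokerId) helper referenced above is not shown on this page. A plausible reconstruction follows, assuming one PLAINTEXT listener per broker; the host naming scheme and port are illustrative, not taken from the benchmark:

// Hypothetical reconstruction of the endpoints(brokerId) helper used above.
// SecurityProtocol is org.apache.kafka.common.security.auth.SecurityProtocol;
// ListenerName is org.apache.kafka.common.network.ListenerName.
private List<UpdateMetadataRequestData.UpdateMetadataEndpoint> endpoints(final int brokerId) {
    return Collections.singletonList(new UpdateMetadataRequestData.UpdateMetadataEndpoint()
        .setHost("broker-" + brokerId)    // illustrative host name
        .setPort(9092)                    // illustrative port
        .setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)
        .setListener(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT).value()));
}

setEndpoints in the loop above expects a List<UpdateMetadataEndpoint>, so a single endpoint per broker is enough to make the cached brokers addressable.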