Use of com.alipay.sofa.jraft.rhea.metadata.Peer in the sofa-jraft project by sofastack.
From the class ChaosTestCluster, method randomTransferLeader:
/**
 * Transfers the leadership of the default region to a randomly chosen peer.
 * <p>
 * Resolves the current leader store and its endpoint, picks a random peer,
 * and asks the placement driver to transfer leadership to it.
 *
 * @throws RuntimeException if the placement driver reports the transfer failed
 */
public synchronized void randomTransferLeader() {
    final RheaKVStore leader = getLeaderStore();
    final Endpoint leaderEndpoint = getSelfEndpoint(leader);
    final PlacementDriverClient pdClient = leader.getPlacementDriverClient();
    final Peer randomPeer = JRaftHelper.toPeer(getRandomPeer());
    final boolean result = pdClient.transferLeader(Constants.DEFAULT_REGION_ID, randomPeer, false);
    if (!result) {
        // Fix: the message previously opened '[' without the closing ']'.
        throw new RuntimeException("fail to transfer leader [" + leaderEndpoint + " --> " + randomPeer + "]");
    }
    LOG.info("Transfer leader from {} to {}", leaderEndpoint, randomPeer.getEndpoint());
}
Use of com.alipay.sofa.jraft.rhea.metadata.Peer in the sofa-jraft project by sofastack.
From the class JRaftHelper, method toPeer:
/**
 * Converts a raft-level {@code PeerId} into a metadata {@code Peer}.
 * <p>
 * Only the endpoint is carried over (as a defensive copy); the peer id and
 * store id are left as the placeholder value {@code -1}.
 *
 * @param peerId the raft peer id to convert; must be non-null with a non-null endpoint
 * @return a new {@code Peer} holding a copy of the peer's endpoint
 */
public static Peer toPeer(final PeerId peerId) {
    Requires.requireNonNull(peerId, "peerId");
    final Endpoint ep = peerId.getEndpoint();
    Requires.requireNonNull(ep, "peerId.endpoint");
    final Peer result = new Peer();
    // Copy the endpoint so the returned Peer does not share state with peerId.
    result.setEndpoint(ep.copy());
    // Id and store id are unknown at this point; -1 marks them as unset.
    result.setId(-1);
    result.setStoreId(-1);
    return result;
}
Use of com.alipay.sofa.jraft.rhea.metadata.Peer in the sofa-jraft project by sofastack.
From the class StatsCollector, method collectRegionStats:
/**
 * Builds a {@code RegionStats} snapshot for the given region to be reported
 * in the region heartbeat.
 *
 * @param region       the region being reported on
 * @param timeInterval the interval this report actually covers
 * @return the populated stats object
 */
public RegionStats collectRegionStats(final Region region, final TimeInterval timeInterval) {
    final RegionStats regionStats = new RegionStats();
    regionStats.setRegionId(region.getId());
    // The leader peer sending this heartbeat: this store is the leader.
    final Peer leaderPeer = new Peer(region.getId(), this.storeEngine.getStoreId(),
        this.storeEngine.getSelfEndpoint());
    regionStats.setLeader(leaderPeer);
    // Peers the leader considers down: TODO
    // Pending peers (not yet working followers from the leader's view): TODO
    // Traffic counters for this reporting period; the 'true' flag is forwarded
    // unchanged (presumably "reset after read" — confirm in getRegionBytes*/Keys*).
    regionStats.setBytesWritten(getRegionBytesWritten(region, true));
    regionStats.setBytesRead(getRegionBytesRead(region, true));
    regionStats.setKeysWritten(getRegionKeysWritten(region, true));
    regionStats.setKeysRead(getRegionKeysRead(region, true));
    // Approximate region size: TODO very important
    // Approximate number of keys in [startKey, endKey).
    regionStats.setApproximateKeys(this.rawKVStore.getApproximateKeysInRange(region.getStartKey(),
        region.getEndKey()));
    // The interval this report actually covers.
    regionStats.setInterval(timeInterval);
    LOG.info("Collect [RegionStats]: {}.", regionStats);
    return regionStats;
}
Use of com.alipay.sofa.jraft.rhea.metadata.Peer in the sofa-jraft project by sofastack.
From the class RegionLeaderBalanceHandler, method readMessage:
/**
 * Handles a region heartbeat and, when this store is a "model worker"
 * (a store carrying the most leaders), emits at most one transfer-leader
 * instruction moving a leader to the laziest under-loaded store.
 *
 * @param ctx   handler pipeline context (unused here)
 * @param event the region ping event carrying the heartbeat request
 * @throws Exception propagated from metadata lookups
 */
@Override
public void readMessage(final HandlerContext ctx, final RegionPingEvent event) throws Exception {
    // Skip events already fully processed by an earlier handler.
    if (event.isReady()) {
        return;
    }
    final MetadataStore metadataStore = event.getMetadataStore();
    final RegionHeartbeatRequest request = event.getMessage();
    final long clusterId = request.getClusterId();
    final long storeId = request.getStoreId();
    final ClusterStatsManager clusterStatsManager = ClusterStatsManager.getInstance(clusterId);
    final List<Pair<Region, RegionStats>> regionStatsList = request.getRegionStatsList();
    // Record that this store currently leads every region it reported on.
    for (final Pair<Region, RegionStats> stats : regionStatsList) {
        final Region region = stats.getKey();
        clusterStatsManager.addOrUpdateLeader(storeId, region.getId());
    }
    // check if the modelWorker
    // "Model workers" are the stores carrying the most leaders; only they
    // give leaders away, so stop here unless the reporting store is one.
    final Pair<Set<Long>, Integer> modelWorkers = clusterStatsManager.findModelWorkerStores(1);
    final Set<Long> modelWorkerStoreIds = modelWorkers.getKey();
    final int modelWorkerLeaders = modelWorkers.getValue();
    if (!modelWorkerStoreIds.contains(storeId)) {
        return;
    }
    LOG.info("[Cluster: {}] model worker stores is: {}, it has {} leaders.", clusterId, modelWorkerStoreIds, modelWorkerLeaders);
    for (final Pair<Region, RegionStats> pair : regionStatsList) {
        final Region region = pair.getKey();
        final List<Peer> peers = region.getPeers();
        if (peers == null) {
            continue;
        }
        // Map this region's peers to their store ids via their endpoints.
        // NOTE: Lists.transform returns a lazy view over 'peers'.
        final List<Endpoint> endpoints = Lists.transform(peers, Peer::getEndpoint);
        final Map<Long, Endpoint> storeIds = metadataStore.unsafeGetStoreIdsByEndpoints(clusterId, endpoints);
        // find lazyWorkers
        final List<Pair<Long, Integer>> lazyWorkers = clusterStatsManager.findLazyWorkerStores(storeIds.keySet());
        // NOTE(review): this is 'return' while the later emptiness check uses
        // 'continue' — later regions are skipped when the first region has no
        // lazy workers among its peers; confirm this asymmetry is intended.
        if (lazyWorkers.isEmpty()) {
            return;
        }
        // Drop workers whose leader deficit is too small to justify a
        // transfer (moving one leader would not improve the balance).
        // Iterate backwards so remove(i) does not shift unvisited elements.
        for (int i = lazyWorkers.size() - 1; i >= 0; i--) {
            final Pair<Long, Integer> worker = lazyWorkers.get(i);
            if (modelWorkerLeaders - worker.getValue() <= 1) {
                // no need to transfer
                lazyWorkers.remove(i);
            }
        }
        if (lazyWorkers.isEmpty()) {
            continue;
        }
        final Pair<Long, Integer> laziestWorker = tryToFindLaziestWorker(clusterId, metadataStore, lazyWorkers);
        if (laziestWorker == null) {
            continue;
        }
        final Long lazyWorkerStoreId = laziestWorker.getKey();
        LOG.info("[Cluster: {}], lazy worker store is: {}, it has {} leaders.", clusterId, lazyWorkerStoreId, laziestWorker.getValue());
        // Build the instruction telling the region's leader to hand
        // leadership to the laziest worker store.
        final Instruction.TransferLeader transferLeader = new Instruction.TransferLeader();
        transferLeader.setMoveToStoreId(lazyWorkerStoreId);
        transferLeader.setMoveToEndpoint(storeIds.get(lazyWorkerStoreId));
        final Instruction instruction = new Instruction();
        // Copy the region so the instruction does not alias the request data.
        instruction.setRegion(region.copy());
        instruction.setTransferLeader(transferLeader);
        event.addInstruction(instruction);
        LOG.info("[Cluster: {}], send 'instruction.transferLeader': {} to region: {}.", clusterId, instruction, region);
        // Only do one thing at a time
        break;
    }
}
Aggregations