
Example 1 with Peer

Use of io.dingodb.store.row.metadata.Peer in project dingo by dingodb.

The class RegionHeartbeatSender, method collectRegionStats:

public RegionStats collectRegionStats(final TimeInterval timeInterval) {
    final RegionStats stats = new RegionStats();
    stats.setRegionId(region.getId());
    // Leader Peer sending the heartbeat
    stats.setLeader(new Peer(region.getId(), this.storeId, this.regionEngine.getStoreEngine().getSelfEndpoint()));
    // Leader considers that these peers are down
    // TODO
    // Pending peers are the peers that the leader can't consider as working followers
    // TODO
    // Bytes written for the region during this period
    stats.setBytesWritten(bytesWritten());
    // Bytes read for the region during this period
    stats.setBytesRead(bytesRead());
    // Keys written for the region during this period
    stats.setKeysWritten(keysWritten());
    // Keys read for the region during this period
    stats.setKeysRead(keysRead());
    // Approximate region size
    // TODO very important
    // Approximate number of keys
    ApproximateKVStats rangeStats = store().getApproximateKVStatsInRange(region.getStartKey(), region.getEndKey());
    stats.setApproximateKeys(rangeStats.getKeysCnt());
    stats.setApproximateSize(rangeStats.getSizeInBytes());
    // Actually reported time interval
    stats.setInterval(timeInterval);
    if (log.isDebugEnabled()) {
        log.info("Collect [RegionStats]: {}.", stats);
    }
    return stats;
}
Also used : RegionStats(io.dingodb.store.row.metadata.RegionStats) Peer(io.dingodb.store.row.metadata.Peer) ApproximateKVStats(io.dingodb.store.row.ApproximateKVStats)
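
For context, here is a minimal sketch of how a heartbeat timer might drive this collector. The scheduling code below is not taken from the dingo sources; the TimeInterval(start, end) constructor is assumed from the upstream rheakv-style metadata, and sending the resulting stats to the coordinator is left out.

// Hypothetical caller: collect stats for the window since the last report.
private long lastReportTime = System.currentTimeMillis();

private void onHeartbeatTick() {
    final long now = System.currentTimeMillis();
    // Assumption: TimeInterval carries the (start, end) timestamps of the reporting window.
    final TimeInterval interval = new TimeInterval(lastReportTime, now);
    final RegionStats stats = collectRegionStats(interval);
    lastReportTime = now;
    // The stats would then be packed into the region heartbeat request and
    // reported to the coordinator (omitted here).
}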

Example 2 with Peer

Use of io.dingodb.store.row.metadata.Peer in project dingo by dingodb.

The class StatsCollector, method collectRegionStats:

public RegionStats collectRegionStats(final Region region, final TimeInterval timeInterval) {
    final RegionStats stats = new RegionStats();
    stats.setRegionId(region.getId());
    // Leader Peer sending the heartbeat
    stats.setLeader(new Peer(region.getId(), this.storeEngine.getStoreId(), this.storeEngine.getSelfEndpoint()));
    // Leader considers that these peers are down
    // TODO
    // Pending peers are the peers that the leader can't consider as working followers
    // TODO
    // Bytes written for the region during this period
    stats.setBytesWritten(getRegionBytesWritten(region, true));
    // Bytes read for the region during this period
    stats.setBytesRead(getRegionBytesRead(region, true));
    // Keys written for the region during this period
    stats.setKeysWritten(getRegionKeysWritten(region, true));
    // Keys read for the region during this period
    stats.setKeysRead(getRegionKeysRead(region, true));
    // Approximate region size
    // TODO very important
    // Approximate number of keys
    // ApproximateKVStats rangeStats = this.rawKVStore.getApproximateKVStatsInRange(
    // region.getStartKey(), region.getEndKey());
    // stats.setApproximateKeys(rangeStats.getKeysCnt());
    // stats.setApproximateSize(rangeStats.getSizeInBytes());
    // Actually reported time interval
    stats.setInterval(timeInterval);
    LOG.info("Collect [RegionStats]: {}.", stats);
    return stats;
}
Also used : RegionStats(io.dingodb.store.row.metadata.RegionStats) Peer(io.dingodb.store.row.metadata.Peer)

Example 3 with Peer

Use of io.dingodb.store.row.metadata.Peer in project dingo by dingodb.

The class JRaftHelper, method toPeer:

public static Peer toPeer(final PeerId peerId) {
    Requires.requireNonNull(peerId, "peerId");
    final Endpoint endpoint = peerId.getEndpoint();
    Requires.requireNonNull(endpoint, "peerId.endpoint");
    final Peer peer = new Peer();
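    // Only the endpoint can be derived from a raft PeerId; the peer id and
    // store id are unknown at this point, so they are filled with "-1" placeholders.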
    peer.setId("-1");
    peer.setStoreId("-1");
    peer.setEndpoint(endpoint.copy());
    return peer;
}
Also used : Endpoint(io.dingodb.raft.util.Endpoint) Peer(io.dingodb.store.row.metadata.Peer)
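
A small usage sketch for the converter above; the PeerId constructor and its package (io.dingodb.raft.entity.PeerId in the bundled jraft fork) are assumptions, and the address is only illustrative.

// Convert a raft PeerId into the row-store Peer metadata object.
PeerId peerId = new PeerId("192.168.1.10", 8081);
Peer peer = JRaftHelper.toPeer(peerId);
// peer.getEndpoint() now holds a copy of 192.168.1.10:8081, while getId() and
// getStoreId() stay at the "-1" placeholders until the metadata layer assigns real ids.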

Example 4 with Peer

Use of io.dingodb.store.row.metadata.Peer in project dingo by dingodb.

The class RegionHeartbeatHandler, method balance:

private Instruction balance(Region region, RegionStats regionStats) {
    final long clusterId = 0;
    final String storeId = regionStats.getLeader().getStoreId();
    final ClusterStatsManager clusterStatsManager = ClusterStatsManager.getInstance(clusterId);
    clusterStatsManager.addOrUpdateLeader(storeId, region.getId());
    // Check whether this store is currently one of the model-worker stores (those holding the most leaders)
    final Pair<Set<String>, Integer> modelWorkers = clusterStatsManager.findModelWorkerStores(1);
    final Set<String> modelWorkerStoreIds = modelWorkers.getKey();
    final int modelWorkerLeaders = modelWorkers.getValue();
    if (!modelWorkerStoreIds.contains(storeId)) {
        return null;
    }
    log.info("[Cluster] model worker stores is: {}, it has {} leaders.", modelWorkerStoreIds, modelWorkerLeaders);
    final List<Peer> peers = region.getPeers();
    if (peers == null) {
        return null;
    }
    final List<Endpoint> endpoints = Lists.transform(peers, Peer::getEndpoint);
    final Map<String, Endpoint> storeIds = rowStoreMetaAdaptor.storeLocation();
    // find lazyWorkers
    final List<Pair<String, Integer>> lazyWorkers = clusterStatsManager.findLazyWorkerStores(storeIds.keySet());
    if (lazyWorkers.isEmpty()) {
        return null;
    }
    for (int i = lazyWorkers.size() - 1; i >= 0; i--) {
        final Pair<String, Integer> worker = lazyWorkers.get(i);
        if (modelWorkerLeaders - worker.getValue() <= 1) {
            // no need to transfer
            lazyWorkers.remove(i);
        }
    }
    if (lazyWorkers.isEmpty()) {
        return null;
    }
    final Pair<String, Integer> laziestWorker = tryToFindLaziestWorker(lazyWorkers);
    if (laziestWorker == null) {
        return null;
    }
    final String lazyWorkerStoreId = laziestWorker.getKey();
    log.info("[Cluster: {}], lazy worker store is: {}, it has {} leaders.", clusterId, lazyWorkerStoreId, laziestWorker.getValue());
    final Instruction.TransferLeader transferLeader = new Instruction.TransferLeader();
    transferLeader.setMoveToStoreId(lazyWorkerStoreId);
    transferLeader.setMoveToEndpoint(storeIds.get(lazyWorkerStoreId));
    final Instruction instruction = new Instruction();
    instruction.setRegion(region.copy());
    instruction.setTransferLeader(transferLeader);
    log.info("[Cluster: {}], send 'instruction.transferLeader': {} to region: {}.", clusterId, instruction, region);
    return instruction;
}
Also used : ClusterStatsManager(io.dingodb.server.coordinator.meta.ClusterStatsManager) Set(java.util.Set) Peer(io.dingodb.store.row.metadata.Peer) Instruction(io.dingodb.store.row.metadata.Instruction) Endpoint(io.dingodb.raft.util.Endpoint) Pair(io.dingodb.store.row.util.Pair)
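
tryToFindLaziestWorker is not shown in this example; the following is a plausible sketch of its behavior, reconstructed only from how balance uses its result (the actual dingo implementation may differ).

// Hypothetical reconstruction: among the remaining lazy workers, pick the store
// carrying the fewest leaders, so a leader transfer narrows the gap to the model worker the most.
private static Pair<String, Integer> tryToFindLaziestWorker(final List<Pair<String, Integer>> lazyWorkers) {
    Pair<String, Integer> laziest = null;
    for (final Pair<String, Integer> worker : lazyWorkers) {
        if (laziest == null || worker.getValue() < laziest.getValue()) {
            laziest = worker;
        }
    }
    return laziest;
}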

Example 5 with Peer

Use of io.dingodb.store.row.metadata.Peer in project dingo by dingodb.

The class RowStoreMetaAdaptorImpl, method saveRegionHeartbeat:

@Override
public void saveRegionHeartbeat(Region region, RegionStats regionStats) {
    final GeneralId regionId = GeneralIdHelper.region(region.getId());
    RegionApp regionApp = mapping(region);
    RegionView regionView = mapping(regionApp, regionStats);
    GeneralId storeId = GeneralId.fromStr(regionStats.getLeader().getStoreId());
    Set<GeneralId> nodes = region.getPeers().stream().map(Peer::getEndpoint).map(scheduleMetaAdaptor::storeId).collect(Collectors.toSet());
    regionView.nodes(nodes);
    regionView.leader(storeId);
    regionView.followers(nodes);
    scheduleMetaAdaptor.updateRegionView(regionApp, regionView);
    updateLeader(regionId, storeId);
    regionMap.put(regionId, region);
    regionStatsMap.put(GeneralId.appViewOf(regionId.seqNo(), regionId.name()), regionStats);
}
Also used : RegionView(io.dingodb.server.coordinator.app.impl.RegionView) Peer(io.dingodb.store.row.metadata.Peer) RegionApp(io.dingodb.server.coordinator.app.impl.RegionApp) GeneralId(io.dingodb.server.coordinator.GeneralId)

Aggregations

Peer (io.dingodb.store.row.metadata.Peer): 6 usages
Endpoint (io.dingodb.raft.util.Endpoint): 3 usages
RegionStats (io.dingodb.store.row.metadata.RegionStats): 3 usages
GeneralId (io.dingodb.server.coordinator.GeneralId): 2 usages
RegionApp (io.dingodb.server.coordinator.app.impl.RegionApp): 2 usages
RegionView (io.dingodb.server.coordinator.app.impl.RegionView): 2 usages
Set (java.util.Set): 2 usages
ConcurrentHashSet (com.alipay.remoting.util.ConcurrentHashSet): 1 usage
Optional (io.dingodb.common.util.Optional): 1 usage
ClusterStatsManager (io.dingodb.server.coordinator.meta.ClusterStatsManager): 1 usage
GeneralIdHelper (io.dingodb.server.coordinator.meta.GeneralIdHelper): 1 usage
RowStoreMetaAdaptor (io.dingodb.server.coordinator.meta.RowStoreMetaAdaptor): 1 usage
ScheduleMetaAdaptor (io.dingodb.server.coordinator.meta.ScheduleMetaAdaptor): 1 usage
ExecutorView (io.dingodb.server.coordinator.resource.impl.ExecutorView): 1 usage
ApproximateKVStats (io.dingodb.store.row.ApproximateKVStats): 1 usage
Cluster (io.dingodb.store.row.metadata.Cluster): 1 usage
Instruction (io.dingodb.store.row.metadata.Instruction): 1 usage
Region (io.dingodb.store.row.metadata.Region): 1 usage
RegionEpoch (io.dingodb.store.row.metadata.RegionEpoch): 1 usage
Store (io.dingodb.store.row.metadata.Store): 1 usage