
Example 16 with Endpoint

use of io.dingodb.raft.util.Endpoint in project dingo by dingodb.

the class AbstractPlacementDriverClient method getLuckyPeer.

@Override
public Endpoint getLuckyPeer(final String regionId, final boolean forceRefresh, final long timeoutMillis, final Endpoint unExpect) {
    final String raftGroupId = JRaftHelper.getJRaftGroupId(this.clusterName, regionId);
    final RouteTable routeTable = RouteTable.getInstance();
    if (forceRefresh) {
        final long deadline = System.currentTimeMillis() + timeoutMillis;
        final StringBuilder error = new StringBuilder();
        // The configuration may not be refreshable yet, e.g. while the group is still electing a leader
        // or is in the 'leader-transfer' state, so it needs to be re-tried until the deadline
        for (; ; ) {
            try {
                final Status st = routeTable.refreshConfiguration(this.cliClientService, raftGroupId, 5000);
                if (st.isOk()) {
                    break;
                }
                error.append(st.toString());
            } catch (final InterruptedException e) {
                ThrowUtil.throwException(e);
            } catch (final TimeoutException e) {
                error.append(e.getMessage());
            }
            if (System.currentTimeMillis() < deadline) {
                LOG.debug("Fail to get peers, retry again, {}.", error);
                error.append(", ");
                try {
                    Thread.sleep(5);
                } catch (final InterruptedException e) {
                    ThrowUtil.throwException(e);
                }
            } else {
                throw new RouteTableException(error.toString());
            }
        }
    }
    final Configuration configs = routeTable.getConfiguration(raftGroupId);
    if (configs == null) {
        throw new RouteTableException("empty configs in group: " + raftGroupId);
    }
    final List<PeerId> peerList = configs.getPeers();
    if (peerList == null || peerList.isEmpty()) {
        throw new RouteTableException("empty peers in group: " + raftGroupId);
    }
    final int size = peerList.size();
    if (size == 1) {
        return peerList.get(0).getEndpoint();
    }
    final RoundRobinLoadBalancer balancer = RoundRobinLoadBalancer.getInstance(regionId);
    for (int i = 0; i < size; i++) {
        final PeerId candidate = balancer.select(peerList);
        final Endpoint luckyOne = candidate.getEndpoint();
        if (!luckyOne.equals(unExpect)) {
            return luckyOne;
        }
    }
    throw new RouteTableException("have no choice in group(peers): " + raftGroupId);
}
Also used : Status(io.dingodb.raft.Status) Configuration(io.dingodb.raft.conf.Configuration) RoundRobinLoadBalancer(io.dingodb.store.row.client.RoundRobinLoadBalancer) RouteTableException(io.dingodb.store.row.errors.RouteTableException) Endpoint(io.dingodb.raft.util.Endpoint) RegionRouteTable(io.dingodb.store.row.client.RegionRouteTable) RouteTable(io.dingodb.raft.RouteTable) TimeoutException(java.util.concurrent.TimeoutException) PeerId(io.dingodb.raft.entity.PeerId)
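
A caller-side sketch of the method above. Only the getLuckyPeer signature is taken from this example; the helper name, the client parameter, and the concrete argument values are assumptions, and the required imports mirror the "Also used" list.

// Hedged sketch: helper name, client reference, and argument values are illustrative.
private Endpoint pickPeerAvoiding(final AbstractPlacementDriverClient pdClient,
                                  final String regionId,
                                  final Endpoint lastFailed) {
    // forceRefresh == true re-reads the raft configuration before selecting; the
    // round-robin balancer then returns the next peer that differs from 'lastFailed'
    // (a single-peer group returns its only peer regardless of the exclusion).
    return pdClient.getLuckyPeer(regionId, true, 5000, lastFailed);
}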

Example 17 with Endpoint

use of io.dingodb.raft.util.Endpoint in project dingo by dingodb.

the class DefaultPlacementDriverRpcService method callPdServerWithRpc.

@Override
public <V> CompletableFuture<V> callPdServerWithRpc(final BaseRequest request, final FailoverClosure<V> closure, final Errors lastCause) {
    final boolean forceRefresh = ErrorsHelper.isInvalidPeer(lastCause);
    final Endpoint endpoint = this.pdClient.getPdLeader(forceRefresh, this.rpcTimeoutMillis);
    internalCallPdWithRpc(endpoint, request, closure);
    return closure.future();
}
Also used : Endpoint(io.dingodb.raft.util.Endpoint)
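
A sketch of how a caller might block on the future returned by callPdServerWithRpc. The closure construction follows the pattern visible in Example 18 below; the helper name, the generic result type, the retry budget, and the join()-based waiting are assumptions.

// Hedged sketch: request/result types and the retry budget are illustrative; the
// FailoverClosureImpl wiring mirrors Example 18 below.
private <V> V callPdAndWait(final BaseRequest request, final int retriesLeft) {
    final CompletableFuture<V> future = new CompletableFuture<>();
    final RetryRunner retryRunner = retryCause -> { /* re-issue the call with retriesLeft - 1 */ };
    final FailoverClosure<V> closure = new FailoverClosureImpl<>(future, retriesLeft, retryRunner);
    // null lastCause: first attempt, there is no prior error to react to.
    this.pdRpcService.callPdServerWithRpc(request, closure, null);
    return future.join(); // block until the PD responds or failover gives up and fails the future
}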

Example 18 with Endpoint

use of io.dingodb.raft.util.Endpoint in project dingo by dingodb.

the class MetadataRpcClient method internalCreateRegionId.

private void internalCreateRegionId(final long clusterId, final Endpoint endpoint, final CompletableFuture<Long> future, final int retriesLeft, final Errors lastCause) {
    final RetryRunner retryRunner = retryCause -> internalCreateRegionId(clusterId, endpoint, future, retriesLeft - 1, retryCause);
    final FailoverClosure<Long> closure = new FailoverClosureImpl<>(future, retriesLeft, retryRunner);
    final CreateRegionIdRequest request = new CreateRegionIdRequest();
    request.setClusterId(clusterId);
    request.setEndpoint(endpoint);
    this.pdRpcService.callPdServerWithRpc(request, closure, lastCause);
}
Also used : CreateRegionIdRequest(io.dingodb.store.row.cmd.pd.CreateRegionIdRequest) GetClusterInfoRequest(io.dingodb.store.row.cmd.pd.GetClusterInfoRequest) Errors(io.dingodb.store.row.errors.Errors) FailoverClosure(io.dingodb.store.row.client.failover.FailoverClosure) Store(io.dingodb.store.row.metadata.Store) CompletableFuture(java.util.concurrent.CompletableFuture) Cluster(io.dingodb.store.row.metadata.Cluster) SetStoreInfoRequest(io.dingodb.store.row.cmd.pd.SetStoreInfoRequest) RetryRunner(io.dingodb.store.row.client.failover.RetryRunner) FailoverClosureImpl(io.dingodb.store.row.client.failover.impl.FailoverClosureImpl) GetStoreInfoRequest(io.dingodb.store.row.cmd.pd.GetStoreInfoRequest) GetStoreIdRequest(io.dingodb.store.row.cmd.pd.GetStoreIdRequest) Endpoint(io.dingodb.raft.util.Endpoint) FutureHelper(io.dingodb.store.row.client.FutureHelper)
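
A sketch of a public entry point that could start the retry chain above; the method name, the clusterId and failoverRetries fields, and returning the raw future are assumptions not shown on this page.

// Hedged sketch: 'this.clusterId' and 'this.failoverRetries' are assumed fields.
public CompletableFuture<Long> createRegionId(final Endpoint endpoint) {
    final CompletableFuture<Long> future = new CompletableFuture<>();
    // Start with the full retry budget and no prior error; each failure re-enters
    // internalCreateRegionId through the RetryRunner lambda with retriesLeft - 1.
    internalCreateRegionId(this.clusterId, endpoint, future, this.failoverRetries, null);
    return future;
}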

Example 19 with Endpoint

use of io.dingodb.raft.util.Endpoint in project dingo by dingodb.

the class Store method copy.

@Override
public Store copy() {
    Endpoint endpoint = null;
    if (this.endpoint != null) {
        endpoint = this.endpoint.copy();
    }
    List<Region> regions = null;
    if (this.regions != null) {
        regions = Lists.newArrayListWithCapacity(this.regions.size());
        for (Region region : this.regions) {
            regions.add(region.copy());
        }
    }
    List<StoreLabel> labels = null;
    if (this.labels != null) {
        labels = Lists.newArrayListWithCapacity(this.labels.size());
        for (StoreLabel label : this.labels) {
            labels.add(label.copy());
        }
    }
    return new Store(this.id, endpoint, this.state, regions, labels);
}
Also used : Endpoint(io.dingodb.raft.util.Endpoint)
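
For contrast, the same deep copy can be written more compactly with streams. This is a behavior-equivalent sketch of the loop form above, not the project's code; it assumes java.util.stream.Collectors is imported and keeps the null-preserving semantics.

// Hedged, behavior-equivalent sketch of copy() using streams; null fields stay null.
@Override
public Store copy() {
    final Endpoint endpoint = this.endpoint == null ? null : this.endpoint.copy();
    final List<Region> regions = this.regions == null ? null
        : this.regions.stream().map(Region::copy).collect(Collectors.toList());
    final List<StoreLabel> labels = this.labels == null ? null
        : this.labels.stream().map(StoreLabel::copy).collect(Collectors.toList());
    return new Store(this.id, endpoint, this.state, regions, labels);
}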

Example 20 with Endpoint

use of io.dingodb.raft.util.Endpoint in project dingo by dingodb.

the class CoordinatorServer method start.

public void start(final CoordinatorOptions opts) throws Exception {
    this.svrOpts = opts;
    log.info("Coordinator all configuration: {}.", this.svrOpts);
    log.info("instance configuration: {}.", DingoOptions.instance());
    this.context = new CoordinatorContext();
    final String raftId = svrOpts.getRaft().getGroup();
    final Endpoint endpoint = new Endpoint(svrOpts.getIp(), svrOpts.getRaft().getPort());
    final RocksRawKVStore rawKVStore = createRocksDB();
    final CoordinatorStateMachine stateMachine = createStateMachine(raftId, rawKVStore, context);
    final Node node = RaftServiceFactory.createRaftNode(raftId, new PeerId(endpoint, 0));
    final AsyncKeyValueStore keyValueStore = createStore(rawKVStore, node);
    final ScheduleMetaAdaptor scheduleMetaAdaptor = createScheduleMetaAdaptor(keyValueStore);
    final TableMetaAdaptor tableMetaAdaptor = createTableMetaAdaptor(keyValueStore, scheduleMetaAdaptor);
    final CoordinatorMetaService metaService = createMetaService();
    final RowStoreMetaAdaptor rowStoreMetaAdaptor = createRowStoreMetaAdaptor(scheduleMetaAdaptor);
    context.coordOpts(svrOpts).endpoint(endpoint).netService(createNetService()).rocksKVStore(rawKVStore).stateMachine(stateMachine).keyValueStore(keyValueStore).node(node).scheduleMetaAdaptor(scheduleMetaAdaptor).serviceProvider(createServiceProvider()).tableMetaAdaptor(tableMetaAdaptor).rowStoreMetaAdaptor(rowStoreMetaAdaptor).metaService(metaService);
    NodeManager.getInstance().addAddress(endpoint);
    stateMachine.init();
    final NodeOptions nodeOptions = initNodeOptions(stateMachine);
    node.init(nodeOptions);
    keyValueStore.init();
}
Also used : RowStoreMetaAdaptor(io.dingodb.server.coordinator.meta.RowStoreMetaAdaptor) CoordinatorStateMachine(io.dingodb.server.coordinator.state.CoordinatorStateMachine) Node(io.dingodb.raft.Node) RocksRawKVStore(io.dingodb.store.row.storage.RocksRawKVStore) ScheduleMetaAdaptor(io.dingodb.server.coordinator.meta.ScheduleMetaAdaptor) NodeOptions(io.dingodb.raft.option.NodeOptions) CoordinatorContext(io.dingodb.server.coordinator.context.CoordinatorContext) AsyncKeyValueStore(io.dingodb.server.coordinator.store.AsyncKeyValueStore) RaftAsyncKeyValueStore(io.dingodb.server.coordinator.store.RaftAsyncKeyValueStore) TableMetaAdaptor(io.dingodb.server.coordinator.meta.TableMetaAdaptor) Endpoint(io.dingodb.raft.util.Endpoint) CoordinatorMetaService(io.dingodb.server.coordinator.meta.service.CoordinatorMetaService) PeerId(io.dingodb.raft.entity.PeerId)
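
A reduced sketch of the raft-identity wiring inside start(): only the Endpoint to PeerId to Node chain and the NodeManager registration are taken from the code above; the address, port, and group name are illustrative values.

// Hedged sketch: the IP, port, and group name are illustrative.
final Endpoint endpoint = new Endpoint("192.168.1.10", 19181);      // svrOpts.getIp() + raft port
final PeerId self = new PeerId(endpoint, 0);                        // second argument is the peer index
final Node node = RaftServiceFactory.createRaftNode("COORDINATOR_RAFT", self);
NodeManager.getInstance().addAddress(endpoint);                     // register the address before node.init(...)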

Aggregations

Endpoint (io.dingodb.raft.util.Endpoint) 25
Store (io.dingodb.store.row.metadata.Store) 6
PeerId (io.dingodb.raft.entity.PeerId) 5
Cluster (io.dingodb.store.row.metadata.Cluster) 5
NodeOptions (io.dingodb.raft.option.NodeOptions) 4
Status (io.dingodb.raft.Status) 3
FutureHelper (io.dingodb.store.row.client.FutureHelper) 3
FailoverClosure (io.dingodb.store.row.client.failover.FailoverClosure) 3
RetryRunner (io.dingodb.store.row.client.failover.RetryRunner) 3
FailoverClosureImpl (io.dingodb.store.row.client.failover.impl.FailoverClosureImpl) 3
CreateRegionIdRequest (io.dingodb.store.row.cmd.pd.CreateRegionIdRequest) 3
GetClusterInfoRequest (io.dingodb.store.row.cmd.pd.GetClusterInfoRequest) 3
GetStoreIdRequest (io.dingodb.store.row.cmd.pd.GetStoreIdRequest) 3
GetStoreInfoRequest (io.dingodb.store.row.cmd.pd.GetStoreInfoRequest) 3
SetStoreInfoRequest (io.dingodb.store.row.cmd.pd.SetStoreInfoRequest) 3
Region (io.dingodb.store.row.metadata.Region) 3
StoreStats (io.dingodb.store.row.metadata.StoreStats) 3
RouteTable (io.dingodb.raft.RouteTable) 2
Configuration (io.dingodb.raft.conf.Configuration) 2
RpcServer (io.dingodb.raft.rpc.RpcServer) 2