Use of io.dingodb.raft.entity.Task in project dingo by dingodb.
Class RaftRawKVStore, method applyOperation.
private void applyOperation(final KVOperation op, final KVStoreClosure closure) {
    if (!isLeader()) {
        closure.setError(Errors.NOT_LEADER);
        closure.run(new Status(RaftError.EPERM, "Not leader"));
        return;
    }
    final Task task = new Task();
    task.setData(ByteBuffer.wrap(Serializers.getDefault().writeObject(op)));
    task.setDone(new KVClosureAdapter(closure, op));
    this.node.apply(task);
}
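applyOperation reports failure through the closure's Status: it runs the KVStoreClosure directly with EPERM when this node is not the leader, and otherwise hands a KVClosureAdapter to the raft node, which runs it once the operation is committed or fails. The sketch below is not dingo code: ApplyCallbackSketch and leaderAwareClosure are hypothetical names, the package paths are assumed to mirror the upstream jraft layout, and it only illustrates the plain Closure-level handling of such a Status.

import io.dingodb.raft.Closure;
import io.dingodb.raft.Status;
import io.dingodb.raft.error.RaftError;

public final class ApplyCallbackSketch {
    // Hypothetical caller-side closure: the raft node eventually runs it with the Status
    // produced for the task, EPERM meaning this node was not (or is no longer) the leader.
    public static Closure leaderAwareClosure() {
        return (final Status status) -> {
            if (status.isOk()) {
                // the operation was committed and applied by the leader
            } else if (status.getRaftError() == RaftError.EPERM) {
                // not the leader: refresh routing information and retry against the current leader
            } else {
                // busy, shutting down, or internal error: surface the failure to the caller
            }
        };
    }
}

A caller would hand such a closure (or a richer KVStoreClosure wrapper) to applyOperation and treat the EPERM branch as a signal to re-route the request rather than as a hard failure.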
Use of io.dingodb.raft.entity.Task in project dingo by dingodb.
Class NodeImpl, method apply.
@Override
public void apply(final Task task) {
    if (this.shutdownLatch != null) {
        Utils.runClosureInThread(task.getDone(), new Status(RaftError.ENODESHUTDOWN, "Node is shutting down."));
        throw new IllegalStateException("Node is shutting down");
    }
    Requires.requireNonNull(task, "Null task");
    final LogEntry entry = new LogEntry();
    entry.setData(task.getData());
    int retryTimes = 0;
    try {
        final EventTranslator<LogEntryAndClosure> translator = (event, sequence) -> {
            event.reset();
            event.done = task.getDone();
            event.entry = entry;
            event.expectedTerm = task.getExpectedTerm();
        };
        this.applyQueue.publishEvent(translator);
        /*
        while (true) {
            if (this.applyQueue.tryPublishEvent(translator)) {
                break;
            } else {
                retryTimes++;
                if (retryTimes > MAX_APPLY_RETRY_TIMES) {
                    Utils.runClosureInThread(task.getDone(),
                        new Status(RaftError.EBUSY, "Node is busy, has too many tasks."));
                    LOG.warn("Node {} applyQueue is overload.", getNodeId());
                    this.metrics.recordTimes("apply-task-overload-times", 1);
                    return;
                }
                ThreadHelper.onSpinWait();
            }
        }
        */
    } catch (final Exception e) {
        LOG.error("Fail to apply task.", e);
        Utils.runClosureInThread(task.getDone(), new Status(RaftError.EPERM, "Node is down."));
    }
}
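The commented-out loop shows the overload-protection variant: publish to the Disruptor ring buffer without blocking, spin a bounded number of times, and fail the task with EBUSY once the retry budget is exhausted. Below is a minimal standalone sketch of that pattern, assuming the LMAX Disruptor library; BoundedPublishSketch, SlotEvent, tryPublishWithRetry and MAX_RETRY are hypothetical names, not dingo code.

import com.lmax.disruptor.EventFactory;
import com.lmax.disruptor.EventTranslator;
import com.lmax.disruptor.RingBuffer;

public final class BoundedPublishSketch {
    // A trivial ring-buffer event with one payload slot; the factory is what a caller would
    // pass when building the buffer, e.g. RingBuffer.createMultiProducer(SlotEvent.FACTORY, 1024).
    static class SlotEvent {
        Object payload;
        static final EventFactory<SlotEvent> FACTORY = SlotEvent::new;
    }

    private static final int MAX_RETRY = 3;

    // Non-blocking publish with a bounded spin, mirroring the commented-out branch in
    // NodeImpl.apply: returns false when the queue stays full, so the caller can complete
    // the task's closure with an EBUSY status instead of blocking the submitting thread.
    static boolean tryPublishWithRetry(final RingBuffer<SlotEvent> ringBuffer, final Object payload) {
        final EventTranslator<SlotEvent> translator = (event, sequence) -> event.payload = payload;
        int retries = 0;
        while (true) {
            if (ringBuffer.tryPublishEvent(translator)) {
                return true;
            }
            if (++retries > MAX_RETRY) {
                return false;
            }
            Thread.onSpinWait(); // JDK 9+ spin hint, analogous to ThreadHelper.onSpinWait()
        }
    }
}

The non-blocking variant trades a possible EBUSY error for bounded submit latency; the code path actually in use calls publishEvent, which blocks until ring-buffer space becomes available.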
Use of io.dingodb.raft.entity.Task in project dingo by dingodb.
Class NodeImpl, method executeApplyingTasks.
private void executeApplyingTasks(final List<LogEntryAndClosure> tasks) {
    this.writeLock.lock();
    try {
        final int size = tasks.size();
        if (this.state != State.STATE_LEADER) {
            final Status st = new Status();
            if (this.state != State.STATE_TRANSFERRING) {
                st.setError(RaftError.EPERM, "Is not leader.");
            } else {
                st.setError(RaftError.EBUSY, "Is transferring leadership.");
            }
            LOG.debug("Node {} can't apply, status={}.", getNodeId(), st);
            final List<Closure> dones = tasks.stream().map(ele -> ele.done).collect(Collectors.toList());
            Utils.runInThread(() -> {
                for (final Closure done : dones) {
                    done.run(st);
                }
            });
            return;
        }
        final List<LogEntry> entries = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            final LogEntryAndClosure task = tasks.get(i);
            if (task.expectedTerm != -1 && task.expectedTerm != this.currTerm) {
                LOG.debug("Node {} can't apply task whose expectedTerm={} doesn't match currTerm={}.",
                    getNodeId(), task.expectedTerm, this.currTerm);
                if (task.done != null) {
                    final Status st = new Status(RaftError.EPERM, "expected_term=%d doesn't match current_term=%d",
                        task.expectedTerm, this.currTerm);
                    Utils.runClosureInThread(task.done, st);
                    task.reset();
                }
                continue;
            }
            if (!this.ballotBox.appendPendingTask(this.conf.getConf(),
                this.conf.isStable() ? null : this.conf.getOldConf(), task.done)) {
                Utils.runClosureInThread(task.done, new Status(RaftError.EINTERNAL, "Fail to append task."));
                task.reset();
                continue;
            }
            // set task entry info before adding to list.
            task.entry.getId().setTerm(this.currTerm);
            task.entry.setType(EnumOutter.EntryType.ENTRY_TYPE_DATA);
            entries.add(task.entry);
            task.reset();
        }
        this.logManager.appendEntries(entries, new LeaderStableClosure(entries));
        // update conf.first
        checkAndSetConfiguration(true);
    } finally {
        this.writeLock.unlock();
    }
}
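executeApplyingTasks drops any task whose expectedTerm no longer matches currTerm, which lets a caller ensure an operation is only committed under the leadership in which it was submitted. A minimal sketch of opting into that guard is shown below; TermGuardedSubmit is a hypothetical helper, the leader term is assumed to have been captured from StateMachine#onLeaderStart, and package paths are assumed to mirror the upstream jraft layout.

import io.dingodb.raft.Closure;
import io.dingodb.raft.Node;
import io.dingodb.raft.entity.Task;

import java.nio.ByteBuffer;

public final class TermGuardedSubmit {
    private TermGuardedSubmit() {
    }

    // leaderTerm is the term observed in StateMachine#onLeaderStart. If the node has moved to a
    // newer term before the task is dequeued, executeApplyingTasks completes the closure with
    // EPERM ("expected_term doesn't match current_term") instead of appending the entry.
    public static void submit(final Node node, final byte[] payload, final long leaderTerm, final Closure done) {
        final Task task = new Task();
        task.setData(ByteBuffer.wrap(payload));
        task.setExpectedTerm(leaderTerm); // the default of -1 disables the term check
        task.setDone(done);
        node.apply(task);
    }
}

Callers that do not care about the term, like applyOperation and applySplit in this listing, simply leave expectedTerm at its default of -1, so the guard never fires.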
Use of io.dingodb.raft.entity.Task in project dingo by dingodb.
Class StoreEngine, method applySplit.
public void applySplit(final String regionId, final String newRegionId, final KVStoreClosure closure) {
    Requires.requireNonNull(regionId, "regionId");
    Requires.requireNonNull(newRegionId, "newRegionId");
    if (this.regionEngineTable.containsKey(newRegionId)) {
        closure.setError(Errors.CONFLICT_REGION_ID);
        closure.run(new Status(-1, "Conflict region id %s", newRegionId));
        return;
    }
    if (!this.splitting.compareAndSet(false, true)) {
        closure.setError(Errors.SERVER_BUSY);
        closure.run(new Status(-1, "Server is busy now"));
        return;
    }
    final RegionEngine parentEngine = getRegionEngine(regionId);
    if (parentEngine == null) {
        closure.setError(Errors.NO_REGION_FOUND);
        closure.run(new Status(-1, "RegionEngine[%s] not found", regionId));
        this.splitting.set(false);
        return;
    }
    if (!parentEngine.isLeader()) {
        closure.setError(Errors.NOT_LEADER);
        closure.run(new Status(-1, "RegionEngine[%s] not leader", regionId));
        this.splitting.set(false);
        return;
    }
    final Region parentRegion = parentEngine.getRegion();
    final byte[] startKey = BytesUtil.nullToEmpty(parentRegion.getStartKey());
    final byte[] endKey = parentRegion.getEndKey();
    ApproximateKVStats stats = this.rawKVStore.getApproximateKVStatsInRange(startKey, endKey);
    final long approximateKeys = stats.keysCnt;
    final long approximateBytes = stats.sizeInBytes / 1024 / 1024;
    final long leastKeysOnSplit = this.storeOpts.getLeastKeysOnSplit();
    boolean isSplitOK = (approximateBytes >= 64 || approximateKeys > leastKeysOnSplit);
    LOG.info("Region:{} Split Condition is {}!. Disk Used:{} >= 64M, Write Keys:{} >= Expected Keys:{}",
        parentEngine, isSplitOK, approximateBytes, approximateKeys, leastKeysOnSplit);
    if (!isSplitOK) {
        closure.setError(Errors.TOO_SMALL_TO_SPLIT);
        closure.run(new Status(-1, "RegionEngine[%s]'s split condition is not OK!. "
            + "Write Keys:%d bytes(M): %d, Expected: keys:%d, bytes: 64M",
            regionId, approximateKeys, approximateBytes, leastKeysOnSplit));
        this.splitting.set(false);
        return;
    }
    final byte[] splitKey = this.rawKVStore.jumpOver(startKey, approximateKeys >> 1);
    if (splitKey == null) {
        closure.setError(Errors.STORAGE_ERROR);
        closure.run(new Status(-1, "Fail to scan split key"));
        this.splitting.set(false);
        return;
    }
    final KVOperation op = KVOperation.createRangeSplit(splitKey, regionId, newRegionId);
    LOG.info("Store receive region split instruction: Old Region:{}, oldStartKey:{}, oldEndKey:{}, "
        + "approximateKeys:{}, newRegionId:{}, splitKey:{}",
        parentEngine.toString(), startKey != null ? BytesUtil.toHex(startKey) : "null",
        endKey != null ? BytesUtil.toHex(endKey) : "null", approximateKeys, newRegionId,
        splitKey != null ? BytesUtil.toHex(splitKey) : "null");
    final Task task = new Task();
    task.setData(ByteBuffer.wrap(Serializers.getDefault().writeObject(op)));
    task.setDone(new KVClosureAdapter(closure, op));
    parentEngine.getNode().apply(task);
}
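The split decision reduces to simple arithmetic: convert the range's byte count to megabytes and require either at least 64 MB of data or more keys than the configured minimum. A minimal sketch of that check, using the hypothetical helper name shouldSplit, is shown below.

// Hypothetical helper restating the split condition from applySplit.
static boolean shouldSplit(final long sizeInBytes, final long keysCnt, final long leastKeysOnSplit) {
    final long approximateMb = sizeInBytes / 1024 / 1024; // bytes -> MB, as in applySplit
    return approximateMb >= 64 || keysCnt > leastKeysOnSplit;
}

// Example: a range holding 100 MB qualifies regardless of its key count,
//   shouldSplit(100L * 1024 * 1024, 10_000, 1_000_000) == true,
// while a 1 MB range with few keys does not:
//   shouldSplit(1L * 1024 * 1024, 10_000, 1_000_000) == false.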