use of org.elasticsearch.action.support.replication.ReplicationResponse in project elasticsearch by elastic.
the class GlobalCheckpointSyncAction method shardOperationOnPrimary.
@Override
protected PrimaryResult shardOperationOnPrimary(PrimaryRequest request, IndexShard indexShard) throws Exception {
    // Read the primary's current global checkpoint, fsync the translog so everything
    // up to that checkpoint is durable, then broadcast the checkpoint to the replicas
    // via the ReplicaRequest; the ReplicationResponse carries the acknowledgement.
    long checkpoint = indexShard.getGlobalCheckpoint();
    indexShard.getTranslog().sync();
    return new PrimaryResult(new ReplicaRequest(request, checkpoint), new ReplicationResponse());
}
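For context, a hedged sketch of the matching replica-side operation in this action; the accessor names (getCheckpoint, the single-argument updateGlobalCheckpointOnReplica) are assumptions from this era of the codebase, not quoted from this page.

// Hedged sketch (method names assumed): the replica adopts the primary's global
// checkpoint and fsyncs its own translog before acknowledging.
@Override
protected ReplicaResult shardOperationOnReplica(ReplicaRequest request, IndexShard indexShard) throws Exception {
    indexShard.updateGlobalCheckpointOnReplica(request.getCheckpoint()); // assumed accessor
    indexShard.getTranslog().sync();
    return new ReplicaResult();
}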
use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
the class TransportShardRefreshAction method shardOperationOnPrimary.
@Override
protected void shardOperationOnPrimary(BasicReplicationRequest shardRequest,
                                       IndexShard primary,
                                       ActionListener<PrimaryResult<BasicReplicationRequest, ReplicationResponse>> listener) {
    ActionListener.completeWith(listener, () -> {
        primary.refresh("api");
        logger.trace("{} refresh request executed on primary", primary.shardId());
        return new PrimaryResult<>(shardRequest, new ReplicationResponse());
    });
}
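To show what a caller gets back, a minimal sketch that inspects the ReplicationResponse's ShardInfo once the refresh completes; the logger variable is assumed.

// Minimal caller sketch: ShardInfo on the ReplicationResponse reports how many
// shard copies acknowledged the refresh. (`logger` is an assumed variable.)
ActionListener<ReplicationResponse> onRefresh = ActionListener.wrap(
    response -> {
        ReplicationResponse.ShardInfo shardInfo = response.getShardInfo();
        logger.debug("refresh acknowledged by {} of {} shard copies",
                     shardInfo.getSuccessful(), shardInfo.getTotal());
    },
    e -> logger.warn("refresh failed", e));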
use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
the class RecoverySourceHandler method createRetentionLease.
void createRetentionLease(final long startingSeqNo, ActionListener<RetentionLease> listener) {
    runUnderPrimaryPermit(() -> {
        // Clone the peer recovery retention lease belonging to the source shard. We are retaining history between the local
        // checkpoint of the safe commit we're creating and this lease's retained seqno with the retention lock, and by cloning an
        // existing lease we (approximately) know that all our peers are also retaining history as requested by the cloned lease. If
        // the recovery now fails before copying enough history over then a subsequent attempt will find this lease, determine it is
        // not enough, and fall back to a file-based recovery.
        //
        // We say "(approximately)" because we do not guarantee to be able to satisfy every lease on every peer.
        logger.trace("cloning primary's retention lease");
        try {
            final StepListener<ReplicationResponse> cloneRetentionLeaseStep = new StepListener<>();
            final RetentionLease clonedLease = shard.cloneLocalPeerRecoveryRetentionLease(
                request.targetNode().getId(),
                new ThreadedActionListener<>(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, cloneRetentionLeaseStep, false));
            logger.trace("cloned primary's retention lease as [{}]", clonedLease);
            cloneRetentionLeaseStep.whenComplete(rr -> listener.onResponse(clonedLease), listener::onFailure);
        } catch (RetentionLeaseNotFoundException e) {
            // The primary has no retention lease to clone, which can happen during a rolling upgrade from a version before
            // peer-recovery retention leases were introduced; in that case create a new lease using the local checkpoint of
            // the safe commit we are using for recovery as a conservative estimate for the global checkpoint.
            assert shard.indexSettings().getIndexVersionCreated().before(Version.V_4_3_0)
                || shard.indexSettings().isSoftDeleteEnabled() == false;
            final StepListener<ReplicationResponse> addRetentionLeaseStep = new StepListener<>();
            final long estimatedGlobalCheckpoint = startingSeqNo - 1;
            final RetentionLease newLease = shard.addPeerRecoveryRetentionLease(
                request.targetNode().getId(),
                estimatedGlobalCheckpoint,
                new ThreadedActionListener<>(logger, shard.getThreadPool(), ThreadPool.Names.GENERIC, addRetentionLeaseStep, false));
            addRetentionLeaseStep.whenComplete(rr -> listener.onResponse(newLease), listener::onFailure);
            logger.trace("created retention lease with estimated checkpoint of [{}]", estimatedGlobalCheckpoint);
        }
    }, shardId + " establishing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger);
}
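Both branches above share one shape: a StepListener<ReplicationResponse> bridges the lease API's callback to the outer listener. A hedged caller-side sketch of the same chaining pattern (variable names assumed):

// Hedged caller sketch (names assumed): sequence createRetentionLease into the
// next recovery step using the same StepListener chaining as the method above.
final StepListener<RetentionLease> createLeaseStep = new StepListener<>();
createRetentionLease(startingSeqNo, createLeaseStep);
createLeaseStep.whenComplete(
    lease -> logger.trace("retention lease [{}] established; continuing recovery", lease),
    listener::onFailure);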
use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
the class ReplicationTracker method createMissingPeerRecoveryRetentionLeases.
/**
 * Create any required peer-recovery retention leases that do not currently exist, because we just did a rolling upgrade from a
 * version prior to {@link Version#V_4_3_0} that does not create peer-recovery retention leases.
 */
public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener<Void> listener) {
    if (hasAllPeerRecoveryRetentionLeases == false) {
        final List<ShardRouting> shardRoutings = routingTable.assignedShards();
        final GroupedActionListener<ReplicationResponse> groupedActionListener = new GroupedActionListener<>(ActionListener.wrap(vs -> {
            setHasAllPeerRecoveryRetentionLeases();
            listener.onResponse(null);
        }, listener::onFailure), shardRoutings.size());
        for (ShardRouting shardRouting : shardRoutings) {
            if (retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting))) {
                // this shard copy already has a lease; count it as done
                groupedActionListener.onResponse(null);
            } else {
                final CheckpointState checkpointState = checkpoints.get(shardRouting.allocationId().getId());
                if (checkpointState.tracked == false) {
                    // we do not track this copy's checkpoints, so it needs no lease yet
                    groupedActionListener.onResponse(null);
                } else {
                    logger.trace("createMissingPeerRecoveryRetentionLeases: adding missing lease for {}", shardRouting);
                    try {
                        addPeerRecoveryRetentionLease(
                            shardRouting.currentNodeId(),
                            Math.max(SequenceNumbers.NO_OPS_PERFORMED, checkpointState.globalCheckpoint),
                            groupedActionListener);
                    } catch (Exception e) {
                        groupedActionListener.onFailure(e);
                    }
                }
            }
        }
    } else {
        logger.trace("createMissingPeerRecoveryRetentionLeases: nothing to do");
        listener.onResponse(null);
    }
}
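A self-contained sketch of the GroupedActionListener pattern used above: the wrapped listener fires exactly once, after the expected number of onResponse calls, or immediately on the first onFailure. The expectedOps variable and logger are assumptions for illustration.

// Sketch of the grouped-listener pattern (assumed names): the delegate runs
// once after `expectedOps` responses have been collected.
int expectedOps = 3;
GroupedActionListener<ReplicationResponse> grouped = new GroupedActionListener<>(
    ActionListener.wrap(
        responses -> logger.trace("all {} operations completed", responses.size()),
        e -> logger.warn("at least one operation failed", e)),
    expectedOps);
for (int i = 0; i < expectedOps; i++) {
    grouped.onResponse(null); // each completed (or skipped) shard counts toward the group
}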
use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
the class RetentionLeaseBackgroundSyncAction method backgroundSync.
final void backgroundSync(ShardId shardId, String primaryAllocationId, long primaryTerm, RetentionLeases retentionLeases) {
    final Request request = new Request(shardId, retentionLeases);
    transportService.sendChildRequest(clusterService.localNode(), transportPrimaryAction,
        new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), transportOptions,
        new TransportResponseHandler<ReplicationResponse>() {

            @Override
            public ReplicationResponse read(StreamInput in) throws IOException {
                return newResponseInstance(in);
            }

            @Override
            public String executor() {
                return ThreadPool.Names.SAME;
            }

            @Override
            public void handleResponse(ReplicationResponse response) {
                // the sync is fire-and-forget; a successful ack needs no action
            }

            @Override
            public void handleException(TransportException e) {
                if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) {
                    // node shutting down
                    return;
                }
                if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) != null) {
                    // the shard is closed
                    return;
                }
                getLogger().warn(new ParameterizedMessage("{} retention lease background sync failed", shardId), e);
            }
        });
}
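A hypothetical trigger for illustration, showing how a primary might invoke this background sync with its current leases; the field name and IndexShard accessors here are assumptions, not quoted from this page.

// Hypothetical caller (field and accessor names assumed): push the primary's
// current retention leases to the replication group in the background.
final ShardRouting primaryRouting = indexShard.routingEntry();
retentionLeaseBackgroundSyncAction.backgroundSync(
    indexShard.shardId(),
    primaryRouting.allocationId().getId(),  // primary's allocation id
    indexShard.getOperationPrimaryTerm(),   // current primary term
    indexShard.getRetentionLeases());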