Use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
The class RetentionLeaseSyncAction, method shardOperationOnPrimary:
@Override
protected void shardOperationOnPrimary(Request request,
                                       IndexShard primary,
                                       ActionListener<PrimaryResult<Request, ReplicationResponse>> listener) {
    ActionListener.completeWith(listener, () -> {
        assert request.waitForActiveShards().equals(ActiveShardCount.NONE) : request.waitForActiveShards();
        Objects.requireNonNull(request);
        Objects.requireNonNull(primary);
        // Persist the retention leases on the primary, then hand back a write
        // result that carries the ReplicationResponse to be replicated.
        primary.persistRetentionLeases();
        return new WritePrimaryResult<>(request, new ReplicationResponse(), null, null, primary);
    });
}
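The handler above delegates its error handling to ActionListener.completeWith: the supplier runs inline, a returned value completes the listener via onResponse, and a thrown exception is routed to onFailure. A minimal, self-contained sketch of that pattern (the class name CompleteWithDemo is ours, not from the project):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.replication.ReplicationResponse;

public final class CompleteWithDemo {

    public static void main(String[] args) {
        // Listener that just reports the outcome.
        ActionListener<ReplicationResponse> listener = ActionListener.wrap(
            response -> System.out.println("sync completed: " + response),
            e -> System.err.println("sync failed: " + e));

        // completeWith() invokes the supplier inline; the returned value goes to
        // onResponse, and any exception thrown by the supplier goes to onFailure.
        ActionListener.completeWith(listener, ReplicationResponse::new);
    }
}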
Use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
The class PrimaryReplicaSyncerTests, method testSyncerOnClosingShard:
public void testSyncerOnClosingShard() throws Exception {
    IndexShard shard = newStartedShard(true);
    AtomicBoolean syncActionCalled = new AtomicBoolean();
    PrimaryReplicaSyncer.SyncAction syncAction = (request, allocationId, primaryTerm, listener) -> {
        logger.info("Sending off {} operations", request.getOperations().length);
        syncActionCalled.set(true);
        threadPool.generic().execute(() -> listener.onResponse(new ReplicationResponse()));
    };
    PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(syncAction);
    // every document is sent off separately
    syncer.setChunkSize(new ByteSizeValue(1));
    int numDocs = 10;
    for (int i = 0; i < numDocs; i++) {
        // Index a doc but do not advance the local checkpoint.
        shard.applyIndexOperationOnPrimary(
            Versions.MATCH_ANY,
            VersionType.INTERNAL,
            new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i), new BytesArray("{}"), XContentType.JSON),
            SequenceNumbers.UNASSIGNED_SEQ_NO,
            0,
            -1L,
            false);
    }
    String allocationId = shard.routingEntry().allocationId().getId();
    shard.updateShardState(
        shard.routingEntry(),
        shard.getPendingPrimaryTerm(),
        null,
        1000L,
        Collections.singleton(allocationId),
        new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build());
    CountDownLatch syncCalledLatch = new CountDownLatch(1);
    PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<PrimaryReplicaSyncer.ResyncTask>() {

        @Override
        public void onFailure(Exception e) {
            try {
                super.onFailure(e);
            } finally {
                syncCalledLatch.countDown();
            }
        }

        @Override
        public void onResponse(PrimaryReplicaSyncer.ResyncTask result) {
            try {
                super.onResponse(result);
            } finally {
                syncCalledLatch.countDown();
            }
        }
    };
    threadPool.generic().execute(() -> syncer.resync(shard, fut));
    if (randomBoolean()) {
        syncCalledLatch.await();
    }
    // Close the shard while the resync may still be in flight; the syncer must
    // either complete or fail cleanly with a closed-shard exception.
    closeShards(shard);
    try {
        fut.actionGet();
        assertTrue("Sync action was not called", syncActionCalled.get());
    } catch (AlreadyClosedException | IndexShardClosedException ignored) {
        // ignore
    }
}
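Because PrimaryReplicaSyncer.SyncAction is a functional interface in this codebase (the four-argument lambda above), a test double can be a one-liner. A minimal sketch of a stub that acknowledges every resync chunk immediately on the calling thread, assuming the same lambda shape as the test:

// A stub SyncAction: acknowledge each chunk inline instead of dispatching it
// over the transport. Useful when the test does not care about threading.
PrimaryReplicaSyncer.SyncAction ackImmediately =
    (request, allocationId, primaryTerm, listener) -> listener.onResponse(new ReplicationResponse());
PrimaryReplicaSyncer inlineSyncer = new PrimaryReplicaSyncer(ackImmediately);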
Use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
The class RetentionLeaseIT, method testRetentionLeasesSyncedOnAdd:
@Test
public void testRetentionLeasesSyncedOnAdd() throws Exception {
    final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2);
    internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas);
    execute(
        "create table doc.tbl (x int) clustered into 1 shards " +
        "with (number_of_replicas = ?, \"soft_deletes.enabled\" = true)",
        new Object[] { numberOfReplicas });
    ensureGreen("tbl");
    final String primaryShardNodeId = clusterService().state().routingTable().index("tbl").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster()
        .getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
    // we will add multiple retention leases and expect to see them synced to all replicas
    final int length = randomIntBetween(1, 8);
    final Map<String, RetentionLease> currentRetentionLeases = new HashMap<>();
    for (int i = 0; i < length; i++) {
        final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8));
        final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE);
        final String source = randomAlphaOfLength(8);
        final CountDownLatch latch = new CountDownLatch(1);
        final ActionListener<ReplicationResponse> listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()));
        // simulate a peer recovery which locks the soft deletes policy on the primary
        final Closeable retentionLock = randomBoolean()
            ? primary.acquireHistoryRetentionLock(Engine.HistorySource.INDEX)
            : () -> {};
        currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener));
        latch.await();
        retentionLock.close();
        // check retention leases have been written on the primary
        assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(primary.loadRetentionLeases())));
        // check current retention leases have been synced to all replicas
        for (final ShardRouting replicaShard : clusterService().state().routingTable().index("tbl").shard(0).replicaShards()) {
            final String replicaShardNodeId = replicaShard.currentNodeId();
            final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
            final IndexShard replica = internalCluster()
                .getInstance(IndicesService.class, replicaShardNodeName)
                .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
            final Map<String, RetentionLease> retentionLeasesOnReplica = RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.getRetentionLeases());
            assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases));
            // check retention leases have been written on the replica
            assertThat(currentRetentionLeases, equalTo(RetentionLeaseUtils.toMapExcludingPeerRecoveryRetentionLeases(replica.loadRetentionLeases())));
        }
    }
}
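The CountDownLatch plus ActionListener.wrap pattern above is one way to block the test thread until the lease sync round-trips. A sketch of an equivalent using PlainActionFuture, which is itself an ActionListener (not what this test uses, just an illustration; primary, id, retainingSequenceNumber, and source are assumed to be the variables from the test above):

// PlainActionFuture implements ActionListener<ReplicationResponse>, so it can be
// handed to addRetentionLease(...) directly and then blocked on with actionGet().
PlainActionFuture<ReplicationResponse> syncFuture = PlainActionFuture.newFuture();
primary.addRetentionLease(id, retainingSequenceNumber, source, syncFuture);
syncFuture.actionGet();  // blocks until the background sync completes or fails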
Use of org.elasticsearch.action.support.replication.ReplicationResponse in project crate by crate.
The class RetentionLeaseIT, method runUnderBlockTest:
private void runUnderBlockTest(final String idForInitialRetentionLease,
                               final long initialRetainingSequenceNumber,
                               final BiConsumer<IndexShard, ActionListener<ReplicationResponse>> primaryConsumer,
                               final Consumer<IndexShard> afterSync) throws InterruptedException {
    execute(
        "create table doc.tbl (x int) clustered into 1 shards " +
        "with (" +
        "   number_of_replicas = 0, " +
        "   \"soft_deletes.enabled\" = true, " +
        "   \"soft_deletes.retention_lease.sync_interval\" = '1s' " +
        ")");
    ensureGreen("tbl");
    final String primaryShardNodeId = clusterService().state().routingTable().index("tbl").shard(0).primaryShard().currentNodeId();
    final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName();
    final IndexShard primary = internalCluster()
        .getInstance(IndicesService.class, primaryShardNodeName)
        .getShardOrNull(new ShardId(resolveIndex("tbl"), 0));
    final String source = randomAlphaOfLength(8);
    final CountDownLatch latch = new CountDownLatch(1);
    final ActionListener<ReplicationResponse> listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString()));
    primary.addRetentionLease(idForInitialRetentionLease, initialRetainingSequenceNumber, source, listener);
    latch.await();
    final String block = randomFrom("read_only", "read_only_allow_delete", "read", "write", "metadata");
    execute("alter table doc.tbl set (\"blocks." + block + "\" = true)");
    try {
        final CountDownLatch actionLatch = new CountDownLatch(1);
        final AtomicBoolean success = new AtomicBoolean();
        primaryConsumer.accept(primary, new ActionListener<ReplicationResponse>() {

            @Override
            public void onResponse(final ReplicationResponse replicationResponse) {
                success.set(true);
                actionLatch.countDown();
            }

            @Override
            public void onFailure(final Exception e) {
                fail(e.toString());
            }
        });
        actionLatch.await();
        assertTrue(success.get());
        afterSync.accept(primary);
    } finally {
        execute("alter table doc.tbl reset (\"blocks." + block + "\")");
    }
}
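A sketch of how a test might drive this helper, verifying that a retention lease operation still succeeds while a random index block is set (the test name and the lambda body are illustrative, not lifted from the project):

@Test
public void testAddRetentionLeaseUnderBlock() throws InterruptedException {
    runUnderBlockTest(
        randomAlphaOfLength(8),                 // idForInitialRetentionLease
        randomLongBetween(0, Long.MAX_VALUE),   // initialRetainingSequenceNumber
        (primary, listener) -> primary.addRetentionLease(
            randomAlphaOfLength(8),
            randomLongBetween(0, Long.MAX_VALUE),
            randomAlphaOfLength(8),
            listener),
        primary -> {});                         // no extra assertions after the sync
}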