Use of org.elasticsearch.index.mapper.SourceToParse in project elasticsearch by elastic.
In class TransportShardBulkAction, method executeIndexRequestOnReplica:
/**
 * Execute the given {@link IndexRequest} on a replica shard, throwing a
 * {@link RetryOnReplicaException} if the operation needs to be re-tried.
 */
public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse,
                                                              IndexRequest request,
                                                              IndexShard replica) throws IOException {
    final ShardId shardId = replica.shardId();
    SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA,
            shardId.getIndexName(), request.type(), request.id(), request.source(),
            request.getContentType())
        .routing(request.routing())
        .parent(request.parent());
    final Engine.Index operation;
    final long version = primaryResponse.getVersion();
    final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
    assert versionType.validateVersionForWrites(version);
    final long seqNo = primaryResponse.getSeqNo();
    try {
        operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType,
            request.getAutoGeneratedTimestamp(), request.isRetry());
    } catch (MapperParsingException e) {
        return new Engine.IndexResult(e, version, seqNo);
    }
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    if (update != null) {
        throw new RetryOnReplicaException(shardId,
            "Mappings are not available on the replica yet, triggered update: " + update);
    }
    return replica.index(operation);
}
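This is the pre-typeless Elasticsearch API: SourceToParse is built through a static factory that takes an explicit parsing origin, with routing and parent chained as optional setters. Below is a minimal standalone sketch of the same construction, using only the factory and setters shown above; the index name, type, id and source bytes are hypothetical placeholder values.

SourceToParse sourceToParse = SourceToParse.source(
        SourceToParse.Origin.REPLICA,            // parse in the context of a replica-side operation
        "my-index",                              // hypothetical index name
        "my-type",                               // mapping type (this API predates typeless indexing)
        "1",                                     // document id
        new BytesArray("{\"field\":\"value\"}"), // raw source (org.elasticsearch.common.bytes.BytesArray)
        XContentType.JSON)                       // content type of the source bytes
    .routing(null)                               // optional custom routing, none here
    .parent(null);                               // optional parent id, none here

The REPLICA origin matters because a dynamic mapping update discovered while parsing on a replica cannot be applied locally; it surfaces as the RetryOnReplicaException thrown above.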
Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.
In class TransportShardUpsertAction, method processRequestItemsOnReplica:
@Override
protected WriteReplicaResult<ShardUpsertRequest> processRequestItemsOnReplica(IndexShard indexShard,
                                                                              ShardUpsertRequest request) throws IOException {
    Translog.Location location = null;
    for (ShardUpsertRequest.Item item : request.items()) {
        if (item.source() == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("[{} (R)] Document with id {} has no source, primary operation must have failed",
                    indexShard.shardId(), item.id());
            }
            continue;
        }
        SourceToParse sourceToParse = new SourceToParse(
            indexShard.shardId().getIndexName(), item.id(), item.source(), XContentType.JSON);
        Engine.IndexResult indexResult = indexShard.applyIndexOperationOnReplica(
            item.seqNo(), item.primaryTerm(), item.version(),
            Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
        if (indexResult.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
            // the replica may not have applied the new mapping yet, so there is no other option than to wait
            throw new TransportReplicationAction.RetryOnReplicaException(indexShard.shardId(),
                "Mappings are not available on the replica yet, triggered update: "
                    + indexResult.getRequiredMappingUpdate());
        }
        location = indexResult.getTranslogLocation();
    }
    return new WriteReplicaResult<>(request, location, null, indexShard, logger);
}
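In the CrateDB fork the type argument and the Origin enum are gone: SourceToParse is a plain constructor call, and the replica replays the exact seq_no, primary term and version that the primary assigned to the item. A minimal sketch of one such replay, assuming an indexShard reference and primary-assigned seqNo, primaryTerm and version values in scope (document values hypothetical):

SourceToParse stp = new SourceToParse(
    "my-index",                              // hypothetical index name
    "1",                                     // document id
    new BytesArray("{\"field\":\"value\"}"), // raw document source
    XContentType.JSON);
Engine.IndexResult result = indexShard.applyIndexOperationOnReplica(
    seqNo, primaryTerm, version,             // replay exactly what the primary assigned
    Translog.UNSET_AUTO_GENERATED_TIMESTAMP, // the document id was not auto-generated
    false,                                   // not a retry
    stp);
if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
    // the replica cannot apply the operation until the mapping arrives; retry later
}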
Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.
In class TransportShardUpsertAction, method index:
private Engine.IndexResult index(ShardUpsertRequest.Item item,
                                 IndexShard indexShard,
                                 boolean isRetry,
                                 long seqNo,
                                 long primaryTerm,
                                 long version) throws Exception {
    SourceToParse sourceToParse = new SourceToParse(
        indexShard.shardId().getIndexName(), item.id(), item.source(), XContentType.JSON);
    Engine.IndexResult indexResult = executeOnPrimaryHandlingMappingUpdate(
        indexShard.shardId(),
        () -> indexShard.applyIndexOperationOnPrimary(
            version, VersionType.INTERNAL, sourceToParse, seqNo, primaryTerm,
            Translog.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry),
        e -> indexShard.getFailedIndexResult(e, Versions.MATCH_ANY));
    switch (indexResult.getResultType()) {
        case SUCCESS:
            // update the seqNo and version on the request for the replicas
            if (logger.isTraceEnabled()) {
                logger.trace("SUCCESS - id={}, primary_term={}, seq_no={}",
                    item.id(), primaryTerm, indexResult.getSeqNo());
            }
            item.seqNo(indexResult.getSeqNo());
            item.version(indexResult.getVersion());
            item.primaryTerm(indexResult.getTerm());
            return indexResult;
        case FAILURE:
            Exception failure = indexResult.getFailure();
            if (logger.isTraceEnabled()) {
                logger.trace("FAILURE - id={}, primary_term={}, seq_no={}",
                    item.id(), primaryTerm, indexResult.getSeqNo());
            }
            assert failure != null : "Failure must not be null if resultType is FAILURE";
            throw failure;
        case MAPPING_UPDATE_REQUIRED:
        default:
            throw new AssertionError("IndexResult must either succeed or fail. Required mapping updates must have been handled.");
    }
}
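On the primary the engine assigns seq_no and version itself; the seqNo and primaryTerm parameters threaded into applyIndexOperationOnPrimary appear to act as if_seq_no / if_primary_term concurrency conditions, matching how the test further below passes UNASSIGNED_SEQ_NO and 0 for an unconditional write. A minimal sketch of such an unconditional call, mirroring the argument order used above (index name and source hypothetical):

Engine.IndexResult result = indexShard.applyIndexOperationOnPrimary(
    Versions.MATCH_ANY,                      // no version check
    VersionType.INTERNAL,                    // internal versioning
    new SourceToParse("my-index", "1", new BytesArray("{}"), XContentType.JSON),
    SequenceNumbers.UNASSIGNED_SEQ_NO,       // no if_seq_no condition
    0,                                       // no if_primary_term condition
    Translog.UNSET_AUTO_GENERATED_TIMESTAMP, // the document id was not auto-generated
    false);                                  // not a retry

On success the freshly assigned seq_no, version and term are copied back onto the request item, which is how they reach the replica path shown earlier.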
Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.
In class PeerRecoveryTargetServiceTests, method populateRandomData:
private SeqNoStats populateRandomData(IndexShard shard) throws IOException {
    List<Long> seqNos = LongStream.range(0, 100).boxed().collect(Collectors.toList());
    Randomness.shuffle(seqNos);
    for (long seqNo : seqNos) {
        shard.applyIndexOperationOnReplica(
            seqNo, 1, shard.getOperationPrimaryTerm(), Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false,
            new SourceToParse(shard.shardId().getIndexName(), UUIDs.randomBase64UUID(),
                new BytesArray("{}"), XContentType.JSON));
        if (randomInt(100) < 5) {
            shard.flush(new FlushRequest().waitIfOngoing(true));
        }
    }
    shard.sync();
    long globalCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, shard.getLocalCheckpoint());
    shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test");
    shard.sync();
    return shard.seqNoStats();
}
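The shuffled seq_nos exercise out-of-order application, which is legal on a replica; once all 100 operations (seq_nos 0 through 99) are applied there are no gaps, so the local checkpoint should have caught up to the max seq_no. A hypothetical follow-up sketch of what the returned stats would then satisfy:

SeqNoStats stats = populateRandomData(shard);
assertThat(stats.getMaxSeqNo(), equalTo(99L));        // highest seq_no applied
assertThat(stats.getLocalCheckpoint(), equalTo(99L)); // contiguous 0..99, no gaps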
Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.
In class PrimaryReplicaSyncerTests, method testSyncerSendsOffCorrectDocuments:
@Test
public void testSyncerSendsOffCorrectDocuments() throws Exception {
    IndexShard shard = newStartedShard(true);
    AtomicBoolean syncActionCalled = new AtomicBoolean();
    List<ResyncReplicationRequest> resyncRequests = new ArrayList<>();
    PrimaryReplicaSyncer.SyncAction syncAction = (request, allocationId, primaryTerm, listener) -> {
        logger.info("Sending off {} operations", request.getOperations().length);
        syncActionCalled.set(true);
        resyncRequests.add(request);
        listener.onResponse(new ReplicationResponse());
    };
    PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(syncAction);
    syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10)));
    int numDocs = randomInt(10);
    for (int i = 0; i < numDocs; i++) {
        // Index the doc, but do not advance the local checkpoint.
        shard.applyIndexOperationOnPrimary(
            Versions.MATCH_ANY, VersionType.INTERNAL,
            new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i),
                new BytesArray("{}"), XContentType.JSON),
            SequenceNumbers.UNASSIGNED_SEQ_NO, 0, -1L, true);
    }
    long globalCheckPoint = numDocs > 0 ? randomIntBetween(0, numDocs - 1) : 0;
    boolean syncNeeded = numDocs > 0;
    String allocationId = shard.routingEntry().allocationId().getId();
    shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L,
        Collections.singleton(allocationId),
        new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build());
    shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint);
    assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint());
    logger.info("Total ops: {}, global checkpoint: {}", numDocs, globalCheckPoint);
    PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>();
    syncer.resync(shard, fut);
    PrimaryReplicaSyncer.ResyncTask resyncTask = fut.get();
    if (syncNeeded) {
        assertTrue("Sync action was not called", syncActionCalled.get());
        ResyncReplicationRequest resyncRequest = resyncRequests.remove(0);
        assertThat(resyncRequest.getTrimAboveSeqNo(), equalTo(numDocs - 1L));
        assertThat("trimAboveSeqNo has to be specified in request #0 only",
            resyncRequests.stream()
                .mapToLong(ResyncReplicationRequest::getTrimAboveSeqNo)
                .filter(seqNo -> seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO)
                .findFirst()
                .isPresent(),
            is(false));
        assertThat(resyncRequest.getMaxSeenAutoIdTimestampOnPrimary(),
            equalTo(shard.getMaxSeenAutoIdTimestamp()));
    }
    if (syncNeeded && globalCheckPoint < numDocs - 1) {
        assertThat(resyncTask.getSkippedOperations(), equalTo(0));
        assertThat(resyncTask.getResyncedOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint)));
        if (shard.indexSettings.isSoftDeleteEnabled()) {
            assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint)));
        } else {
            assertThat(resyncTask.getTotalOperations(), equalTo(numDocs));
        }
    } else {
        assertThat(resyncTask.getSkippedOperations(), equalTo(0));
        assertThat(resyncTask.getResyncedOperations(), equalTo(0));
        assertThat(resyncTask.getTotalOperations(), equalTo(0));
    }
    closeShards(shard);
}
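The assertions encode the resync contract: the primary re-sends only operations strictly above the global checkpoint, and the first request carries trimAboveSeqNo so replicas discard anything beyond the primary's highest seq_no. A worked example with hypothetical concrete numbers:

// Assuming numDocs = 5 and globalCheckPoint = 2:
long resynced  = 5 - 1 - 2; // == 2: ops with seq_no 3 and 4 are re-sent
long trimAbove = 5 - 1L;    // == 4: replicas trim anything above seq_no 4, the primary's max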