
Example 1 with SourceToParse

Use of org.elasticsearch.index.mapper.SourceToParse in project elasticsearch by elastic.

From class TransportShardBulkAction, method executeIndexRequestOnReplica.

/**
     * Execute the given {@link IndexRequest} on a replica shard, throwing a
     * {@link RetryOnReplicaException} if the operation needs to be re-tried.
     */
public static Engine.IndexResult executeIndexRequestOnReplica(DocWriteResponse primaryResponse, IndexRequest request, IndexShard replica) throws IOException {
    final ShardId shardId = replica.shardId();
    SourceToParse sourceToParse = SourceToParse
        .source(SourceToParse.Origin.REPLICA, shardId.getIndexName(), request.type(), request.id(),
                request.source(), request.getContentType())
        .routing(request.routing())
        .parent(request.parent());
    final Engine.Index operation;
    final long version = primaryResponse.getVersion();
    final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
    assert versionType.validateVersionForWrites(version);
    final long seqNo = primaryResponse.getSeqNo();
    try {
        operation = replica.prepareIndexOnReplica(sourceToParse, seqNo, version, versionType, request.getAutoGeneratedTimestamp(), request.isRetry());
    } catch (MapperParsingException e) {
        return new Engine.IndexResult(e, version, seqNo);
    }
    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
    if (update != null) {
        throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
    }
    return replica.index(operation);
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) MapperParsingException(org.elasticsearch.index.mapper.MapperParsingException) SourceToParse(org.elasticsearch.index.mapper.SourceToParse) Mapping(org.elasticsearch.index.mapper.Mapping) Engine(org.elasticsearch.index.engine.Engine) VersionType(org.elasticsearch.index.VersionType)
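
The factory call above is the Elasticsearch 5.x style; the CrateDB examples below construct SourceToParse with a plain constructor instead. A minimal sketch contrasting the two forms, using only calls that appear verbatim in the snippets on this page; the index name, id, routing and parent values are made-up placeholders:

// Hedged sketch, not taken from either project: the two construction styles side by side.
BytesArray source = new BytesArray("{\"field\":\"value\"}");
// Example 1 style (Elasticsearch 5.x): static factory with an Origin and a mapping type,
// optionally chained with routing and parent.
SourceToParse fromFactory = SourceToParse
    .source(SourceToParse.Origin.REPLICA, "my_index", "my_type", "1", source, XContentType.JSON)
    .routing("routing-key")
    .parent("parent-id");
// Examples 2-5 style (CrateDB's Elasticsearch fork): plain constructor, no mapping type or origin.
SourceToParse fromConstructor = new SourceToParse("my_index", "1", source, XContentType.JSON);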

Example 2 with SourceToParse

Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.

From class TransportShardUpsertAction, method processRequestItemsOnReplica.

@Override
protected WriteReplicaResult<ShardUpsertRequest> processRequestItemsOnReplica(IndexShard indexShard, ShardUpsertRequest request) throws IOException {
    Translog.Location location = null;
    for (ShardUpsertRequest.Item item : request.items()) {
        if (item.source() == null) {
            if (logger.isTraceEnabled()) {
                logger.trace("[{} (R)] Document with id {}, has no source, primary operation must have failed", indexShard.shardId(), item.id());
            }
            continue;
        }
        SourceToParse sourceToParse = new SourceToParse(
            indexShard.shardId().getIndexName(), item.id(), item.source(), XContentType.JSON);
        Engine.IndexResult indexResult = indexShard.applyIndexOperationOnReplica(
            item.seqNo(), item.primaryTerm(), item.version(),
            Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
        if (indexResult.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
            // The replica may not have applied the new mapping yet, so there is no other option than to wait.
            throw new TransportReplicationAction.RetryOnReplicaException(
                indexShard.shardId(),
                "Mappings are not available on the replica yet, triggered update: " + indexResult.getRequiredMappingUpdate());
        }
        location = indexResult.getTranslogLocation();
    }
    return new WriteReplicaResult<>(request, location, null, indexShard, logger);
}
Also used : SourceToParse(org.elasticsearch.index.mapper.SourceToParse) Engine(org.elasticsearch.index.engine.Engine) Translog(org.elasticsearch.index.translog.Translog)

Example 3 with SourceToParse

Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.

From class TransportShardUpsertAction, method index.

private Engine.IndexResult index(ShardUpsertRequest.Item item, IndexShard indexShard, boolean isRetry, long seqNo, long primaryTerm, long version) throws Exception {
    SourceToParse sourceToParse = new SourceToParse(
        indexShard.shardId().getIndexName(), item.id(), item.source(), XContentType.JSON);
    Engine.IndexResult indexResult = executeOnPrimaryHandlingMappingUpdate(
        indexShard.shardId(),
        () -> indexShard.applyIndexOperationOnPrimary(
            version, VersionType.INTERNAL, sourceToParse, seqNo, primaryTerm,
            Translog.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry),
        e -> indexShard.getFailedIndexResult(e, Versions.MATCH_ANY));
    switch(indexResult.getResultType()) {
        case SUCCESS:
            // update the seqNo and version on request for the replicas
            if (logger.isTraceEnabled()) {
                logger.trace("SUCCESS - id={}, primary_term={}, seq_no={}", item.id(), primaryTerm, indexResult.getSeqNo());
            }
            item.seqNo(indexResult.getSeqNo());
            item.version(indexResult.getVersion());
            item.primaryTerm(indexResult.getTerm());
            return indexResult;
        case FAILURE:
            Exception failure = indexResult.getFailure();
            if (logger.isTraceEnabled()) {
                logger.trace("FAILURE - id={}, primary_term={}, seq_no={}", item.id(), primaryTerm, indexResult.getSeqNo());
            }
            assert failure != null : "Failure must not be null if resultType is FAILURE";
            throw failure;
        case MAPPING_UPDATE_REQUIRED:
        default:
            throw new AssertionError("IndexResult must either succeed or fail. Required mapping updates must have been handled.");
    }
}
Also used : SourceToParse(org.elasticsearch.index.mapper.SourceToParse) Engine(org.elasticsearch.index.engine.Engine) UncheckedIOException(java.io.UncheckedIOException) DocumentMissingException(org.elasticsearch.index.engine.DocumentMissingException) DocumentSourceMissingException(org.elasticsearch.index.engine.DocumentSourceMissingException) VersionConflictEngineException(org.elasticsearch.index.engine.VersionConflictEngineException) IOException(java.io.IOException)
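
Examples 2 and 3 are the two halves of the same write: the primary records the assigned seqNo, version and primary term on the item (the SUCCESS branch above), and the replica replays exactly those values. A small illustrative helper, not part of the project, wiring the two together using only the calls shown in Example 2:

// Hypothetical glue code showing how the fields set in Example 3's SUCCESS branch
// feed the replica-side replay from Example 2.
private static Engine.IndexResult replayOnReplica(IndexShard replicaShard,
                                                  ShardUpsertRequest.Item item) throws IOException {
    SourceToParse sourceToParse = new SourceToParse(
        replicaShard.shardId().getIndexName(), item.id(), item.source(), XContentType.JSON);
    // seqNo, primaryTerm and version were assigned by the primary and copied onto the item.
    return replicaShard.applyIndexOperationOnReplica(
        item.seqNo(), item.primaryTerm(), item.version(),
        Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
}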

Example 4 with SourceToParse

Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.

From class PeerRecoveryTargetServiceTests, method populateRandomData.

private SeqNoStats populateRandomData(IndexShard shard) throws IOException {
    List<Long> seqNos = LongStream.range(0, 100).boxed().collect(Collectors.toList());
    Randomness.shuffle(seqNos);
    for (long seqNo : seqNos) {
        shard.applyIndexOperationOnReplica(
            seqNo, 1, shard.getOperationPrimaryTerm(), Translog.UNSET_AUTO_GENERATED_TIMESTAMP, false,
            new SourceToParse(shard.shardId().getIndexName(), UUIDs.randomBase64UUID(),
                              new BytesArray("{}"), XContentType.JSON));
        if (randomInt(100) < 5) {
            shard.flush(new FlushRequest().waitIfOngoing(true));
        }
    }
    shard.sync();
    long globalCheckpoint = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, shard.getLocalCheckpoint());
    shard.updateGlobalCheckpointOnReplica(globalCheckpoint, "test");
    shard.sync();
    return shard.seqNoStats();
}
Also used : BytesArray(org.elasticsearch.common.bytes.BytesArray) FlushRequest(org.elasticsearch.action.admin.indices.flush.FlushRequest) SourceToParse(org.elasticsearch.index.mapper.SourceToParse)
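
A hedged usage sketch for the helper above: newStartedShard and closeShards come from the shared shard test infrastructure (IndexShardTestCase in upstream Elasticsearch, also visible in Example 5 below), and the max sequence number follows from the fixed range 0..99; everything else is assumed rather than taken from the project:

// Illustrative only: run the helper against a freshly started replica shard and check the stats.
IndexShard replica = newStartedShard(false);
SeqNoStats stats = populateRandomData(replica);
// LongStream.range(0, 100) applied every sequence number from 0 to 99, so the max is 99.
assertThat(stats.getMaxSeqNo(), equalTo(99L));
closeShards(replica);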

Example 5 with SourceToParse

Use of org.elasticsearch.index.mapper.SourceToParse in project crate by crate.

From class PrimaryReplicaSyncerTests, method testSyncerSendsOffCorrectDocuments.

@Test
public void testSyncerSendsOffCorrectDocuments() throws Exception {
    IndexShard shard = newStartedShard(true);
    AtomicBoolean syncActionCalled = new AtomicBoolean();
    List<ResyncReplicationRequest> resyncRequests = new ArrayList<>();
    PrimaryReplicaSyncer.SyncAction syncAction = (request, allocationId, primaryTerm, listener) -> {
        logger.info("Sending off {} operations", request.getOperations().length);
        syncActionCalled.set(true);
        resyncRequests.add(request);
        listener.onResponse(new ReplicationResponse());
    };
    PrimaryReplicaSyncer syncer = new PrimaryReplicaSyncer(syncAction);
    syncer.setChunkSize(new ByteSizeValue(randomIntBetween(1, 10)));
    int numDocs = randomInt(10);
    for (int i = 0; i < numDocs; i++) {
        // Index a doc but do not advance the local checkpoint.
        shard.applyIndexOperationOnPrimary(
            Versions.MATCH_ANY, VersionType.INTERNAL,
            new SourceToParse(shard.shardId().getIndexName(), Integer.toString(i),
                              new BytesArray("{}"), XContentType.JSON),
            SequenceNumbers.UNASSIGNED_SEQ_NO, 0, -1L, true);
    }
    long globalCheckPoint = numDocs > 0 ? randomIntBetween(0, numDocs - 1) : 0;
    boolean syncNeeded = numDocs > 0;
    String allocationId = shard.routingEntry().allocationId().getId();
    shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L,
        Collections.singleton(allocationId),
        new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build());
    shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint);
    assertEquals(globalCheckPoint, shard.getLastKnownGlobalCheckpoint());
    logger.info("Total ops: {}, global checkpoint: {}", numDocs, globalCheckPoint);
    PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>();
    syncer.resync(shard, fut);
    PrimaryReplicaSyncer.ResyncTask resyncTask = fut.get();
    if (syncNeeded) {
        assertTrue("Sync action was not called", syncActionCalled.get());
        ResyncReplicationRequest resyncRequest = resyncRequests.remove(0);
        assertThat(resyncRequest.getTrimAboveSeqNo(), equalTo(numDocs - 1L));
        assertThat("trimAboveSeqNo has to be specified in request #0 only", resyncRequests.stream().mapToLong(ResyncReplicationRequest::getTrimAboveSeqNo).filter(seqNo -> seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO).findFirst().isPresent(), is(false));
        assertThat(resyncRequest.getMaxSeenAutoIdTimestampOnPrimary(), equalTo(shard.getMaxSeenAutoIdTimestamp()));
    }
    if (syncNeeded && globalCheckPoint < numDocs - 1) {
        assertThat(resyncTask.getSkippedOperations(), equalTo(0));
        assertThat(resyncTask.getResyncedOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint)));
        if (shard.indexSettings.isSoftDeleteEnabled()) {
            assertThat(resyncTask.getTotalOperations(), equalTo(Math.toIntExact(numDocs - 1 - globalCheckPoint)));
        } else {
            assertThat(resyncTask.getTotalOperations(), equalTo(numDocs));
        }
    } else {
        assertThat(resyncTask.getSkippedOperations(), equalTo(0));
        assertThat(resyncTask.getResyncedOperations(), equalTo(0));
        assertThat(resyncTask.getTotalOperations(), equalTo(0));
    }
    closeShards(shard);
}
Also used : Arrays(java.util.Arrays) Versions(org.elasticsearch.common.lucene.uid.Versions) IsInstanceOf.instanceOf(org.hamcrest.core.IsInstanceOf.instanceOf) XContentType(org.elasticsearch.common.xcontent.XContentType) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) VersionType(org.elasticsearch.index.VersionType) Matchers.anyString(org.mockito.Matchers.anyString) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Settings(org.elasticsearch.common.settings.Settings) Matchers.eq(org.mockito.Matchers.eq) Matchers.anyLong(org.mockito.Matchers.anyLong) TestTranslog(org.elasticsearch.index.translog.TestTranslog) Mockito.doReturn(org.mockito.Mockito.doReturn) SourceToParse(org.elasticsearch.index.mapper.SourceToParse) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) SequenceNumbers(org.elasticsearch.index.seqno.SequenceNumbers) PlainActionFuture(org.elasticsearch.action.support.PlainActionFuture) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) Test(org.junit.Test) Collectors(java.util.stream.Collectors) Engine(org.elasticsearch.index.engine.Engine) CountDownLatch(java.util.concurrent.CountDownLatch) Mockito(org.mockito.Mockito) List(java.util.List) ReplicationResponse(org.elasticsearch.action.support.replication.ReplicationResponse) Matchers.equalTo(org.hamcrest.Matchers.equalTo) Translog(org.elasticsearch.index.translog.Translog) Matchers.is(org.hamcrest.Matchers.is) ResyncReplicationRequest(org.elasticsearch.action.resync.ResyncReplicationRequest) Collections(java.util.Collections)

Aggregations

SourceToParse (org.elasticsearch.index.mapper.SourceToParse): 19 usages
Test (org.junit.Test): 11 usages
CrateDummyClusterServiceUnitTest (io.crate.test.integration.CrateDummyClusterServiceUnitTest): 9 usages
BytesReference (org.elasticsearch.common.bytes.BytesReference): 9 usages
DocumentMapper (org.elasticsearch.index.mapper.DocumentMapper): 9 usages
ParsedDocument (org.elasticsearch.index.mapper.ParsedDocument): 8 usages
Engine (org.elasticsearch.index.engine.Engine): 7 usages
Translog (org.elasticsearch.index.translog.Translog): 5 usages
BytesArray (org.elasticsearch.common.bytes.BytesArray): 4 usages
VersionType (org.elasticsearch.index.VersionType): 4 usages
ArrayList (java.util.ArrayList): 3 usages
Arrays (java.util.Arrays): 2 usages
Collections (java.util.Collections): 2 usages
List (java.util.List): 2 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 2 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 2 usages
Collectors (java.util.stream.Collectors): 2 usages
AlreadyClosedException (org.apache.lucene.store.AlreadyClosedException): 2 usages
ResyncReplicationRequest (org.elasticsearch.action.resync.ResyncReplicationRequest): 2 usages
PlainActionFuture (org.elasticsearch.action.support.PlainActionFuture): 2 usages